diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index 6f5a391c4c..3059e209c1 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -29,14 +29,14 @@ schedules: always: true branches: include: - - stable-8 - - stable-7 + - stable-10 + - stable-9 - cron: 0 11 * * 0 displayName: Weekly (old stable branches) always: true branches: include: - - stable-6 + - stable-8 variables: - name: checkoutPath @@ -53,7 +53,7 @@ variables: resources: containers: - container: default - image: quay.io/ansible/azure-pipelines-test-container:4.0.1 + image: quay.io/ansible/azure-pipelines-test-container:6.0.0 pool: Standard @@ -73,6 +73,19 @@ stages: - test: 3 - test: 4 - test: extra + - stage: Sanity_2_18 + displayName: Sanity 2.18 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + nameFormat: Test {0} + testFormat: 2.18/sanity/{0} + targets: + - test: 1 + - test: 2 + - test: 3 + - test: 4 - stage: Sanity_2_17 displayName: Sanity 2.17 dependsOn: [] @@ -99,19 +112,6 @@ stages: - test: 2 - test: 3 - test: 4 - - stage: Sanity_2_15 - displayName: Sanity 2.15 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Test {0} - testFormat: 2.15/sanity/{0} - targets: - - test: 1 - - test: 2 - - test: 3 - - test: 4 ### Units - stage: Units_devel displayName: Units devel @@ -127,6 +127,18 @@ stages: - test: '3.10' - test: '3.11' - test: '3.12' + - test: '3.13' + - stage: Units_2_18 + displayName: Units 2.18 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + nameFormat: Python {0} + testFormat: 2.18/units/{0}/1 + targets: + - test: 3.8 + - test: "3.13" - stage: Units_2_17 displayName: Units 2.17 dependsOn: [] @@ -150,17 +162,6 @@ stages: - test: 2.7 - test: 3.6 - test: "3.11" - - stage: Units_2_15 - displayName: Units 2.15 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: 2.15/units/{0}/1 - targets: - - test: 3.5 - - test: "3.10" ## Remote - stage: Remote_devel_extra_vms @@ -171,12 +172,14 @@ stages: parameters: testFormat: devel/{0} targets: - - name: Alpine 3.19 - test: alpine/3.19 - # - name: Fedora 39 - # test: fedora/39 + - name: Alpine 3.21 + test: alpine/3.21 + # - name: Fedora 41 + # test: fedora/41 - name: Ubuntu 22.04 test: ubuntu/22.04 + - name: Ubuntu 24.04 + test: ubuntu/24.04 groups: - vm - stage: Remote_devel @@ -189,10 +192,28 @@ stages: targets: - name: macOS 14.3 test: macos/14.3 - - name: RHEL 9.3 - test: rhel/9.3 - - name: FreeBSD 14.0 - test: freebsd/14.0 + - name: RHEL 9.5 + test: rhel/9.5 + - name: FreeBSD 14.2 + test: freebsd/14.2 + - name: FreeBSD 13.4 + test: freebsd/13.4 + groups: + - 1 + - 2 + - 3 + - stage: Remote_2_18 + displayName: Remote 2.18 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + testFormat: 2.18/{0} + targets: + - name: RHEL 9.4 + test: rhel/9.4 + - name: FreeBSD 14.1 + test: freebsd/14.1 groups: - 1 - 2 @@ -207,6 +228,10 @@ stages: targets: - name: FreeBSD 13.3 test: freebsd/13.3 + - name: RHEL 9.3 + test: rhel/9.3 + - name: FreeBSD 14.0 + test: freebsd/14.0 groups: - 1 - 2 @@ -225,30 +250,10 @@ stages: test: rhel/9.2 - name: RHEL 8.8 test: rhel/8.8 - - name: FreeBSD 13.2 - test: freebsd/13.2 - groups: - - 1 - - 2 - - 3 - - stage: Remote_2_15 - displayName: Remote 2.15 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - testFormat: 2.15/{0} - targets: - - name: RHEL 9.1 - test: rhel/9.1 - - name: RHEL 8.7 - test: 
rhel/8.7 - name: RHEL 7.9 test: rhel/7.9 - # - name: FreeBSD 13.1 - # test: freebsd/13.1 - # - name: FreeBSD 12.4 - # test: freebsd/12.4 + # - name: FreeBSD 13.2 + # test: freebsd/13.2 groups: - 1 - 2 @@ -263,12 +268,32 @@ stages: parameters: testFormat: devel/linux/{0} targets: - - name: Fedora 39 - test: fedora39 - - name: Ubuntu 20.04 - test: ubuntu2004 + - name: Fedora 41 + test: fedora41 + - name: Alpine 3.21 + test: alpine321 - name: Ubuntu 22.04 test: ubuntu2204 + - name: Ubuntu 24.04 + test: ubuntu2404 + groups: + - 1 + - 2 + - 3 + - stage: Docker_2_18 + displayName: Docker 2.18 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + testFormat: 2.18/linux/{0} + targets: + - name: Fedora 40 + test: fedora40 + - name: Alpine 3.20 + test: alpine320 + - name: Ubuntu 24.04 + test: ubuntu2404 groups: - 1 - 2 @@ -281,8 +306,12 @@ stages: parameters: testFormat: 2.17/linux/{0} targets: + - name: Fedora 39 + test: fedora39 - name: Alpine 3.19 test: alpine319 + - name: Ubuntu 20.04 + test: ubuntu2004 groups: - 1 - 2 @@ -301,20 +330,6 @@ stages: test: opensuse15 - name: Alpine 3 test: alpine3 - groups: - - 1 - - 2 - - 3 - - stage: Docker_2_15 - displayName: Docker 2.15 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - testFormat: 2.15/linux/{0} - targets: - - name: Fedora 37 - test: fedora37 - name: CentOS 7 test: centos7 groups: @@ -336,83 +351,86 @@ stages: - name: Debian Bookworm test: debian-bookworm/3.11 - name: ArchLinux - test: archlinux/3.12 + test: archlinux/3.13 groups: - 1 - 2 - 3 ### Generic - - stage: Generic_devel - displayName: Generic devel - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: devel/generic/{0}/1 - targets: - - test: '3.8' - - test: '3.11' - - stage: Generic_2_17 - displayName: Generic 2.17 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: 2.17/generic/{0}/1 - targets: - - test: '3.7' - - test: '3.12' - - stage: Generic_2_16 - displayName: Generic 2.16 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: 2.16/generic/{0}/1 - targets: - - test: '2.7' - - test: '3.6' - - test: '3.11' - - stage: Generic_2_15 - displayName: Generic 2.15 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: 2.15/generic/{0}/1 - targets: - - test: '3.9' +# Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled. 
+# - stage: Generic_devel +# displayName: Generic devel +# dependsOn: [] +# jobs: +# - template: templates/matrix.yml +# parameters: +# nameFormat: Python {0} +# testFormat: devel/generic/{0}/1 +# targets: +# - test: '3.8' +# - test: '3.11' +# - test: '3.13' +# - stage: Generic_2_18 +# displayName: Generic 2.18 +# dependsOn: [] +# jobs: +# - template: templates/matrix.yml +# parameters: +# nameFormat: Python {0} +# testFormat: 2.18/generic/{0}/1 +# targets: +# - test: '3.8' +# - test: '3.13' +# - stage: Generic_2_17 +# displayName: Generic 2.17 +# dependsOn: [] +# jobs: +# - template: templates/matrix.yml +# parameters: +# nameFormat: Python {0} +# testFormat: 2.17/generic/{0}/1 +# targets: +# - test: '3.7' +# - test: '3.12' +# - stage: Generic_2_16 +# displayName: Generic 2.16 +# dependsOn: [] +# jobs: +# - template: templates/matrix.yml +# parameters: +# nameFormat: Python {0} +# testFormat: 2.16/generic/{0}/1 +# targets: +# - test: '2.7' +# - test: '3.6' +# - test: '3.11' - stage: Summary condition: succeededOrFailed() dependsOn: - Sanity_devel + - Sanity_2_18 - Sanity_2_17 - Sanity_2_16 - - Sanity_2_15 - Units_devel + - Units_2_18 - Units_2_17 - Units_2_16 - - Units_2_15 - Remote_devel_extra_vms - Remote_devel + - Remote_2_18 - Remote_2_17 - Remote_2_16 - - Remote_2_15 - Docker_devel + - Docker_2_18 - Docker_2_17 - Docker_2_16 - - Docker_2_15 - Docker_community_devel # Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled. # - Generic_devel +# - Generic_2_18 # - Generic_2_17 # - Generic_2_16 -# - Generic_2_15 jobs: - template: templates/coverage.yml diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index add3249355..6827441a4c 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -131,6 +131,8 @@ files: maintainers: $team_huawei $doc_fragments/nomad.py: maintainers: chris93111 apecnascimento + $doc_fragments/pipx.py: + maintainers: russoz $doc_fragments/xenserver.py: labels: xenserver maintainers: bvitnik @@ -157,6 +159,8 @@ files: $filters/jc.py: maintainers: kellyjonbrazil $filters/json_query.py: {} + $filters/keep_keys.py: + maintainers: vbotka $filters/lists.py: maintainers: cfiehe $filters/lists_difference.yml: @@ -170,6 +174,12 @@ files: $filters/lists_union.yml: maintainers: cfiehe $filters/random_mac.py: {} + $filters/remove_keys.py: + maintainers: vbotka + $filters/replace_keys.py: + maintainers: vbotka + $filters/reveal_ansible_type.py: + maintainers: vbotka $filters/time.py: maintainers: resmo $filters/to_days.yml: @@ -437,9 +447,11 @@ files: $modules/bearychat.py: maintainers: tonyseek $modules/bigpanda.py: - maintainers: hkariti + ignore: hkariti $modules/bitbucket_: maintainers: catcombo + $modules/bootc_manage.py: + maintainers: cooktheryan $modules/bower.py: maintainers: mwarkentin $modules/btrfs_: @@ -502,12 +514,16 @@ files: maintainers: tintoy $modules/discord.py: maintainers: cwollinger + $modules/django_check.py: + maintainers: russoz + $modules/django_command.py: + maintainers: russoz + $modules/django_createcachetable.py: + maintainers: russoz $modules/django_manage.py: ignore: scottanderson42 tastychutney labels: django_manage maintainers: russoz - $modules/django_command.py: - maintainers: russoz $modules/dnf_versionlock.py: maintainers: moreda $modules/dnf_config_manager.py: @@ -634,6 +650,11 @@ files: labels: homebrew_ macos maintainers: $team_macos notify: chris-short + $modules/homebrew_services.py: + ignore: ryansb + keywords: brew cask services darwin homebrew macosx macports osx + labels: homebrew_ macos 
+ maintainers: $team_macos kitizz $modules/homectl.py: maintainers: jameslivulpi $modules/honeybadger_deployment.py: @@ -693,6 +714,8 @@ files: $modules/ipa_: maintainers: $team_ipa ignore: fxfitz + $modules/ipa_getkeytab.py: + maintainers: abakanovskii $modules/ipa_dnsrecord.py: maintainers: $team_ipa jwbernin $modules/ipbase_info.py: @@ -790,6 +813,8 @@ files: maintainers: elfelip $modules/keycloak_user_federation.py: maintainers: laurpaum + $modules/keycloak_userprofile.py: + maintainers: yeoldegrove $modules/keycloak_component_info.py: maintainers: desand01 $modules/keycloak_client_rolescope.py: @@ -955,6 +980,8 @@ files: maintainers: $team_opennebula $modules/one_host.py: maintainers: rvalle + $modules/one_vnet.py: + maintainers: abakanovskii $modules/oneandone_: maintainers: aajdinov edevenport $modules/onepassword_info.py: @@ -1332,16 +1359,19 @@ files: keywords: sophos utm maintainers: $team_e_spirit $modules/utm_ca_host_key_cert.py: - maintainers: stearz + ignore: stearz + maintainers: $team_e_spirit $modules/utm_ca_host_key_cert_info.py: - maintainers: stearz + ignore: stearz + maintainers: $team_e_spirit $modules/utm_network_interface_address.py: maintainers: steamx $modules/utm_network_interface_address_info.py: maintainers: steamx $modules/utm_proxy_auth_profile.py: keywords: sophos utm - maintainers: $team_e_spirit stearz + ignore: stearz + maintainers: $team_e_spirit $modules/utm_proxy_exception.py: keywords: sophos utm maintainers: $team_e_spirit RickS-C137 @@ -1415,10 +1445,16 @@ files: ignore: matze labels: zypper maintainers: $team_suse + $plugin_utils/ansible_type.py: + maintainers: vbotka + $plugin_utils/keys_filter.py: + maintainers: vbotka $plugin_utils/unsafe.py: maintainers: felixfontein $tests/a_module.py: maintainers: felixfontein + $tests/ansible_type.py: + maintainers: vbotka $tests/fqdn_valid.py: maintainers: vbotka ######################### @@ -1432,6 +1468,14 @@ files: maintainers: felixfontein docs/docsite/rst/filter_guide_abstract_informations_lists_helper.rst: maintainers: cfiehe + docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst: + maintainers: vbotka + docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst: + maintainers: vbotka + docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst: + maintainers: vbotka + docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst: + maintainers: vbotka docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst: maintainers: vbotka docs/docsite/rst/filter_guide_conversions.rst: @@ -1448,12 +1492,20 @@ files: maintainers: ericzolf docs/docsite/rst/guide_alicloud.rst: maintainers: xiaozhu36 + docs/docsite/rst/guide_cmdrunner.rst: + maintainers: russoz + docs/docsite/rst/guide_deps.rst: + maintainers: russoz + docs/docsite/rst/guide_modulehelper.rst: + maintainers: russoz docs/docsite/rst/guide_online.rst: maintainers: remyleone docs/docsite/rst/guide_packet.rst: maintainers: baldwinSPC nurfet-becirevic t0mk teebes docs/docsite/rst/guide_scaleway.rst: maintainers: $team_scaleway + docs/docsite/rst/guide_vardict.rst: + maintainers: russoz docs/docsite/rst/test_guide.rst: maintainers: felixfontein ######################### @@ -1485,7 +1537,7 @@ macros: team_ansible_core: team_aix: MorrisA bcoca d-little flynn1973 gforster kairoaraujo marvin-sinister mator molekuul ramooncamacho wtcross team_bsd: JoergFiedler MacLemon bcoca dch jasperla mekanix opoplawski overhacked 
tuxillo - team_consul: sgargan apollo13 + team_consul: sgargan apollo13 Ilgmi team_cyberark_conjur: jvanderhoof ryanprior team_e_spirit: MatrixCrawler getjack team_flatpak: JayKayy oolongbrothers @@ -1494,7 +1546,7 @@ macros: team_huawei: QijunPan TommyLike edisonxiang freesky-edward hwDCN niuzhenguo xuxiaowei0512 yanzhangi zengchen1024 zhongjun2 team_ipa: Akasurde Nosmoht justchris1 team_jboss: Wolfant jairojunior wbrefvem - team_keycloak: eikef ndclt mattock + team_keycloak: eikef ndclt mattock thomasbach-dev team_linode: InTheCloudDan decentral1se displague rmcintosh Charliekenney23 LBGarber team_macos: Akasurde kyleabenson martinm82 danieljaouen indrajitr team_manageiq: abellotti cben gtanzillo yaacov zgalor dkorn evertmulder @@ -1507,6 +1559,6 @@ macros: team_rhsm: cnsnyder ptoscano team_scaleway: remyleone abarbare team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l - team_suse: commel evrardjp lrupp toabctl AnderEnder alxgu andytom sealor + team_suse: commel evrardjp lrupp AnderEnder alxgu andytom sealor team_virt: joshainglis karmab Thulium-Drake Ajpantuso team_wdc: mikemoerk diff --git a/.github/workflows/ansible-test.yml b/.github/workflows/ansible-test.yml index e57213e9fa..89a3006f56 100644 --- a/.github/workflows/ansible-test.yml +++ b/.github/workflows/ansible-test.yml @@ -31,6 +31,7 @@ jobs: ansible: - '2.13' - '2.14' + - '2.15' # Ansible-test on various stable branches does not yet work well with cgroups v2. # Since ubuntu-latest now uses Ubuntu 22.04, we need to fall back to the ubuntu-20.04 # image for these stable branches. The list of branches where this is necessary will @@ -76,6 +77,10 @@ jobs: python: '3.8' - ansible: '2.14' python: '3.9' + - ansible: '2.15' + python: '3.5' + - ansible: '2.15' + python: '3.10' steps: - name: >- @@ -166,16 +171,32 @@ jobs: docker: alpine3 python: '' target: azp/posix/3/ + # 2.15 + - ansible: '2.15' + docker: fedora37 + python: '' + target: azp/posix/1/ + - ansible: '2.15' + docker: fedora37 + python: '' + target: azp/posix/2/ + - ansible: '2.15' + docker: fedora37 + python: '' + target: azp/posix/3/ # Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled. # - ansible: '2.13' # docker: default # python: '3.9' # target: azp/generic/1/ - # Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled. # - ansible: '2.14' # docker: default # python: '3.10' # target: azp/generic/1/ + # - ansible: '2.15' + # docker: default + # python: '3.9' + # target: azp/generic/1/ steps: - name: >- diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index c93162a72a..e8572fafb6 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -25,6 +25,8 @@ jobs: steps: - name: Checkout repository uses: actions/checkout@v4 + with: + persist-credentials: false # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL diff --git a/.github/workflows/reuse.yml b/.github/workflows/reuse.yml index 031e94cb7a..3c5e986e57 100644 --- a/.github/workflows/reuse.yml +++ b/.github/workflows/reuse.yml @@ -7,10 +7,14 @@ name: Verify REUSE on: push: - branches: [main] - pull_request_target: + branches: + - main + - stable-* + pull_request: types: [opened, synchronize, reopened] - branches: [main] + branches: + - main + - stable-* # Run CI once per day (at 07:30 UTC) schedule: - cron: '30 7 * * *' @@ -24,7 +28,8 @@ jobs: steps: - uses: actions/checkout@v4 with: + persist-credentials: false ref: ${{ github.event.pull_request.head.sha || '' }} - name: REUSE Compliance Check - uses: fsfe/reuse-action@v3 + uses: fsfe/reuse-action@v5 diff --git a/CHANGELOG.md b/CHANGELOG.md index b35c52441b..c9f6ffbaa5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,890 @@ -# Placeholder changelog +# Community General Release Notes -This file is a placeholder; a version-specific `CHANGELOG-vX.md` will be generated during releases from fragments -under `changelogs/fragments`. On release branches once a release has been created, consult the branch's version-specific -file for changes that have occurred in that branch. +**Topics** + +- v9\.5\.4 + - Security Fixes + - Bugfixes +- v9\.5\.3 + - Release Summary + - Minor Changes + - Security Fixes + - Bugfixes +- v9\.5\.2 + - Release Summary + - Minor Changes + - Bugfixes +- v9\.5\.1 + - Release Summary + - Minor Changes + - Bugfixes +- v9\.5\.0 + - Release Summary + - Minor Changes + - Deprecated Features + - Bugfixes + - New Modules +- v9\.4\.0 + - Release Summary + - Minor Changes + - Deprecated Features + - Bugfixes + - New Modules +- v9\.3\.0 + - Release Summary + - Minor Changes + - Bugfixes + - New Modules +- v9\.2\.0 + - Release Summary + - Minor Changes + - Bugfixes + - New Plugins + - Filter + - Test +- v9\.1\.0 + - Release Summary + - Minor Changes + - Deprecated Features + - Bugfixes + - Known Issues + - New Plugins + - Filter + - New Modules +- v9\.0\.1 + - Release Summary + - Minor Changes + - Bugfixes +- v9\.0\.0 + - Release Summary + - Minor Changes + - Breaking Changes / Porting Guide + - Deprecated Features + - Removed Features \(previously deprecated\) + - Security Fixes + - Bugfixes + - New Plugins + - Become + - Callback + - Connection + - Filter + - Lookup + - Test + - New Modules +This changelog describes changes after version 8\.0\.0\. + + +## v9\.5\.4 + + +### Security Fixes + +* keycloak\_client \- Sanitize saml\.encryption\.private\.key so it does not show in the logs \([https\://github\.com/ansible\-collections/community\.general/pull/9621](https\://github\.com/ansible\-collections/community\.general/pull/9621)\)\. + + +### Bugfixes + +* redhat\_subscription \- do not try to unsubscribe \(i\.e\. remove subscriptions\) + when unregistering a system\: newer versions of subscription\-manager\, as + available in EL 10 and Fedora 41\+\, do not support entitlements anymore\, and + thus unsubscribing will fail + \([https\://github\.com/ansible\-collections/community\.general/pull/9578](https\://github\.com/ansible\-collections/community\.general/pull/9578)\)\. + + +## v9\.5\.3 + + +### Release Summary + +Regular bugfix release\. + + +### Minor Changes + +* proxmox module utils \- add method api\_task\_complete that can wait for task completion and return error message \([https\://github\.com/ansible\-collections/community\.general/pull/9256](https\://github\.com/ansible\-collections/community\.general/pull/9256)\)\. 
+ + +### Security Fixes + +* keycloak\_authentication \- API calls did not properly set the priority during update resulting in incorrectly sorted authentication flows\. This apparently only affects Keycloak 25 or newer \([https\://github\.com/ansible\-collections/community\.general/pull/9263](https\://github\.com/ansible\-collections/community\.general/pull/9263)\)\. + + +### Bugfixes + +* dig lookup plugin \- correctly handle NoNameserver exception \([https\://github\.com/ansible\-collections/community\.general/pull/9363](https\://github\.com/ansible\-collections/community\.general/pull/9363)\, [https\://github\.com/ansible\-collections/community\.general/issues/9362](https\://github\.com/ansible\-collections/community\.general/issues/9362)\)\. +* htpasswd \- report changes when file permissions are adjusted \([https\://github\.com/ansible\-collections/community\.general/issues/9485](https\://github\.com/ansible\-collections/community\.general/issues/9485)\, [https\://github\.com/ansible\-collections/community\.general/pull/9490](https\://github\.com/ansible\-collections/community\.general/pull/9490)\)\. +* proxmox\_disk \- fix async method and make resize\_disk method handle errors correctly \([https\://github\.com/ansible\-collections/community\.general/pull/9256](https\://github\.com/ansible\-collections/community\.general/pull/9256)\)\. +* proxmox\_template \- fix the wrong path called on proxmox\_template\.task\_status \([https\://github\.com/ansible\-collections/community\.general/issues/9276](https\://github\.com/ansible\-collections/community\.general/issues/9276)\, [https\://github\.com/ansible\-collections/community\.general/pull/9277](https\://github\.com/ansible\-collections/community\.general/pull/9277)\)\. +* qubes connection plugin \- fix the printing of debug information \([https\://github\.com/ansible\-collections/community\.general/pull/9334](https\://github\.com/ansible\-collections/community\.general/pull/9334)\)\. +* redfish\_utils module utils \- Fix VerifyBiosAttributes command on multi system resource nodes \([https\://github\.com/ansible\-collections/community\.general/pull/9234](https\://github\.com/ansible\-collections/community\.general/pull/9234)\)\. + + +## v9\.5\.2 + + +### Release Summary + +Regular bugfix release\. + + +### Minor Changes + +* proxmox inventory plugin \- fix urllib3 InsecureRequestWarnings not being suppressed when a token is used \([https\://github\.com/ansible\-collections/community\.general/pull/9099](https\://github\.com/ansible\-collections/community\.general/pull/9099)\)\. + + +### Bugfixes + +* dnf\_config\_manager \- fix hanging when prompting to import GPG keys \([https\://github\.com/ansible\-collections/community\.general/pull/9124](https\://github\.com/ansible\-collections/community\.general/pull/9124)\, [https\://github\.com/ansible\-collections/community\.general/issues/8830](https\://github\.com/ansible\-collections/community\.general/issues/8830)\)\. +* dnf\_config\_manager \- forces locale to C before module starts\. If the locale was set to non\-English\, the output of the dnf config\-manager could not be parsed \([https\://github\.com/ansible\-collections/community\.general/pull/9157](https\://github\.com/ansible\-collections/community\.general/pull/9157)\, [https\://github\.com/ansible\-collections/community\.general/issues/9046](https\://github\.com/ansible\-collections/community\.general/issues/9046)\)\. 
+* flatpak \- force the locale language to C when running the flatpak command \([https\://github\.com/ansible\-collections/community\.general/pull/9187](https\://github\.com/ansible\-collections/community\.general/pull/9187)\, [https\://github\.com/ansible\-collections/community\.general/issues/8883](https\://github\.com/ansible\-collections/community\.general/issues/8883)\)\. +* github\_key \- in check mode\, a faulty call to \`datetime\.strftime\(\.\.\.\)\` was being made which generated an exception \([https\://github\.com/ansible\-collections/community\.general/issues/9185](https\://github\.com/ansible\-collections/community\.general/issues/9185)\)\. +* homebrew\_cask \- allow \+ symbol in Homebrew cask name validation regex \([https\://github\.com/ansible\-collections/community\.general/pull/9128](https\://github\.com/ansible\-collections/community\.general/pull/9128)\)\. +* keycloak\_client \- fix diff by removing code that turns the attributes dict which contains additional settings into a list \([https\://github\.com/ansible\-collections/community\.general/pull/9077](https\://github\.com/ansible\-collections/community\.general/pull/9077)\)\. +* keycloak\_clientscope \- fix diff and end\_state by removing the code that turns the attributes dict\, which contains additional config items\, into a list \([https\://github\.com/ansible\-collections/community\.general/pull/9082](https\://github\.com/ansible\-collections/community\.general/pull/9082)\)\. +* keycloak\_clientscope\_type \- sort the default and optional clientscope lists to improve the diff \([https\://github\.com/ansible\-collections/community\.general/pull/9202](https\://github\.com/ansible\-collections/community\.general/pull/9202)\)\. +* redfish\_utils module utils \- remove undocumented default applytime \([https\://github\.com/ansible\-collections/community\.general/pull/9114](https\://github\.com/ansible\-collections/community\.general/pull/9114)\)\. +* slack \- fail if Slack API response is not OK with error message \([https\://github\.com/ansible\-collections/community\.general/pull/9198](https\://github\.com/ansible\-collections/community\.general/pull/9198)\)\. + + +## v9\.5\.1 + + +### Release Summary + +Regular bugfix release\. + + +### Minor Changes + +* redfish\_utils module utils \- schedule a BIOS configuration job at next reboot when the BIOS config is changed \([https\://github\.com/ansible\-collections/community\.general/pull/9012](https\://github\.com/ansible\-collections/community\.general/pull/9012)\)\. + + +### Bugfixes + +* bitwarden lookup plugin \- support BWS v0\.3\.0 syntax breaking change \([https\://github\.com/ansible\-collections/community\.general/pull/9028](https\://github\.com/ansible\-collections/community\.general/pull/9028)\)\. +* collection\_version lookup plugin \- use importlib directly instead of the deprecated and in ansible\-core 2\.19 removed ansible\.module\_utils\.compat\.importlib \([https\://github\.com/ansible\-collections/community\.general/pull/9084](https\://github\.com/ansible\-collections/community\.general/pull/9084)\)\. +* gitlab\_label \- update label\'s color \([https\://github\.com/ansible\-collections/community\.general/pull/9010](https\://github\.com/ansible\-collections/community\.general/pull/9010)\)\. 
+* keycloak\_clientscope\_type \- fix detect changes in check mode \([https\://github\.com/ansible\-collections/community\.general/issues/9092](https\://github\.com/ansible\-collections/community\.general/issues/9092)\, [https\://github\.com/ansible\-collections/community\.general/pull/9093](https\://github\.com/ansible\-collections/community\.general/pull/9093)\)\.
+* keycloak\_group \- fix crash caused in subgroup creation\. The crash was caused by a missing or empty subGroups property in Keycloak ≥23 \([https\://github\.com/ansible\-collections/community\.general/issues/8788](https\://github\.com/ansible\-collections/community\.general/issues/8788)\, [https\://github\.com/ansible\-collections/community\.general/pull/8979](https\://github\.com/ansible\-collections/community\.general/pull/8979)\)\.
+* modprobe \- fix check mode not being honored for persistent option \([https\://github\.com/ansible\-collections/community\.general/issues/9051](https\://github\.com/ansible\-collections/community\.general/issues/9051)\, [https\://github\.com/ansible\-collections/community\.general/pull/9052](https\://github\.com/ansible\-collections/community\.general/pull/9052)\)\.
+* one\_host \- fix if statements for cases when ID\=0 \([https\://github\.com/ansible\-collections/community\.general/issues/1199](https\://github\.com/ansible\-collections/community\.general/issues/1199)\, [https\://github\.com/ansible\-collections/community\.general/pull/8907](https\://github\.com/ansible\-collections/community\.general/pull/8907)\)\.
+* one\_image \- fix module failing due to a class method typo \([https\://github\.com/ansible\-collections/community\.general/pull/9056](https\://github\.com/ansible\-collections/community\.general/pull/9056)\)\.
+* one\_image\_info \- fix module failing due to a class method typo \([https\://github\.com/ansible\-collections/community\.general/pull/9056](https\://github\.com/ansible\-collections/community\.general/pull/9056)\)\.
+* one\_vnet \- fix module failing due to a variable typo \([https\://github\.com/ansible\-collections/community\.general/pull/9019](https\://github\.com/ansible\-collections/community\.general/pull/9019)\)\.
+* redfish\_utils module utils \- fix issue with URI parsing to gracefully handle trailing slashes when extracting member identifiers \([https\://github\.com/ansible\-collections/community\.general/issues/9047](https\://github\.com/ansible\-collections/community\.general/issues/9047)\, [https\://github\.com/ansible\-collections/community\.general/pull/9057](https\://github\.com/ansible\-collections/community\.general/pull/9057)\)\.
+
+
+## v9\.5\.0
+
+
+### Release Summary
+
+Regular bugfix and feature release\.
+
+Please note that this is the last feature release for community\.general 9\.x\.y\.
+From now on\, new features will only go into community\.general 10\.x\.y\.
+
+
+### Minor Changes
+
+* dig lookup plugin \- add port option to specify DNS server port \([https\://github\.com/ansible\-collections/community\.general/pull/8966](https\://github\.com/ansible\-collections/community\.general/pull/8966)\)\.
+* flatpak \- improve the parsing of Flatpak application IDs based on official guidelines \([https\://github\.com/ansible\-collections/community\.general/pull/8909](https\://github\.com/ansible\-collections/community\.general/pull/8909)\)\.
+* gio\_mime \- adjust code ahead of the old VarDict deprecation \([https\://github\.com/ansible\-collections/community\.general/pull/8855](https\://github\.com/ansible\-collections/community\.general/pull/8855)\)\.
+* gitlab\_deploy\_key \- better construct when using dict\.items\(\) \([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
+* gitlab\_group \- add many new parameters \([https\://github\.com/ansible\-collections/community\.general/pull/8908](https\://github\.com/ansible\-collections/community\.general/pull/8908)\)\.
+* gitlab\_group \- better construct when using dict\.items\(\) \([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
+* gitlab\_issue \- better construct when using dict\.items\(\) \([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
+* gitlab\_merge\_request \- better construct when using dict\.items\(\) \([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
+* gitlab\_runner \- better construct when using dict\.items\(\) \([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
+* icinga2\_host \- replace loop with dict comprehension \([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
+* jira \- adjust code ahead of the old VarDict deprecation \([https\://github\.com/ansible\-collections/community\.general/pull/8856](https\://github\.com/ansible\-collections/community\.general/pull/8856)\)\.
+* keycloak\_client \- add client\-x509 choice to client\_authenticator\_type \([https\://github\.com/ansible\-collections/community\.general/pull/8973](https\://github\.com/ansible\-collections/community\.general/pull/8973)\)\.
+* keycloak\_user\_federation \- add the user federation config parameter referral to the module arguments \([https\://github\.com/ansible\-collections/community\.general/pull/8954](https\://github\.com/ansible\-collections/community\.general/pull/8954)\)\.
+* memset\_dns\_reload \- replace loop with dict\(\) \([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
+* memset\_memstore\_info \- replace loop with dict\(\) \([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
+* memset\_server\_info \- replace loop with dict\(\) \([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
+* memset\_zone \- replace loop with dict\(\) \([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
+* memset\_zone\_domain \- replace loop with dict\(\) \([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
+* memset\_zone\_record \- replace loop with dict\(\) \([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\. +* nmcli \- add conn\_enable param to reload connection \([https\://github\.com/ansible\-collections/community\.general/issues/3752](https\://github\.com/ansible\-collections/community\.general/issues/3752)\, [https\://github\.com/ansible\-collections/community\.general/issues/8704](https\://github\.com/ansible\-collections/community\.general/issues/8704)\, [https\://github\.com/ansible\-collections/community\.general/pull/8897](https\://github\.com/ansible\-collections/community\.general/pull/8897)\)\. +* nmcli \- add state\=up and state\=down to enable/disable connections \([https\://github\.com/ansible\-collections/community\.general/issues/3752](https\://github\.com/ansible\-collections/community\.general/issues/3752)\, [https\://github\.com/ansible\-collections/community\.general/issues/8704](https\://github\.com/ansible\-collections/community\.general/issues/8704)\, [https\://github\.com/ansible\-collections/community\.general/issues/7152](https\://github\.com/ansible\-collections/community\.general/issues/7152)\, [https\://github\.com/ansible\-collections/community\.general/pull/8897](https\://github\.com/ansible\-collections/community\.general/pull/8897)\)\. +* nmcli \- better construct when using dict\.items\(\) \([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\. +* npm \- add force parameter to allow \-\-force \([https\://github\.com/ansible\-collections/community\.general/pull/8885](https\://github\.com/ansible\-collections/community\.general/pull/8885)\)\. +* one\_image \- add option persistent to manage image persistence \([https\://github\.com/ansible\-collections/community\.general/issues/3578](https\://github\.com/ansible\-collections/community\.general/issues/3578)\, [https\://github\.com/ansible\-collections/community\.general/pull/8889](https\://github\.com/ansible\-collections/community\.general/pull/8889)\)\. +* one\_image \- extend xsd scheme to make it return a lot more info about image \([https\://github\.com/ansible\-collections/community\.general/pull/8889](https\://github\.com/ansible\-collections/community\.general/pull/8889)\)\. +* one\_image \- refactor code to make it more similar to one\_template and one\_vnet \([https\://github\.com/ansible\-collections/community\.general/pull/8889](https\://github\.com/ansible\-collections/community\.general/pull/8889)\)\. +* one\_image\_info \- extend xsd scheme to make it return a lot more info about image \([https\://github\.com/ansible\-collections/community\.general/pull/8889](https\://github\.com/ansible\-collections/community\.general/pull/8889)\)\. +* one\_image\_info \- refactor code to make it more similar to one\_template and one\_vnet \([https\://github\.com/ansible\-collections/community\.general/pull/8889](https\://github\.com/ansible\-collections/community\.general/pull/8889)\)\. +* open\_iscsi \- allow login to a portal with multiple targets without specifying any of them \([https\://github\.com/ansible\-collections/community\.general/pull/8719](https\://github\.com/ansible\-collections/community\.general/pull/8719)\)\. 
+* opennebula\.py \- add VM id and VM host to inventory host data \([https\://github\.com/ansible\-collections/community\.general/pull/8532](https\://github\.com/ansible\-collections/community\.general/pull/8532)\)\.
+* passwordstore lookup plugin \- add subkey creation/update support \([https\://github\.com/ansible\-collections/community\.general/pull/8952](https\://github\.com/ansible\-collections/community\.general/pull/8952)\)\.
+* proxmox inventory plugin \- clean up authentication code \([https\://github\.com/ansible\-collections/community\.general/pull/8917](https\://github\.com/ansible\-collections/community\.general/pull/8917)\)\.
+* redfish\_command \- add handling of the PasswordChangeRequired message from services in the UpdateUserPassword command to directly modify the user\'s password if the requested user is the one invoking the operation \([https\://github\.com/ansible\-collections/community\.general/issues/8652](https\://github\.com/ansible\-collections/community\.general/issues/8652)\, [https\://github\.com/ansible\-collections/community\.general/pull/8653](https\://github\.com/ansible\-collections/community\.general/pull/8653)\)\.
+* redfish\_config \- remove CapacityBytes from required parameters of the CreateVolume command \([https\://github\.com/ansible\-collections/community\.general/pull/8956](https\://github\.com/ansible\-collections/community\.general/pull/8956)\)\.
+* redfish\_config \- add parameter storage\_none\_volume\_deletion to CreateVolume command in order to control the automatic deletion of non\-RAID volumes \([https\://github\.com/ansible\-collections/community\.general/pull/8990](https\://github\.com/ansible\-collections/community\.general/pull/8990)\)\.
+* redfish\_info \- adds RedfishURI and StorageId to Disk inventory \([https\://github\.com/ansible\-collections/community\.general/pull/8937](https\://github\.com/ansible\-collections/community\.general/pull/8937)\)\.
+* scaleway\_container \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8858](https\://github\.com/ansible\-collections/community\.general/pull/8858)\)\.
+* scaleway\_container\_info \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8858](https\://github\.com/ansible\-collections/community\.general/pull/8858)\)\.
+* scaleway\_container\_namespace \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8858](https\://github\.com/ansible\-collections/community\.general/pull/8858)\)\.
+* scaleway\_container\_namespace\_info \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8858](https\://github\.com/ansible\-collections/community\.general/pull/8858)\)\.
+* scaleway\_container\_registry \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8858](https\://github\.com/ansible\-collections/community\.general/pull/8858)\)\.
+* scaleway\_container\_registry\_info \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8858](https\://github\.com/ansible\-collections/community\.general/pull/8858)\)\.
+* scaleway\_function \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8858](https\://github\.com/ansible\-collections/community\.general/pull/8858)\)\. +* scaleway\_function\_info \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8858](https\://github\.com/ansible\-collections/community\.general/pull/8858)\)\. +* scaleway\_function\_namespace \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8858](https\://github\.com/ansible\-collections/community\.general/pull/8858)\)\. +* scaleway\_function\_namespace\_info \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8858](https\://github\.com/ansible\-collections/community\.general/pull/8858)\)\. +* scaleway\_user\_data \- better construct when using dict\.items\(\) \([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\. +* udm\_dns\_record \- replace loop with dict\.update\(\) \([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\. + + +### Deprecated Features + +* hipchat \- the hipchat service has been discontinued and the self\-hosted variant has been End of Life since 2020\. The module is therefore deprecated and will be removed from community\.general 11\.0\.0 if nobody provides compelling reasons to still keep it \([https\://github\.com/ansible\-collections/community\.general/pull/8919](https\://github\.com/ansible\-collections/community\.general/pull/8919)\)\. + + +### Bugfixes + +* cloudflare\_dns \- fix changing Cloudflare SRV records \([https\://github\.com/ansible\-collections/community\.general/issues/8679](https\://github\.com/ansible\-collections/community\.general/issues/8679)\, [https\://github\.com/ansible\-collections/community\.general/pull/8948](https\://github\.com/ansible\-collections/community\.general/pull/8948)\)\. +* cmd\_runner module utils \- call to get\_best\_parsable\_locales\(\) was missing parameter \([https\://github\.com/ansible\-collections/community\.general/pull/8929](https\://github\.com/ansible\-collections/community\.general/pull/8929)\)\. +* dig lookup plugin \- fix using only the last nameserver specified \([https\://github\.com/ansible\-collections/community\.general/pull/8970](https\://github\.com/ansible\-collections/community\.general/pull/8970)\)\. +* django\_command \- option command is now split lexically before passed to underlying PythonRunner \([https\://github\.com/ansible\-collections/community\.general/pull/8944](https\://github\.com/ansible\-collections/community\.general/pull/8944)\)\. +* homectl \- the module now tries to use legacycrypt on Python 3\.13\+ \([https\://github\.com/ansible\-collections/community\.general/issues/4691](https\://github\.com/ansible\-collections/community\.general/issues/4691)\, [https\://github\.com/ansible\-collections/community\.general/pull/8987](https\://github\.com/ansible\-collections/community\.general/pull/8987)\)\. 
+* ini\_file \- pass absolute paths to module\.atomic\_move\(\) \([https\://github\.com/ansible/ansible/issues/83950](https\://github\.com/ansible/ansible/issues/83950)\, [https\://github\.com/ansible\-collections/community\.general/pull/8925](https\://github\.com/ansible\-collections/community\.general/pull/8925)\)\.
+* ipa\_host \- add force\_create\, fix enabled and disabled states \([https\://github\.com/ansible\-collections/community\.general/issues/1094](https\://github\.com/ansible\-collections/community\.general/issues/1094)\, [https\://github\.com/ansible\-collections/community\.general/pull/8920](https\://github\.com/ansible\-collections/community\.general/pull/8920)\)\.
+* ipa\_hostgroup \- fix enabled and disabled states \([https\://github\.com/ansible\-collections/community\.general/issues/8408](https\://github\.com/ansible\-collections/community\.general/issues/8408)\, [https\://github\.com/ansible\-collections/community\.general/pull/8900](https\://github\.com/ansible\-collections/community\.general/pull/8900)\)\.
+* java\_keystore \- pass absolute paths to module\.atomic\_move\(\) \([https\://github\.com/ansible/ansible/issues/83950](https\://github\.com/ansible/ansible/issues/83950)\, [https\://github\.com/ansible\-collections/community\.general/pull/8925](https\://github\.com/ansible\-collections/community\.general/pull/8925)\)\.
+* jenkins\_plugin \- pass absolute paths to module\.atomic\_move\(\) \([https\://github\.com/ansible/ansible/issues/83950](https\://github\.com/ansible/ansible/issues/83950)\, [https\://github\.com/ansible\-collections/community\.general/pull/8925](https\://github\.com/ansible\-collections/community\.general/pull/8925)\)\.
+* kdeconfig \- pass absolute paths to module\.atomic\_move\(\) \([https\://github\.com/ansible/ansible/issues/83950](https\://github\.com/ansible/ansible/issues/83950)\, [https\://github\.com/ansible\-collections/community\.general/pull/8925](https\://github\.com/ansible\-collections/community\.general/pull/8925)\)\.
+* keycloak\_realm \- fix change detection in check mode by sorting the lists in the realms beforehand \([https\://github\.com/ansible\-collections/community\.general/pull/8877](https\://github\.com/ansible\-collections/community\.general/pull/8877)\)\.
+* keycloak\_user\_federation \- add module argument allowing users to configure the update mode for the parameter bindCredential \([https\://github\.com/ansible\-collections/community\.general/pull/8898](https\://github\.com/ansible\-collections/community\.general/pull/8898)\)\.
+* keycloak\_user\_federation \- minimize change detection by setting krbPrincipalAttribute to \'\' in Keycloak responses if missing \([https\://github\.com/ansible\-collections/community\.general/pull/8785](https\://github\.com/ansible\-collections/community\.general/pull/8785)\)\.
+* keycloak\_user\_federation \- remove lastSync parameter from Keycloak responses to minimize diff/changes \([https\://github\.com/ansible\-collections/community\.general/pull/8812](https\://github\.com/ansible\-collections/community\.general/pull/8812)\)\.
+* keycloak\_userprofile \- fix empty response when fetching userprofile component by removing parent\=parent\_id filter \([https\://github\.com/ansible\-collections/community\.general/pull/8923](https\://github\.com/ansible\-collections/community\.general/pull/8923)\)\.
+* keycloak\_userprofile \- improve diff by deserializing the fetched kc\.user\.profile\.config and serialize it only when sending back \([https\://github\.com/ansible\-collections/community\.general/pull/8940](https\://github\.com/ansible\-collections/community\.general/pull/8940)\)\. +* lxd\_container \- fix bug introduced in previous commit \([https\://github\.com/ansible\-collections/community\.general/pull/8895](https\://github\.com/ansible\-collections/community\.general/pull/8895)\, [https\://github\.com/ansible\-collections/community\.general/issues/8888](https\://github\.com/ansible\-collections/community\.general/issues/8888)\)\. +* one\_service \- fix service creation after it was deleted with unique parameter \([https\://github\.com/ansible\-collections/community\.general/issues/3137](https\://github\.com/ansible\-collections/community\.general/issues/3137)\, [https\://github\.com/ansible\-collections/community\.general/pull/8887](https\://github\.com/ansible\-collections/community\.general/pull/8887)\)\. +* pam\_limits \- pass absolute paths to module\.atomic\_move\(\) \([https\://github\.com/ansible/ansible/issues/83950](https\://github\.com/ansible/ansible/issues/83950)\, [https\://github\.com/ansible\-collections/community\.general/pull/8925](https\://github\.com/ansible\-collections/community\.general/pull/8925)\)\. +* python\_runner module utils \- parameter path\_prefix was being handled as string when it should be a list \([https\://github\.com/ansible\-collections/community\.general/pull/8944](https\://github\.com/ansible\-collections/community\.general/pull/8944)\)\. +* udm\_user \- the module now tries to use legacycrypt on Python 3\.13\+ \([https\://github\.com/ansible\-collections/community\.general/issues/4690](https\://github\.com/ansible\-collections/community\.general/issues/4690)\, [https\://github\.com/ansible\-collections/community\.general/pull/8987](https\://github\.com/ansible\-collections/community\.general/pull/8987)\)\. + + +### New Modules + +* community\.general\.ipa\_getkeytab \- Manage keytab file in FreeIPA\. + + +## v9\.4\.0 + + +### Release Summary + +Bugfix and feature release\. + + +### Minor Changes + +* MH module utils \- add parameter when to cause\_changes decorator \([https\://github\.com/ansible\-collections/community\.general/pull/8766](https\://github\.com/ansible\-collections/community\.general/pull/8766)\)\. +* MH module utils \- minor refactor in decorators \([https\://github\.com/ansible\-collections/community\.general/pull/8766](https\://github\.com/ansible\-collections/community\.general/pull/8766)\)\. +* alternatives \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\. +* apache2\_mod\_proxy \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\. +* apache2\_mod\_proxy \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\. +* consul\_acl \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\. 
+* copr \- Added includepkgs and excludepkgs parameters to limit the list of packages fetched or excluded from the repository\([https\://github\.com/ansible\-collections/community\.general/pull/8779](https\://github\.com/ansible\-collections/community\.general/pull/8779)\)\. +* credstash lookup plugin \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\. +* csv module utils \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\. +* deco MH module utils \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\. +* etcd3 \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\. +* gio\_mime \- mute the old VarDict deprecation \([https\://github\.com/ansible\-collections/community\.general/pull/8776](https\://github\.com/ansible\-collections/community\.general/pull/8776)\)\. +* gitlab\_group \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\. +* gitlab\_project \- add option issues\_access\_level to enable/disable project issues \([https\://github\.com/ansible\-collections/community\.general/pull/8760](https\://github\.com/ansible\-collections/community\.general/pull/8760)\)\. +* gitlab\_project \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\. +* gitlab\_project \- sorted parameters in order to avoid future merge conflicts \([https\://github\.com/ansible\-collections/community\.general/pull/8759](https\://github\.com/ansible\-collections/community\.general/pull/8759)\)\. +* hashids filter plugin \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\. +* hwc\_ecs\_instance \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\. +* hwc\_evs\_disk \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\. +* hwc\_vpc\_eip \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\. +* hwc\_vpc\_peering\_connect \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\. 
+* hwc\_vpc\_port \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
+* hwc\_vpc\_subnet \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
+* imc\_rest \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
+* ipa\_otptoken \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
+* jira \- mute the old VarDict deprecation \([https\://github\.com/ansible\-collections/community\.general/pull/8776](https\://github\.com/ansible\-collections/community\.general/pull/8776)\)\.
+* jira \- replace deprecated params when using decorator cause\_changes \([https\://github\.com/ansible\-collections/community\.general/pull/8791](https\://github\.com/ansible\-collections/community\.general/pull/8791)\)\.
+* keep\_keys filter plugin \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
+* keycloak module utils \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
+* keycloak\_client \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
+* keycloak\_clientscope \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
+* keycloak\_identity\_provider \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
+* keycloak\_user\_federation \- add module argument allowing users to opt out of the removal of unspecified mappers\, for example to keep the keycloak default mappers \([https\://github\.com/ansible\-collections/community\.general/pull/8764](https\://github\.com/ansible\-collections/community\.general/pull/8764)\)\.
+* keycloak\_user\_federation \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
+* keycloak\_user\_federation \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
+* keycloak\_user\_federation \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
+* linode \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\. +* lxc\_container \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\. +* lxd\_container \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\. +* manageiq\_provider \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\. +* ocapi\_utils \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\. +* one\_service \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\. +* one\_vm \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\. +* onepassword lookup plugin \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\. +* pids \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\. +* pipx \- added new states install\_all\, uninject\, upgrade\_shared\, pin\, and unpin \([https\://github\.com/ansible\-collections/community\.general/pull/8809](https\://github\.com/ansible\-collections/community\.general/pull/8809)\)\. +* pipx \- added parameter global to module \([https\://github\.com/ansible\-collections/community\.general/pull/8793](https\://github\.com/ansible\-collections/community\.general/pull/8793)\)\. +* pipx \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\. +* pipx\_info \- added parameter global to module \([https\://github\.com/ansible\-collections/community\.general/pull/8793](https\://github\.com/ansible\-collections/community\.general/pull/8793)\)\. +* pipx\_info \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\. +* pkg5\_publisher \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\. +* proxmox \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\. 
+* proxmox\_disk \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\. +* proxmox\_kvm \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\. +* proxmox\_kvm \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\. +* redfish\_utils \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\. +* redfish\_utils module utils \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\. +* redis cache plugin \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\. +* remove\_keys filter plugin \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\. +* replace\_keys filter plugin \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\. +* scaleway \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\. +* scaleway module utils \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\. +* scaleway\_compute \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\. +* scaleway\_ip \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\. +* scaleway\_lb \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\. +* scaleway\_security\_group \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\. +* scaleway\_security\_group \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\. 
+* scaleway\_user\_data \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\. +* sensu\_silence \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\. +* snmp\_facts \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\. +* sorcery \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\. +* ufw \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\. +* unsafe plugin utils \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\. +* vardict module utils \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\. +* vars MH module utils \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\. +* vmadm \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\. + + +### Deprecated Features + +* MH decorator cause\_changes module utils \- deprecate parameters on\_success and on\_failure \([https\://github\.com/ansible\-collections/community\.general/pull/8791](https\://github\.com/ansible\-collections/community\.general/pull/8791)\)\. +* pipx \- support for versions of the command line tool pipx older than 1\.7\.0 is deprecated and will be removed in community\.general 11\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/8793](https\://github\.com/ansible\-collections/community\.general/pull/8793)\)\. +* pipx\_info \- support for versions of the command line tool pipx older than 1\.7\.0 is deprecated and will be removed in community\.general 11\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/8793](https\://github\.com/ansible\-collections/community\.general/pull/8793)\)\. + + +### Bugfixes + +* gitlab\_group\_access\_token \- fix crash in check mode caused by attempted access to a newly created access token \([https\://github\.com/ansible\-collections/community\.general/pull/8796](https\://github\.com/ansible\-collections/community\.general/pull/8796)\)\. +* gitlab\_project \- fix container\_expiration\_policy not being applied when creating a new project \([https\://github\.com/ansible\-collections/community\.general/pull/8790](https\://github\.com/ansible\-collections/community\.general/pull/8790)\)\. 
+* gitlab\_project \- fix crash caused by old Gitlab projects not having a container\_expiration\_policy attribute \([https\://github\.com/ansible\-collections/community\.general/pull/8790](https\://github\.com/ansible\-collections/community\.general/pull/8790)\)\. +* gitlab\_project\_access\_token \- fix crash in check mode caused by attempted access to a newly created access token \([https\://github\.com/ansible\-collections/community\.general/pull/8796](https\://github\.com/ansible\-collections/community\.general/pull/8796)\)\. +* keycloak\_realm\_key \- fix invalid usage of parent\_id \([https\://github\.com/ansible\-collections/community\.general/issues/7850](https\://github\.com/ansible\-collections/community\.general/issues/7850)\, [https\://github\.com/ansible\-collections/community\.general/pull/8823](https\://github\.com/ansible\-collections/community\.general/pull/8823)\)\. +* keycloak\_user\_federation \- fix key error when removing mappers during an update and new mappers are specified in the module args \([https\://github\.com/ansible\-collections/community\.general/pull/8762](https\://github\.com/ansible\-collections/community\.general/pull/8762)\)\. +* keycloak\_user\_federation \- fix the UnboundLocalError that occurs when an ID is provided for a user federation mapper \([https\://github\.com/ansible\-collections/community\.general/pull/8831](https\://github\.com/ansible\-collections/community\.general/pull/8831)\)\. +* keycloak\_user\_federation \- sort desired and after mapper list by name \(analogous to the before mapper list\) to minimize diff and make change detection more accurate \([https\://github\.com/ansible\-collections/community\.general/pull/8761](https\://github\.com/ansible\-collections/community\.general/pull/8761)\)\. +* proxmox inventory plugin \- fixed a possible error on concatenating responses from Proxmox\. In case an API call unexpectedly returned an empty result\, the inventory failed with a fatal error\. Added a check for empty responses \([https\://github\.com/ansible\-collections/community\.general/issues/8798](https\://github\.com/ansible\-collections/community\.general/issues/8798)\, [https\://github\.com/ansible\-collections/community\.general/pull/8794](https\://github\.com/ansible\-collections/community\.general/pull/8794)\)\. + + +### New Modules + +* community\.general\.keycloak\_userprofile \- Allows managing Keycloak User Profiles\. +* community\.general\.one\_vnet \- Manages OpenNebula virtual networks\. + + +## v9\.3\.0 + + +### Release Summary + +Regular bugfix and feature release\. + + +### Minor Changes + +* cgroup\_memory\_recap\, hipchat\, jabber\, log\_plays\, loganalytics\, logentries\, logstash\, slack\, splunk\, sumologic\, syslog\_json callback plugins \- make sure that all options are typed \([https\://github\.com/ansible\-collections/community\.general/pull/8628](https\://github\.com/ansible\-collections/community\.general/pull/8628)\)\. +* chef\_databag\, consul\_kv\, cyberarkpassword\, dsv\, etcd\, filetree\, hiera\, onepassword\, onepassword\_doc\, onepassword\_raw\, passwordstore\, redis\, shelvefile\, tss lookup plugins \- make sure that all options are typed \([https\://github\.com/ansible\-collections/community\.general/pull/8626](https\://github\.com/ansible\-collections/community\.general/pull/8626)\)\.
+* chroot\, funcd\, incus\, iocage\, jail\, lxc\, lxd\, qubes\, zone connection plugins \- make sure that all options are typed \([https\://github\.com/ansible\-collections/community\.general/pull/8627](https\://github\.com/ansible\-collections/community\.general/pull/8627)\)\. +* cobbler\, linode\, lxd\, nmap\, online\, scaleway\, stackpath\_compute\, virtualbox inventory plugins \- make sure that all options are typed \([https\://github\.com/ansible\-collections/community\.general/pull/8625](https\://github\.com/ansible\-collections/community\.general/pull/8625)\)\. +* doas\, dzdo\, ksu\, machinectl\, pbrun\, pfexec\, pmrun\, sesu\, sudosu become plugins \- make sure that all options are typed \([https\://github\.com/ansible\-collections/community\.general/pull/8623](https\://github\.com/ansible\-collections/community\.general/pull/8623)\)\. +* gconftool2 \- make use of ModuleHelper features to simplify code \([https\://github\.com/ansible\-collections/community\.general/pull/8711](https\://github\.com/ansible\-collections/community\.general/pull/8711)\)\. +* gitlab\_project \- add option container\_expiration\_policy to schedule container registry cleanup \([https\://github\.com/ansible\-collections/community\.general/pull/8674](https\://github\.com/ansible\-collections/community\.general/pull/8674)\)\. +* gitlab\_project \- add option model\_registry\_access\_level to disable model registry \([https\://github\.com/ansible\-collections/community\.general/pull/8688](https\://github\.com/ansible\-collections/community\.general/pull/8688)\)\. +* gitlab\_project \- add option pages\_access\_level to disable project pages \([https\://github\.com/ansible\-collections/community\.general/pull/8688](https\://github\.com/ansible\-collections/community\.general/pull/8688)\)\. +* gitlab\_project \- add option repository\_access\_level to disable project repository \([https\://github\.com/ansible\-collections/community\.general/pull/8674](https\://github\.com/ansible\-collections/community\.general/pull/8674)\)\. +* gitlab\_project \- add option service\_desk\_enabled to disable service desk \([https\://github\.com/ansible\-collections/community\.general/pull/8688](https\://github\.com/ansible\-collections/community\.general/pull/8688)\)\. +* locale\_gen \- add support for multiple locales \([https\://github\.com/ansible\-collections/community\.general/issues/8677](https\://github\.com/ansible\-collections/community\.general/issues/8677)\, [https\://github\.com/ansible\-collections/community\.general/pull/8682](https\://github\.com/ansible\-collections/community\.general/pull/8682)\)\. +* memcached\, pickle\, redis\, yaml cache plugins \- make sure that all options are typed \([https\://github\.com/ansible\-collections/community\.general/pull/8624](https\://github\.com/ansible\-collections/community\.general/pull/8624)\)\. +* opentelemetry callback plugin \- fix default value for store\_spans\_in\_file causing traces to be produced to a file named None \([https\://github\.com/ansible\-collections/community\.general/issues/8566](https\://github\.com/ansible\-collections/community\.general/issues/8566)\, [https\://github\.com/ansible\-collections/community\.general/pull/8741](https\://github\.com/ansible\-collections/community\.general/pull/8741)\)\. 
+* passwordstore lookup plugin \- add the current user to the lockfile file name to address issues on multi\-user systems \([https\://github\.com/ansible\-collections/community\.general/pull/8689](https\://github\.com/ansible\-collections/community\.general/pull/8689)\)\. +* pipx \- add parameter suffix to module \([https\://github\.com/ansible\-collections/community\.general/pull/8675](https\://github\.com/ansible\-collections/community\.general/pull/8675)\, [https\://github\.com/ansible\-collections/community\.general/issues/8656](https\://github\.com/ansible\-collections/community\.general/issues/8656)\)\. +* pkgng \- add option use\_globs \(default true\) to optionally disable glob patterns \([https\://github\.com/ansible\-collections/community\.general/issues/8632](https\://github\.com/ansible\-collections/community\.general/issues/8632)\, [https\://github\.com/ansible\-collections/community\.general/pull/8633](https\://github\.com/ansible\-collections/community\.general/pull/8633)\)\. +* proxmox inventory plugin \- add new fact for LXC interface details \([https\://github\.com/ansible\-collections/community\.general/pull/8713](https\://github\.com/ansible\-collections/community\.general/pull/8713)\)\. +* redis\, redis\_info \- add client\_cert and client\_key options to specify path to certificate for Redis authentication \([https\://github\.com/ansible\-collections/community\.general/pull/8654](https\://github\.com/ansible\-collections/community\.general/pull/8654)\)\. + + +### Bugfixes + +* gitlab\_runner \- fix paused parameter being ignored \([https\://github\.com/ansible\-collections/community\.general/pull/8648](https\://github\.com/ansible\-collections/community\.general/pull/8648)\)\. +* homebrew\_cask \- fix upgrade\_all returning changed when nothing was upgraded \([https\://github\.com/ansible\-collections/community\.general/issues/8707](https\://github\.com/ansible\-collections/community\.general/issues/8707)\, [https\://github\.com/ansible\-collections/community\.general/pull/8708](https\://github\.com/ansible\-collections/community\.general/pull/8708)\)\. +* keycloak\_user\_federation \- get cleartext IDP clientSecret from full realm info to detect changes to it \([https\://github\.com/ansible\-collections/community\.general/issues/8294](https\://github\.com/ansible\-collections/community\.general/issues/8294)\, [https\://github\.com/ansible\-collections/community\.general/pull/8735](https\://github\.com/ansible\-collections/community\.general/pull/8735)\)\. +* keycloak\_user\_federation \- remove existing user federation mappers if they are not present in the federation configuration and will not be updated \([https\://github\.com/ansible\-collections/community\.general/issues/7169](https\://github\.com/ansible\-collections/community\.general/issues/7169)\, [https\://github\.com/ansible\-collections/community\.general/pull/8695](https\://github\.com/ansible\-collections/community\.general/pull/8695)\)\. +* proxmox \- fixed an issue where the new volume handling incorrectly converted null values into \"None\" strings \([https\://github\.com/ansible\-collections/community\.general/pull/8646](https\://github\.com/ansible\-collections/community\.general/pull/8646)\)\. +* proxmox \- fixed an issue where volume strings were overwritten instead of appended to in the new build\_volume\(\) method \([https\://github\.com/ansible\-collections/community\.general/pull/8646](https\://github\.com/ansible\-collections/community\.general/pull/8646)\)\.
+* proxmox \- removed the forced conversion of non\-string values to strings to be consistent with the module documentation \([https\://github\.com/ansible\-collections/community\.general/pull/8646](https\://github\.com/ansible\-collections/community\.general/pull/8646)\)\. + + +### New Modules + +* community\.general\.bootc\_manage \- Bootc Switch and Upgrade\. +* community\.general\.homebrew\_services \- Services manager for Homebrew\. +* community\.general\.keycloak\_realm\_keys\_metadata\_info \- Allows obtaining Keycloak realm keys metadata via Keycloak API\. + + +## v9\.2\.0 + + +### Release Summary + +Regular bugfix and feature release\. + + +### Minor Changes + +* CmdRunner module utils \- the parameter force\_lang now supports the special value auto which will automatically try and determine the best parsable locale in the system \([https\://github\.com/ansible\-collections/community\.general/pull/8517](https\://github\.com/ansible\-collections/community\.general/pull/8517)\)\. +* proxmox \- add disk\_volume and mount\_volumes keys for better readability \([https\://github\.com/ansible\-collections/community\.general/pull/8542](https\://github\.com/ansible\-collections/community\.general/pull/8542)\)\. +* proxmox \- translate the old disk and mounts keys to the new handling internally \([https\://github\.com/ansible\-collections/community\.general/pull/8542](https\://github\.com/ansible\-collections/community\.general/pull/8542)\)\. +* proxmox\_template \- small refactor in logic for determining whether a template exists or not \([https\://github\.com/ansible\-collections/community\.general/pull/8516](https\://github\.com/ansible\-collections/community\.general/pull/8516)\)\. +* redfish\_\* modules \- adds ciphers option for custom cipher selection \([https\://github\.com/ansible\-collections/community\.general/pull/8533](https\://github\.com/ansible\-collections/community\.general/pull/8533)\)\. +* sudosu become plugin \- added an option \(alt\_method\) to enhance compatibility with more versions of su \([https\://github\.com/ansible\-collections/community\.general/pull/8214](https\://github\.com/ansible\-collections/community\.general/pull/8214)\)\. +* virtualbox inventory plugin \- expose a new parameter enable\_advanced\_group\_parsing to change how the VirtualBox dynamic inventory parses VM groups \([https\://github\.com/ansible\-collections/community\.general/issues/8508](https\://github\.com/ansible\-collections/community\.general/issues/8508)\, [https\://github\.com/ansible\-collections/community\.general/pull/8510](https\://github\.com/ansible\-collections/community\.general/pull/8510)\)\. +* wdc\_redfish\_command \- minor change to handle upgrade file for Redfish WD platforms \([https\://github\.com/ansible\-collections/community\.general/pull/8444](https\://github\.com/ansible\-collections/community\.general/pull/8444)\)\. + + +### Bugfixes + +* bitwarden lookup plugin \- fix KeyError in search\_field \([https\://github\.com/ansible\-collections/community\.general/issues/8549](https\://github\.com/ansible\-collections/community\.general/issues/8549)\, [https\://github\.com/ansible\-collections/community\.general/pull/8557](https\://github\.com/ansible\-collections/community\.general/pull/8557)\)\. +* keycloak\_clientscope \- remove IDs from clientscope and its protocol mappers on comparison for changed check \([https\://github\.com/ansible\-collections/community\.general/pull/8545](https\://github\.com/ansible\-collections/community\.general/pull/8545)\)\. 
+* nsupdate \- fix \'index out of range\' error when changing NS records by falling back to authority section of the response \([https\://github\.com/ansible\-collections/community\.general/issues/8612](https\://github\.com/ansible\-collections/community\.general/issues/8612)\, [https\://github\.com/ansible\-collections/community\.general/pull/8614](https\://github\.com/ansible\-collections/community\.general/pull/8614)\)\. +* proxmox \- fix idempotency on creation of mount volumes using Proxmox\' special \<storage\>\:\<size\> syntax \([https\://github\.com/ansible\-collections/community\.general/issues/8407](https\://github\.com/ansible\-collections/community\.general/issues/8407)\, [https\://github\.com/ansible\-collections/community\.general/pull/8542](https\://github\.com/ansible\-collections/community\.general/pull/8542)\)\. +* redfish\_utils module utils \- do not fail when language is not exactly \"en\" \([https\://github\.com/ansible\-collections/community\.general/pull/8613](https\://github\.com/ansible\-collections/community\.general/pull/8613)\)\. + + +### New Plugins + + +#### Filter + +* community\.general\.reveal\_ansible\_type \- Return input type\. + + +#### Test + +* community\.general\.ansible\_type \- Validate input type\. + + +## v9\.1\.0 + + +### Release Summary + +Regular feature and bugfix release\. + + +### Minor Changes + +* CmdRunner module util \- argument formats can be specified as plain functions without calling cmd\_runner\_fmt\.as\_func\(\) \([https\://github\.com/ansible\-collections/community\.general/pull/8479](https\://github\.com/ansible\-collections/community\.general/pull/8479)\)\. +* ansible\_galaxy\_install \- add upgrade feature \([https\://github\.com/ansible\-collections/community\.general/pull/8431](https\://github\.com/ansible\-collections/community\.general/pull/8431)\, [https\://github\.com/ansible\-collections/community\.general/issues/8351](https\://github\.com/ansible\-collections/community\.general/issues/8351)\)\. +* cargo \- add option directory\, which allows source directory to be specified \([https\://github\.com/ansible\-collections/community\.general/pull/8480](https\://github\.com/ansible\-collections/community\.general/pull/8480)\)\. +* cmd\_runner module utils \- add decorator cmd\_runner\_fmt\.stack \([https\://github\.com/ansible\-collections/community\.general/pull/8415](https\://github\.com/ansible\-collections/community\.general/pull/8415)\)\. +* cmd\_runner\_fmt module utils \- simplify implementation of cmd\_runner\_fmt\.as\_bool\_not\(\) \([https\://github\.com/ansible\-collections/community\.general/pull/8512](https\://github\.com/ansible\-collections/community\.general/pull/8512)\)\. +* ipa\_dnsrecord \- adds SSHFP record type for managing SSH fingerprints in FreeIPA DNS \([https\://github\.com/ansible\-collections/community\.general/pull/8404](https\://github\.com/ansible\-collections/community\.general/pull/8404)\)\. +* keycloak\_client \- assign auth flow by name \([https\://github\.com/ansible\-collections/community\.general/pull/8428](https\://github\.com/ansible\-collections/community\.general/pull/8428)\)\. +* openbsd\_pkg \- adds diff support to show changes in installed package list\. This does not yet work for check mode \([https\://github\.com/ansible\-collections/community\.general/pull/8402](https\://github\.com/ansible\-collections/community\.general/pull/8402)\)\.
+* proxmox \- allow specification of the API port when using proxmox\_\* \([https\://github\.com/ansible\-collections/community\.general/issues/8440](https\://github\.com/ansible\-collections/community\.general/issues/8440)\, [https\://github\.com/ansible\-collections/community\.general/pull/8441](https\://github\.com/ansible\-collections/community\.general/pull/8441)\)\. +* proxmox\_vm\_info \- add network option to retrieve current network information \([https\://github\.com/ansible\-collections/community\.general/pull/8471](https\://github\.com/ansible\-collections/community\.general/pull/8471)\)\. +* redfish\_command \- add wait and wait\_timeout options to allow a user to block a command until a service is accessible after performing the requested command \([https\://github\.com/ansible\-collections/community\.general/issues/8051](https\://github\.com/ansible\-collections/community\.general/issues/8051)\, [https\://github\.com/ansible\-collections/community\.general/pull/8434](https\://github\.com/ansible\-collections/community\.general/pull/8434)\)\. +* redfish\_info \- add command CheckAvailability to check if a service is accessible \([https\://github\.com/ansible\-collections/community\.general/issues/8051](https\://github\.com/ansible\-collections/community\.general/issues/8051)\, [https\://github\.com/ansible\-collections/community\.general/pull/8434](https\://github\.com/ansible\-collections/community\.general/pull/8434)\)\. +* redis\_info \- adds support for getting cluster info \([https\://github\.com/ansible\-collections/community\.general/pull/8464](https\://github\.com/ansible\-collections/community\.general/pull/8464)\)\. + + +### Deprecated Features + +* CmdRunner module util \- setting the value of the ignore\_none parameter within a CmdRunner context is deprecated and that feature should be removed in community\.general 12\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/8479](https\://github\.com/ansible\-collections/community\.general/pull/8479)\)\. +* git\_config \- the list\_all option has been deprecated and will be removed in community\.general 11\.0\.0\. Use the community\.general\.git\_config\_info module instead \([https\://github\.com/ansible\-collections/community\.general/pull/8453](https\://github\.com/ansible\-collections/community\.general/pull/8453)\)\. +* git\_config \- using state\=present without providing value is deprecated and will be disallowed in community\.general 11\.0\.0\. Use the community\.general\.git\_config\_info module instead to read a value \([https\://github\.com/ansible\-collections/community\.general/pull/8453](https\://github\.com/ansible\-collections/community\.general/pull/8453)\)\. + + +### Bugfixes + +* git\_config \- fix behavior of state\=absent if value is present \([https\://github\.com/ansible\-collections/community\.general/issues/8436](https\://github\.com/ansible\-collections/community\.general/issues/8436)\, [https\://github\.com/ansible\-collections/community\.general/pull/8452](https\://github\.com/ansible\-collections/community\.general/pull/8452)\)\. +* keycloak\_realm \- add normalizations for attributes and protocol\_mappers \([https\://github\.com/ansible\-collections/community\.general/pull/8496](https\://github\.com/ansible\-collections/community\.general/pull/8496)\)\. +* launchd \- correctly report changed status in check mode \([https\://github\.com/ansible\-collections/community\.general/pull/8406](https\://github\.com/ansible\-collections/community\.general/pull/8406)\)\.
+* opennebula inventory plugin \- fix invalid reference to IP when inventory runs against NICs with no IPv4 address \([https\://github\.com/ansible\-collections/community\.general/pull/8489](https\://github\.com/ansible\-collections/community\.general/pull/8489)\)\. +* opentelemetry callback \- do not save the JSON response when using the ansible\.builtin\.uri module \([https\://github\.com/ansible\-collections/community\.general/pull/8430](https\://github\.com/ansible\-collections/community\.general/pull/8430)\)\. +* opentelemetry callback \- do not save the content response when using the ansible\.builtin\.slurp module \([https\://github\.com/ansible\-collections/community\.general/pull/8430](https\://github\.com/ansible\-collections/community\.general/pull/8430)\)\. +* pacman \- do not fail if an empty list of packages has been provided and there is nothing to do \([https\://github\.com/ansible\-collections/community\.general/pull/8514](https\://github\.com/ansible\-collections/community\.general/pull/8514)\)\. + + +### Known Issues + +* homectl \- the module does not work under Python 3\.13 or newer\, since it relies on the removed crypt standard library module \([https\://github\.com/ansible\-collections/community\.general/issues/4691](https\://github\.com/ansible\-collections/community\.general/issues/4691)\, [https\://github\.com/ansible\-collections/community\.general/pull/8497](https\://github\.com/ansible\-collections/community\.general/pull/8497)\)\. +* udm\_user \- the module does not work under Python 3\.13 or newer\, since it relies on the removed crypt standard library module \([https\://github\.com/ansible\-collections/community\.general/issues/4690](https\://github\.com/ansible\-collections/community\.general/issues/4690)\, [https\://github\.com/ansible\-collections/community\.general/pull/8497](https\://github\.com/ansible\-collections/community\.general/pull/8497)\)\. + + +### New Plugins + + +#### Filter + +* community\.general\.keep\_keys \- Keep specific keys from dictionaries in a list\. +* community\.general\.remove\_keys \- Remove specific keys from dictionaries in a list\. +* community\.general\.replace\_keys \- Replace specific keys in a list of dictionaries\. + + +### New Modules + +* community\.general\.consul\_agent\_check \- Add\, modify\, and delete checks within a consul cluster\. +* community\.general\.consul\_agent\_service \- Add\, modify and delete services within a consul cluster\. +* community\.general\.django\_check \- Wrapper for C\(django\-admin check\)\. +* community\.general\.django\_createcachetable \- Wrapper for C\(django\-admin createcachetable\)\. + + +## v9\.0\.1 + + +### Release Summary + +Bugfix release for inclusion in Ansible 10\.0\.0rc1\. + + +### Minor Changes + +* ansible\_galaxy\_install \- minor refactor in the module \([https\://github\.com/ansible\-collections/community\.general/pull/8413](https\://github\.com/ansible\-collections/community\.general/pull/8413)\)\. + + +### Bugfixes + +* cpanm \- use new VarDict to prevent deprecation warning \([https\://github\.com/ansible\-collections/community\.general/issues/8410](https\://github\.com/ansible\-collections/community\.general/issues/8410)\, [https\://github\.com/ansible\-collections/community\.general/pull/8411](https\://github\.com/ansible\-collections/community\.general/pull/8411)\)\.
+* django module utils \- use new VarDict to prevent deprecation warning \([https\://github\.com/ansible\-collections/community\.general/issues/8410](https\://github\.com/ansible\-collections/community\.general/issues/8410)\, [https\://github\.com/ansible\-collections/community\.general/pull/8411](https\://github\.com/ansible\-collections/community\.general/pull/8411)\)\. +* gconftool2\_info \- use new VarDict to prevent deprecation warning \([https\://github\.com/ansible\-collections/community\.general/issues/8410](https\://github\.com/ansible\-collections/community\.general/issues/8410)\, [https\://github\.com/ansible\-collections/community\.general/pull/8411](https\://github\.com/ansible\-collections/community\.general/pull/8411)\)\. +* homebrew \- do not fail when brew prints warnings \([https\://github\.com/ansible\-collections/community\.general/pull/8406](https\://github\.com/ansible\-collections/community\.general/pull/8406)\, [https\://github\.com/ansible\-collections/community\.general/issues/7044](https\://github\.com/ansible\-collections/community\.general/issues/7044)\)\. +* hponcfg \- use new VarDict to prevent deprecation warning \([https\://github\.com/ansible\-collections/community\.general/issues/8410](https\://github\.com/ansible\-collections/community\.general/issues/8410)\, [https\://github\.com/ansible\-collections/community\.general/pull/8411](https\://github\.com/ansible\-collections/community\.general/pull/8411)\)\. +* kernel\_blacklist \- use new VarDict to prevent deprecation warning \([https\://github\.com/ansible\-collections/community\.general/issues/8410](https\://github\.com/ansible\-collections/community\.general/issues/8410)\, [https\://github\.com/ansible\-collections/community\.general/pull/8411](https\://github\.com/ansible\-collections/community\.general/pull/8411)\)\. +* keycloak\_client \- fix TypeError when sanitizing the saml\.signing\.private\.key attribute in the module\'s diff or state output\. The sanitize\_cr function expected a dict where in some cases a list might occur \([https\://github\.com/ansible\-collections/community\.general/pull/8403](https\://github\.com/ansible\-collections/community\.general/pull/8403)\)\. +* locale\_gen \- use new VarDict to prevent deprecation warning \([https\://github\.com/ansible\-collections/community\.general/issues/8410](https\://github\.com/ansible\-collections/community\.general/issues/8410)\, [https\://github\.com/ansible\-collections/community\.general/pull/8411](https\://github\.com/ansible\-collections/community\.general/pull/8411)\)\. +* mksysb \- use new VarDict to prevent deprecation warning \([https\://github\.com/ansible\-collections/community\.general/issues/8410](https\://github\.com/ansible\-collections/community\.general/issues/8410)\, [https\://github\.com/ansible\-collections/community\.general/pull/8411](https\://github\.com/ansible\-collections/community\.general/pull/8411)\)\. +* pipx\_info \- use new VarDict to prevent deprecation warning \([https\://github\.com/ansible\-collections/community\.general/issues/8410](https\://github\.com/ansible\-collections/community\.general/issues/8410)\, [https\://github\.com/ansible\-collections/community\.general/pull/8411](https\://github\.com/ansible\-collections/community\.general/pull/8411)\)\. 
+* snap \- use new VarDict to prevent deprecation warning \([https\://github\.com/ansible\-collections/community\.general/issues/8410](https\://github\.com/ansible\-collections/community\.general/issues/8410)\, [https\://github\.com/ansible\-collections/community\.general/pull/8411](https\://github\.com/ansible\-collections/community\.general/pull/8411)\)\. +* snap\_alias \- use new VarDict to prevent deprecation warning \([https\://github\.com/ansible\-collections/community\.general/issues/8410](https\://github\.com/ansible\-collections/community\.general/issues/8410)\, [https\://github\.com/ansible\-collections/community\.general/pull/8411](https\://github\.com/ansible\-collections/community\.general/pull/8411)\)\. + + +## v9\.0\.0 + + +### Release Summary + +This is release 9\.0\.0 of community\.general\, released on 2024\-05\-20\. + + +### Minor Changes + +* PythonRunner module utils \- specialisation of CmdRunner to execute Python scripts \([https\://github\.com/ansible\-collections/community\.general/pull/8289](https\://github\.com/ansible\-collections/community\.general/pull/8289)\)\. +* Use offset\-aware datetime\.datetime objects \(with timezone UTC\) instead of offset\-naive UTC timestamps\, which are deprecated in Python 3\.12 \([https\://github\.com/ansible\-collections/community\.general/pull/8222](https\://github\.com/ansible\-collections/community\.general/pull/8222)\)\. +* aix\_lvol \- refactor module to pass list of arguments to module\.run\_command\(\) instead of relying on interpretation by a shell \([https\://github\.com/ansible\-collections/community\.general/pull/8264](https\://github\.com/ansible\-collections/community\.general/pull/8264)\)\. +* apt\_rpm \- add new states latest and present\_not\_latest\. The value latest is equivalent to the current behavior of present\, which will upgrade a package if a newer version exists\. present\_not\_latest does what most users would expect present to do\: it does not upgrade if the package is already installed\. The current behavior of present will be deprecated in a later version\, and eventually changed to that of present\_not\_latest \([https\://github\.com/ansible\-collections/community\.general/issues/8217](https\://github\.com/ansible\-collections/community\.general/issues/8217)\, [https\://github\.com/ansible\-collections/community\.general/pull/8247](https\://github\.com/ansible\-collections/community\.general/pull/8247)\)\. +* apt\_rpm \- refactor module to pass list of arguments to module\.run\_command\(\) instead of relying on interpretation by a shell \([https\://github\.com/ansible\-collections/community\.general/pull/8264](https\://github\.com/ansible\-collections/community\.general/pull/8264)\)\. +* bitwarden lookup plugin \- add bw\_session option\, to pass session key instead of reading from env \([https\://github\.com/ansible\-collections/community\.general/pull/7994](https\://github\.com/ansible\-collections/community\.general/pull/7994)\)\. +* bitwarden lookup plugin \- add support to filter by organization ID \([https\://github\.com/ansible\-collections/community\.general/pull/8188](https\://github\.com/ansible\-collections/community\.general/pull/8188)\)\. +* bitwarden lookup plugin \- allows to fetch all records of a given collection ID\, by allowing to pass an empty value for search\_value when collection\_id is provided \([https\://github\.com/ansible\-collections/community\.general/pull/8013](https\://github\.com/ansible\-collections/community\.general/pull/8013)\)\. 
+* bitwarden lookup plugin \- when looking for items using an item ID\, the item is now accessed directly with bw get item instead of searching through all items\. This doubles the lookup speed \([https\://github\.com/ansible\-collections/community\.general/pull/7468](https\://github\.com/ansible\-collections/community\.general/pull/7468)\)\. +* btrfs\_subvolume \- refactor module to pass list of arguments to module\.run\_command\(\) instead of relying on interpretation by a shell \([https\://github\.com/ansible\-collections/community\.general/pull/8264](https\://github\.com/ansible\-collections/community\.general/pull/8264)\)\. +* cmd\_runner module\_utils \- add validation for minimum and maximum length in the value passed to cmd\_runner\_fmt\.as\_list\(\) \([https\://github\.com/ansible\-collections/community\.general/pull/8288](https\://github\.com/ansible\-collections/community\.general/pull/8288)\)\. +* consul\_auth\_method\, consul\_binding\_rule\, consul\_policy\, consul\_role\, consul\_session\, consul\_token \- added action group community\.general\.consul \([https\://github\.com/ansible\-collections/community\.general/pull/7897](https\://github\.com/ansible\-collections/community\.general/pull/7897)\)\. +* consul\_policy \- added support for diff and check mode \([https\://github\.com/ansible\-collections/community\.general/pull/7878](https\://github\.com/ansible\-collections/community\.general/pull/7878)\)\. +* consul\_policy\, consul\_role\, consul\_session \- removed dependency on requests and factored out common parts \([https\://github\.com/ansible\-collections/community\.general/pull/7826](https\://github\.com/ansible\-collections/community\.general/pull/7826)\, [https\://github\.com/ansible\-collections/community\.general/pull/7878](https\://github\.com/ansible\-collections/community\.general/pull/7878)\)\. +* consul\_role \- node\_identities now expects a node\_name option to match the Consul API\, the old name is still supported as alias \([https\://github\.com/ansible\-collections/community\.general/pull/7878](https\://github\.com/ansible\-collections/community\.general/pull/7878)\)\. +* consul\_role \- service\_identities now expects a service\_name option to match the Consul API\, the old name is still supported as alias \([https\://github\.com/ansible\-collections/community\.general/pull/7878](https\://github\.com/ansible\-collections/community\.general/pull/7878)\)\. +* consul\_role \- added support for diff mode \([https\://github\.com/ansible\-collections/community\.general/pull/7878](https\://github\.com/ansible\-collections/community\.general/pull/7878)\)\. +* consul\_role \- added support for templated policies \([https\://github\.com/ansible\-collections/community\.general/pull/7878](https\://github\.com/ansible\-collections/community\.general/pull/7878)\)\. +* elastic callback plugin \- close elastic client to not leak resources \([https\://github\.com/ansible\-collections/community\.general/pull/7517](https\://github\.com/ansible\-collections/community\.general/pull/7517)\)\. +* filesystem \- add bcachefs support \([https\://github\.com/ansible\-collections/community\.general/pull/8126](https\://github\.com/ansible\-collections/community\.general/pull/8126)\)\. 
+* gandi\_livedns \- adds support for personal access tokens \([https\://github\.com/ansible\-collections/community\.general/issues/7639](https\://github\.com/ansible\-collections/community\.general/issues/7639)\, [https\://github\.com/ansible\-collections/community\.general/pull/8337](https\://github\.com/ansible\-collections/community\.general/pull/8337)\)\. +* gconftool2 \- use ModuleHelper with VarDict \([https\://github\.com/ansible\-collections/community\.general/pull/8226](https\://github\.com/ansible\-collections/community\.general/pull/8226)\)\. +* git\_config \- allow multiple git configs for the same name with the new add\_mode option \([https\://github\.com/ansible\-collections/community\.general/pull/7260](https\://github\.com/ansible\-collections/community\.general/pull/7260)\)\. +* git\_config \- the after and before fields in the diff of the return value can be a list instead of a string in case more configs with the same key are affected \([https\://github\.com/ansible\-collections/community\.general/pull/7260](https\://github\.com/ansible\-collections/community\.general/pull/7260)\)\. +* git\_config \- when a value is unset\, all configs with the same key are unset \([https\://github\.com/ansible\-collections/community\.general/pull/7260](https\://github\.com/ansible\-collections/community\.general/pull/7260)\)\. +* gitlab modules \- add ca\_path option \([https\://github\.com/ansible\-collections/community\.general/pull/7472](https\://github\.com/ansible\-collections/community\.general/pull/7472)\)\. +* gitlab modules \- remove duplicate gitlab package check \([https\://github\.com/ansible\-collections/community\.general/pull/7486](https\://github\.com/ansible\-collections/community\.general/pull/7486)\)\. +* gitlab\_deploy\_key\, gitlab\_group\_members\, gitlab\_group\_variable\, gitlab\_hook\, gitlab\_instance\_variable\, gitlab\_project\_badge\, gitlab\_project\_variable\, gitlab\_user \- improve API pagination and compatibility with different versions of python\-gitlab \([https\://github\.com/ansible\-collections/community\.general/pull/7790](https\://github\.com/ansible\-collections/community\.general/pull/7790)\)\. +* gitlab\_hook \- adds releases\_events parameter for supporting Releases events triggers on GitLab hooks \([https\://github\.com/ansible\-collections/community\.general/pull/7956](https\://github\.com/ansible\-collections/community\.general/pull/7956)\)\. +* gitlab\_runner \- add support for new runner creation workflow \([https\://github\.com/ansible\-collections/community\.general/pull/7199](https\://github\.com/ansible\-collections/community\.general/pull/7199)\)\. +* homebrew \- adds force\_formula parameter to disambiguate a formula from a cask of the same name \([https\://github\.com/ansible\-collections/community\.general/issues/8274](https\://github\.com/ansible\-collections/community\.general/issues/8274)\)\. +* homebrew\, homebrew\_cask \- refactor common argument validation logic into a dedicated homebrew module utils \([https\://github\.com/ansible\-collections/community\.general/issues/8323](https\://github\.com/ansible\-collections/community\.general/issues/8323)\, [https\://github\.com/ansible\-collections/community\.general/pull/8324](https\://github\.com/ansible\-collections/community\.general/pull/8324)\)\. 
+* icinga2 inventory plugin \- add Jinja2 templating support to url\, user\, and password parameters \([https\://github\.com/ansible\-collections/community\.general/issues/7074](https\://github\.com/ansible\-collections/community\.general/issues/7074)\, [https\://github\.com/ansible\-collections/community\.general/pull/7996](https\://github\.com/ansible\-collections/community\.general/pull/7996)\)\. +* icinga2 inventory plugin \- adds new parameter group\_by\_hostgroups in order to make grouping by Icinga2 hostgroups optional \([https\://github\.com/ansible\-collections/community\.general/pull/7998](https\://github\.com/ansible\-collections/community\.general/pull/7998)\)\. +* ini\_file \- add an optional parameter section\_has\_values\. If the target ini file contains more than one section\, use section\_has\_values to specify which one should be updated \([https\://github\.com/ansible\-collections/community\.general/pull/7505](https\://github\.com/ansible\-collections/community\.general/pull/7505)\)\. +* ini\_file \- support optional spaces between section names and their surrounding brackets \([https\://github\.com/ansible\-collections/community\.general/pull/8075](https\://github\.com/ansible\-collections/community\.general/pull/8075)\)\. +* installp \- refactor module to pass list of arguments to module\.run\_command\(\) instead of relying on interpretation by a shell \([https\://github\.com/ansible\-collections/community\.general/pull/8264](https\://github\.com/ansible\-collections/community\.general/pull/8264)\)\. +* ipa\_config \- adds passkey choice to ipauserauthtype parameter\'s choices \([https\://github\.com/ansible\-collections/community\.general/pull/7588](https\://github\.com/ansible\-collections/community\.general/pull/7588)\)\. +* ipa\_dnsrecord \- adds ability to manage NS record types \([https\://github\.com/ansible\-collections/community\.general/pull/7737](https\://github\.com/ansible\-collections/community\.general/pull/7737)\)\. +* ipa\_pwpolicy \- refactor module and exchange a sequence of if statements with a for loop \([https\://github\.com/ansible\-collections/community\.general/pull/7723](https\://github\.com/ansible\-collections/community\.general/pull/7723)\)\. +* ipa\_pwpolicy \- update module to support maxrepeat\, maxsequence\, dictcheck\, usercheck\, gracelimit parameters in FreeIPA password policies \([https\://github\.com/ansible\-collections/community\.general/pull/7723](https\://github\.com/ansible\-collections/community\.general/pull/7723)\)\. +* ipa\_sudorule \- adds options to include denied commands or command groups \([https\://github\.com/ansible\-collections/community\.general/pull/7415](https\://github\.com/ansible\-collections/community\.general/pull/7415)\)\. +* ipa\_user \- adds idp and passkey choice to ipauserauthtype parameter\'s choices \([https\://github\.com/ansible\-collections/community\.general/pull/7589](https\://github\.com/ansible\-collections/community\.general/pull/7589)\)\. +* irc \- add validate\_certs option\, and rename use\_ssl to use\_tls\, while keeping use\_ssl as an alias\. The default value for validate\_certs is false for backwards compatibility\. We recommend that every user of this module explicitly set use\_tls\=true and validate\_certs\=true whenever possible\, especially when communicating with IRC servers over the internet \([https\://github\.com/ansible\-collections/community\.general/pull/7550](https\://github\.com/ansible\-collections/community\.general/pull/7550)\)\.
+* java\_cert \- add cert\_content argument \([https\://github\.com/ansible\-collections/community\.general/pull/8153](https\://github\.com/ansible\-collections/community\.general/pull/8153)\)\. +* java\_cert \- enable owner\, group\, mode\, and other generic file arguments \([https\://github\.com/ansible\-collections/community\.general/pull/8116](https\://github\.com/ansible\-collections/community\.general/pull/8116)\)\. +* kernel\_blacklist \- use ModuleHelper with VarDict \([https\://github\.com/ansible\-collections/community\.general/pull/8226](https\://github\.com/ansible\-collections/community\.general/pull/8226)\)\. +* keycloak module utils \- expose error message from Keycloak server for HTTP errors in some specific situations \([https\://github\.com/ansible\-collections/community\.general/pull/7645](https\://github\.com/ansible\-collections/community\.general/pull/7645)\)\. +* keycloak\_client\, keycloak\_clientscope\, keycloak\_clienttemplate \- added docker\-v2 protocol support\, enhancing alignment with Keycloak\'s protocol options \([https\://github\.com/ansible\-collections/community\.general/issues/8215](https\://github\.com/ansible\-collections/community\.general/issues/8215)\, [https\://github\.com/ansible\-collections/community\.general/pull/8216](https\://github\.com/ansible\-collections/community\.general/pull/8216)\)\. +* keycloak\_realm\_key \- the config\.algorithm option now supports 8 additional key algorithms \([https\://github\.com/ansible\-collections/community\.general/pull/7698](https\://github\.com/ansible\-collections/community\.general/pull/7698)\)\. +* keycloak\_realm\_key \- the config\.certificate option value is no longer defined with no\_log\=True \([https\://github\.com/ansible\-collections/community\.general/pull/7698](https\://github\.com/ansible\-collections/community\.general/pull/7698)\)\. +* keycloak\_realm\_key \- the provider\_id option now supports RSA encryption key usage \(value rsa\-enc\) \([https\://github\.com/ansible\-collections/community\.general/pull/7698](https\://github\.com/ansible\-collections/community\.general/pull/7698)\)\. +* keycloak\_user\_federation \- add option for krbPrincipalAttribute \([https\://github\.com/ansible\-collections/community\.general/pull/7538](https\://github\.com/ansible\-collections/community\.general/pull/7538)\)\. +* keycloak\_user\_federation \- allow custom user storage providers to be set through provider\_id \([https\://github\.com/ansible\-collections/community\.general/pull/7789](https\://github\.com/ansible\-collections/community\.general/pull/7789)\)\. +* ldap\_attrs \- module now supports diff mode\, showing which attributes are changed within an operation \([https\://github\.com/ansible\-collections/community\.general/pull/8073](https\://github\.com/ansible\-collections/community\.general/pull/8073)\)\. +* lvg \- refactor module to pass list of arguments to module\.run\_command\(\) instead of relying on interpretation by a shell \([https\://github\.com/ansible\-collections/community\.general/pull/8264](https\://github\.com/ansible\-collections/community\.general/pull/8264)\)\. +* lvol \- change pvs argument type to list of strings \([https\://github\.com/ansible\-collections/community\.general/pull/7676](https\://github\.com/ansible\-collections/community\.general/pull/7676)\, [https\://github\.com/ansible\-collections/community\.general/issues/7504](https\://github\.com/ansible\-collections/community\.general/issues/7504)\)\. 
+* lvol \- refactor module to pass list of arguments to module\.run\_command\(\) instead of relying on interpretation by a shell \([https\://github\.com/ansible\-collections/community\.general/pull/8264](https\://github\.com/ansible\-collections/community\.general/pull/8264)\)\. +* lxd connection plugin \- tighten the detection logic for lxd Instance not found errors\, to avoid false detection on unrelated errors such as /usr/bin/python3\: not found \([https\://github\.com/ansible\-collections/community\.general/pull/7521](https\://github\.com/ansible\-collections/community\.general/pull/7521)\)\. +* lxd\_container \- uses /1\.0/instances API endpoint\, if available\. Falls back to /1\.0/containers or /1\.0/virtual\-machines\. Fixes issue when using Incus or LXD 5\.19 due to migrating to /1\.0/instances endpoint \([https\://github\.com/ansible\-collections/community\.general/pull/7980](https\://github\.com/ansible\-collections/community\.general/pull/7980)\)\. +* macports \- refactor module to pass list of arguments to module\.run\_command\(\) instead of relying on interpretation by a shell \([https\://github\.com/ansible\-collections/community\.general/pull/8264](https\://github\.com/ansible\-collections/community\.general/pull/8264)\)\. +* mail \- add Message\-ID header\, which is required by some mail servers \([https\://github\.com/ansible\-collections/community\.general/pull/7740](https\://github\.com/ansible\-collections/community\.general/pull/7740)\)\. +* mail module\, mail callback plugin \- allow configuring the domain name of the Message\-ID header with a new message\_id\_domain option \([https\://github\.com/ansible\-collections/community\.general/pull/7765](https\://github\.com/ansible\-collections/community\.general/pull/7765)\)\. +* mssql\_script \- adds transactional \(rollback/commit\) support via optional boolean parameter transaction \([https\://github\.com/ansible\-collections/community\.general/pull/7976](https\://github\.com/ansible\-collections/community\.general/pull/7976)\)\. +* netcup\_dns \- adds support for record types OPENPGPKEY\, SMIMEA\, and SSHFP \([https\://github\.com/ansible\-collections/community\.general/pull/7489](https\://github\.com/ansible\-collections/community\.general/pull/7489)\)\. +* nmcli \- add support for new connection type loopback \([https\://github\.com/ansible\-collections/community\.general/issues/6572](https\://github\.com/ansible\-collections/community\.general/issues/6572)\)\. +* nmcli \- adds OpenvSwitch support with new type values ovs\-port\, ovs\-interface\, and ovs\-bridge\, and new slave\_type value ovs\-port \([https\://github\.com/ansible\-collections/community\.general/pull/8154](https\://github\.com/ansible\-collections/community\.general/pull/8154)\)\. +* nmcli \- allow for infiniband slaves of bond interface types \([https\://github\.com/ansible\-collections/community\.general/pull/7569](https\://github\.com/ansible\-collections/community\.general/pull/7569)\)\. +* nmcli \- allow for the setting of MTU for infiniband and bond interface types \([https\://github\.com/ansible\-collections/community\.general/pull/7499](https\://github\.com/ansible\-collections/community\.general/pull/7499)\)\. +* nmcli \- allow setting MTU for bond\-slave interface types \([https\://github\.com/ansible\-collections/community\.general/pull/8118](https\://github\.com/ansible\-collections/community\.general/pull/8118)\)\.
+* onepassword lookup plugin \- support 1Password Connect with the opv2 client by setting the connect\_host and connect\_token parameters \([https\://github\.com/ansible\-collections/community\.general/pull/7116](https\://github\.com/ansible\-collections/community\.general/pull/7116)\)\. +* onepassword\_raw lookup plugin \- support 1Password Connect with the opv2 client by setting the connect\_host and connect\_token parameters \([https\://github\.com/ansible\-collections/community\.general/pull/7116](https\://github\.com/ansible\-collections/community\.general/pull/7116)\) +* opentelemetry \- add support for HTTP trace\_exporter and configures the behavior via OTEL\_EXPORTER\_OTLP\_TRACES\_PROTOCOL \([https\://github\.com/ansible\-collections/community\.general/issues/7888](https\://github\.com/ansible\-collections/community\.general/issues/7888)\, [https\://github\.com/ansible\-collections/community\.general/pull/8321](https\://github\.com/ansible\-collections/community\.general/pull/8321)\)\. +* opentelemetry \- add support for exporting spans in a file via ANSIBLE\_OPENTELEMETRY\_STORE\_SPANS\_IN\_FILE \([https\://github\.com/ansible\-collections/community\.general/issues/7888](https\://github\.com/ansible\-collections/community\.general/issues/7888)\, [https\://github\.com/ansible\-collections/community\.general/pull/8363](https\://github\.com/ansible\-collections/community\.general/pull/8363)\)\. +* opkg \- use ModuleHelper with VarDict \([https\://github\.com/ansible\-collections/community\.general/pull/8226](https\://github\.com/ansible\-collections/community\.general/pull/8226)\)\. +* osx\_defaults \- add option check\_types to enable changing the type of existing defaults on the fly \([https\://github\.com/ansible\-collections/community\.general/pull/8173](https\://github\.com/ansible\-collections/community\.general/pull/8173)\)\. +* parted \- refactor module to pass list of arguments to module\.run\_command\(\) instead of relying on interpretation by a shell \([https\://github\.com/ansible\-collections/community\.general/pull/8264](https\://github\.com/ansible\-collections/community\.general/pull/8264)\)\. +* passwordstore \- adds timestamp and preserve parameters to modify the stored password format \([https\://github\.com/ansible\-collections/community\.general/pull/7426](https\://github\.com/ansible\-collections/community\.general/pull/7426)\)\. +* passwordstore lookup \- add missing\_subkey parameter defining the behavior of the lookup when a passwordstore subkey is missing \([https\://github\.com/ansible\-collections/community\.general/pull/8166](https\://github\.com/ansible\-collections/community\.general/pull/8166)\)\. +* pipx \- use ModuleHelper with VarDict \([https\://github\.com/ansible\-collections/community\.general/pull/8226](https\://github\.com/ansible\-collections/community\.general/pull/8226)\)\. +* pkg5 \- add support for non\-silent execution \([https\://github\.com/ansible\-collections/community\.general/issues/8379](https\://github\.com/ansible\-collections/community\.general/issues/8379)\, [https\://github\.com/ansible\-collections/community\.general/pull/8382](https\://github\.com/ansible\-collections/community\.general/pull/8382)\)\. +* pkgin \- refactor module to pass list of arguments to module\.run\_command\(\) instead of relying on interpretation by a shell \([https\://github\.com/ansible\-collections/community\.general/pull/8264](https\://github\.com/ansible\-collections/community\.general/pull/8264)\)\. 
+* portage \- adds the possibility to explicitly tell portage to write packages to the world file \([https\://github\.com/ansible\-collections/community\.general/issues/6226](https\://github\.com/ansible\-collections/community\.general/issues/6226)\, [https\://github\.com/ansible\-collections/community\.general/pull/8236](https\://github\.com/ansible\-collections/community\.general/pull/8236)\)\. +* portinstall \- refactor module to pass list of arguments to module\.run\_command\(\) instead of relying on interpretation by a shell \([https\://github\.com/ansible\-collections/community\.general/pull/8264](https\://github\.com/ansible\-collections/community\.general/pull/8264)\)\. +* proxmox \- adds startup parameters to configure startup order\, startup delay and shutdown delay \([https\://github\.com/ansible\-collections/community\.general/pull/8038](https\://github\.com/ansible\-collections/community\.general/pull/8038)\)\. +* proxmox \- adds template value to the state parameter\, allowing conversion of container to a template \([https\://github\.com/ansible\-collections/community\.general/pull/7143](https\://github\.com/ansible\-collections/community\.general/pull/7143)\)\. +* proxmox \- adds update parameter\, allowing update of an already existing container\'s configuration \([https\://github\.com/ansible\-collections/community\.general/pull/7540](https\://github\.com/ansible\-collections/community\.general/pull/7540)\)\. +* proxmox inventory plugin \- adds an option to exclude nodes from the dynamic inventory generation\. The new setting is optional\; if it is not used\, the plugin behaves as usual \([https\://github\.com/ansible\-collections/community\.general/issues/6714](https\://github\.com/ansible\-collections/community\.general/issues/6714)\, [https\://github\.com/ansible\-collections/community\.general/pull/7461](https\://github\.com/ansible\-collections/community\.general/pull/7461)\)\. +* proxmox\* modules \- there is now a community\.general\.proxmox module defaults group that can be used to set default options for all Proxmox modules \([https\://github\.com/ansible\-collections/community\.general/pull/8334](https\://github\.com/ansible\-collections/community\.general/pull/8334)\)\. +* proxmox\_disk \- add ability to manipulate CD\-ROM drive \([https\://github\.com/ansible\-collections/community\.general/pull/7495](https\://github\.com/ansible\-collections/community\.general/pull/7495)\)\. +* proxmox\_kvm \- add parameter update\_unsafe to avoid limitations when updating dangerous values \([https\://github\.com/ansible\-collections/community\.general/pull/7843](https\://github\.com/ansible\-collections/community\.general/pull/7843)\)\. +* proxmox\_kvm \- adds template value to the state parameter\, allowing conversion of a VM to a template \([https\://github\.com/ansible\-collections/community\.general/pull/7143](https\://github\.com/ansible\-collections/community\.general/pull/7143)\)\. +* proxmox\_kvm \- adds \`\`usb\`\` parameter for setting USB devices on proxmox KVM VMs \([https\://github\.com/ansible\-collections/community\.general/pull/8199](https\://github\.com/ansible\-collections/community\.general/pull/8199)\)\. +* proxmox\_kvm \- support the hookscript parameter \([https\://github\.com/ansible\-collections/community\.general/issues/7600](https\://github\.com/ansible\-collections/community\.general/issues/7600)\)\.
+* proxmox\_ostype \- it is now possible to specify the ostype when creating an LXC container \([https\://github\.com/ansible\-collections/community\.general/pull/7462](https\://github\.com/ansible\-collections/community\.general/pull/7462)\)\. +* proxmox\_vm\_info \- add ability to retrieve configuration info \([https\://github\.com/ansible\-collections/community\.general/pull/7485](https\://github\.com/ansible\-collections/community\.general/pull/7485)\)\. +* puppet \- new feature to set \-\-waitforlock option \([https\://github\.com/ansible\-collections/community\.general/pull/8282](https\://github\.com/ansible\-collections/community\.general/pull/8282)\)\. +* redfish\_command \- add command ResetToDefaults to reset manager to default state \([https\://github\.com/ansible\-collections/community\.general/issues/8163](https\://github\.com/ansible\-collections/community\.general/issues/8163)\)\. +* redfish\_config \- add command SetServiceIdentification to set service identification \([https\://github\.com/ansible\-collections/community\.general/issues/7916](https\://github\.com/ansible\-collections/community\.general/issues/7916)\)\. +* redfish\_info \- add boolean return value MultipartHttpPush to GetFirmwareUpdateCapabilities \([https\://github\.com/ansible\-collections/community\.general/issues/8194](https\://github\.com/ansible\-collections/community\.general/issues/8194)\, [https\://github\.com/ansible\-collections/community\.general/pull/8195](https\://github\.com/ansible\-collections/community\.general/pull/8195)\)\. +* redfish\_info \- add command GetServiceIdentification to get service identification \([https\://github\.com/ansible\-collections/community\.general/issues/7882](https\://github\.com/ansible\-collections/community\.general/issues/7882)\)\. +* redfish\_info \- adding the BootProgress property when getting Systems info \([https\://github\.com/ansible\-collections/community\.general/pull/7626](https\://github\.com/ansible\-collections/community\.general/pull/7626)\)\. +* revbitspss lookup plugin \- removed a redundant unicode prefix\. The prefix was not necessary for Python 3 and has been cleaned up to streamline the code \([https\://github\.com/ansible\-collections/community\.general/pull/8087](https\://github\.com/ansible\-collections/community\.general/pull/8087)\)\. +* rundeck module utils \- allow to pass Content\-Type to API requests \([https\://github\.com/ansible\-collections/community\.general/pull/7684](https\://github\.com/ansible\-collections/community\.general/pull/7684)\)\. +* slackpkg \- refactor module to pass list of arguments to module\.run\_command\(\) instead of relying on interpretation by a shell \([https\://github\.com/ansible\-collections/community\.general/pull/8264](https\://github\.com/ansible\-collections/community\.general/pull/8264)\)\. +* ssh\_config \- adds controlmaster\, controlpath and controlpersist parameters \([https\://github\.com/ansible\-collections/community\.general/pull/7456](https\://github\.com/ansible\-collections/community\.general/pull/7456)\)\. +* ssh\_config \- allow accept\-new as valid value for strict\_host\_key\_checking \([https\://github\.com/ansible\-collections/community\.general/pull/8257](https\://github\.com/ansible\-collections/community\.general/pull/8257)\)\. +* ssh\_config \- new feature to set AddKeysToAgent option to yes or no \([https\://github\.com/ansible\-collections/community\.general/pull/7703](https\://github\.com/ansible\-collections/community\.general/pull/7703)\)\. 
+* ssh\_config \- new feature to set IdentitiesOnly option to yes or no \([https\://github\.com/ansible\-collections/community\.general/pull/7704](https\://github\.com/ansible\-collections/community\.general/pull/7704)\)\. +* sudoers \- add support for the NOEXEC tag in sudoers rules \([https\://github\.com/ansible\-collections/community\.general/pull/7983](https\://github\.com/ansible\-collections/community\.general/pull/7983)\)\. +* svr4pkg \- refactor module to pass list of arguments to module\.run\_command\(\) instead of relying on interpretation by a shell \([https\://github\.com/ansible\-collections/community\.general/pull/8264](https\://github\.com/ansible\-collections/community\.general/pull/8264)\)\. +* swdepot \- refactor module to pass list of arguments to module\.run\_command\(\) instead of relying on interpretation by a shell \([https\://github\.com/ansible\-collections/community\.general/pull/8264](https\://github\.com/ansible\-collections/community\.general/pull/8264)\)\. +* terraform \- add support for diff\_mode for terraform resource\_changes \([https\://github\.com/ansible\-collections/community\.general/pull/7896](https\://github\.com/ansible\-collections/community\.general/pull/7896)\)\. +* terraform \- fix diff\_mode in state absent and when terraform resource\_changes does not exist \([https\://github\.com/ansible\-collections/community\.general/pull/7963](https\://github\.com/ansible\-collections/community\.general/pull/7963)\)\. +* xcc\_redfish\_command \- added support for raw POSTs \(command\=PostResource in category\=Raw\) without a specific action info \([https\://github\.com/ansible\-collections/community\.general/pull/7746](https\://github\.com/ansible\-collections/community\.general/pull/7746)\)\. +* xfconf \- use ModuleHelper with VarDict \([https\://github\.com/ansible\-collections/community\.general/pull/8226](https\://github\.com/ansible\-collections/community\.general/pull/8226)\)\. +* xfconf\_info \- use ModuleHelper with VarDict \([https\://github\.com/ansible\-collections/community\.general/pull/8226](https\://github\.com/ansible\-collections/community\.general/pull/8226)\)\. + + +### Breaking Changes / Porting Guide + +* cpanm \- the default of the mode option changed from compatibility to new \([https\://github\.com/ansible\-collections/community\.general/pull/8198](https\://github\.com/ansible\-collections/community\.general/pull/8198)\)\. +* django\_manage \- the module now requires Django \>\= 4\.1 \([https\://github\.com/ansible\-collections/community\.general/pull/8198](https\://github\.com/ansible\-collections/community\.general/pull/8198)\)\. +* django\_manage \- the module will now fail if virtualenv is specified but no virtual environment exists at that location \([https\://github\.com/ansible\-collections/community\.general/pull/8198](https\://github\.com/ansible\-collections/community\.general/pull/8198)\)\. +* redfish\_command\, redfish\_config\, redfish\_info \- change the default for timeout from 10 to 60 \([https\://github\.com/ansible\-collections/community\.general/pull/8198](https\://github\.com/ansible\-collections/community\.general/pull/8198)\)\. + + +### Deprecated Features + +* MH DependencyCtxMgr module\_utils \- deprecate module\_utils\.mh\.mixin\.deps\.DependencyCtxMgr in favour of module\_utils\.deps \([https\://github\.com/ansible\-collections/community\.general/pull/8280](https\://github\.com/ansible\-collections/community\.general/pull/8280)\)\. 
+* ModuleHelper module\_utils \- deprecate plugins\.module\_utils\.module\_helper\.AnsibleModule \([https\://github\.com/ansible\-collections/community\.general/pull/8280](https\://github\.com/ansible\-collections/community\.general/pull/8280)\)\. +* ModuleHelper module\_utils \- deprecate plugins\.module\_utils\.module\_helper\.DependencyCtxMgr \([https\://github\.com/ansible\-collections/community\.general/pull/8280](https\://github\.com/ansible\-collections/community\.general/pull/8280)\)\. +* ModuleHelper module\_utils \- deprecate plugins\.module\_utils\.module\_helper\.StateMixin \([https\://github\.com/ansible\-collections/community\.general/pull/8280](https\://github\.com/ansible\-collections/community\.general/pull/8280)\)\. +* ModuleHelper module\_utils \- deprecate plugins\.module\_utils\.module\_helper\.VarDict \([https\://github\.com/ansible\-collections/community\.general/pull/8280](https\://github\.com/ansible\-collections/community\.general/pull/8280)\)\. +* ModuleHelper module\_utils \- deprecate plugins\.module\_utils\.module\_helper\.VarMeta \([https\://github\.com/ansible\-collections/community\.general/pull/8280](https\://github\.com/ansible\-collections/community\.general/pull/8280)\)\. +* ModuleHelper module\_utils \- deprecate plugins\.module\_utils\.module\_helper\.VarsMixin \([https\://github\.com/ansible\-collections/community\.general/pull/8280](https\://github\.com/ansible\-collections/community\.general/pull/8280)\)\. +* ModuleHelper module\_utils \- deprecate use of VarsMixin in favor of using the VarDict module\_utils \([https\://github\.com/ansible\-collections/community\.general/pull/8226](https\://github\.com/ansible\-collections/community\.general/pull/8226)\)\. +* ModuleHelper vars module\_utils \- bump deprecation of VarMeta\, VarDict and VarsMixin to version 11\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/8226](https\://github\.com/ansible\-collections/community\.general/pull/8226)\)\. +* apt\_rpm \- the behavior of state\=present and state\=installed is deprecated and will change in community\.general 11\.0\.0\. Right now the module will upgrade a package to the latest version if one of these two states is used\. You should explicitly use state\=latest if you want this behavior\, and switch to state\=present\_not\_latest if you do not want to upgrade the package if it is already installed\. In community\.general 11\.0\.0 the behavior of state\=present and state\=installed will change to that of state\=present\_not\_latest \([https\://github\.com/ansible\-collections/community\.general/issues/8217](https\://github\.com/ansible\-collections/community\.general/issues/8217)\, [https\://github\.com/ansible\-collections/community\.general/pull/8285](https\://github\.com/ansible\-collections/community\.general/pull/8285)\)\. +* consul\_acl \- the module has been deprecated and will be removed in community\.general 10\.0\.0\. consul\_token and consul\_policy can be used instead \([https\://github\.com/ansible\-collections/community\.general/pull/7901](https\://github\.com/ansible\-collections/community\.general/pull/7901)\)\. +* django\_manage \- the ack\_venv\_creation\_deprecation option no longer has any effect and will be removed from community\.general 11\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/8198](https\://github\.com/ansible\-collections/community\.general/pull/8198)\)\.
+* gitlab modules \- the basic auth method on the GitLab API has been deprecated and will be removed in community\.general 10\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/8383](https\://github\.com/ansible\-collections/community\.general/pull/8383)\)\. +* hipchat callback plugin \- the hipchat service has been discontinued and the self\-hosted variant has been End of Life since 2020\. The callback plugin is therefore deprecated and will be removed from community\.general 10\.0\.0 if nobody provides compelling reasons to still keep it \([https\://github\.com/ansible\-collections/community\.general/issues/8184](https\://github\.com/ansible\-collections/community\.general/issues/8184)\, [https\://github\.com/ansible\-collections/community\.general/pull/8189](https\://github\.com/ansible\-collections/community\.general/pull/8189)\)\. +* irc \- the defaults false for use\_tls and validate\_certs have been deprecated and will change to true in community\.general 10\.0\.0 to improve security\. You can already improve security now by explicitly setting them to true\. Specifying values now disables the deprecation warning \([https\://github\.com/ansible\-collections/community\.general/pull/7578](https\://github\.com/ansible\-collections/community\.general/pull/7578)\)\. + + +### Removed Features \(previously deprecated\) + +* The deprecated redirects for internal module names have been removed\. These internal redirects were extra\-long FQCNs like community\.general\.packaging\.os\.apt\_rpm that redirect to the short FQCN community\.general\.apt\_rpm\. They were originally needed to implement flatmapping\; as various tooling started to recommend users to use the long names\, flatmapping was removed from the collection and redirects were added for users who already followed these incorrect recommendations \([https\://github\.com/ansible\-collections/community\.general/pull/7835](https\://github\.com/ansible\-collections/community\.general/pull/7835)\)\. +* ansible\_galaxy\_install \- the ack\_ansible29 and ack\_min\_ansiblecore211 options have been removed\. They no longer had any effect \([https\://github\.com/ansible\-collections/community\.general/pull/8198](https\://github\.com/ansible\-collections/community\.general/pull/8198)\)\. +* cloudflare\_dns \- remove support for SPF records\. These are no longer supported by CloudFlare \([https\://github\.com/ansible\-collections/community\.general/pull/7782](https\://github\.com/ansible\-collections/community\.general/pull/7782)\)\. +* django\_manage \- support for the command values cleanup\, syncdb\, and validate was removed\. Use clearsessions\, migrate\, and check instead\, respectively \([https\://github\.com/ansible\-collections/community\.general/pull/8198](https\://github\.com/ansible\-collections/community\.general/pull/8198)\)\. +* flowdock \- this module relied on HTTPS APIs that do not exist anymore and was thus removed \([https\://github\.com/ansible\-collections/community\.general/pull/8198](https\://github\.com/ansible\-collections/community\.general/pull/8198)\)\. +* mh\.mixins\.deps module utils \- the DependencyMixin has been removed\. Use the deps module utils instead \([https\://github\.com/ansible\-collections/community\.general/pull/8198](https\://github\.com/ansible\-collections/community\.general/pull/8198)\)\.
+* proxmox \- the proxmox\_default\_behavior option has been removed \([https\://github\.com/ansible\-collections/community\.general/pull/8198](https\://github\.com/ansible\-collections/community\.general/pull/8198)\)\. +* rax\* modules\, rax module utils\, rax docs fragment \- the Rackspace modules relied on the deprecated package pyrax and were thus removed \([https\://github\.com/ansible\-collections/community\.general/pull/8198](https\://github\.com/ansible\-collections/community\.general/pull/8198)\)\. +* redhat module utils \- the classes Rhsm\, RhsmPool\, and RhsmPools have been removed \([https\://github\.com/ansible\-collections/community\.general/pull/8198](https\://github\.com/ansible\-collections/community\.general/pull/8198)\)\. +* redhat\_subscription \- the alias autosubscribe of the auto\_attach option was removed \([https\://github\.com/ansible\-collections/community\.general/pull/8198](https\://github\.com/ansible\-collections/community\.general/pull/8198)\)\. +* stackdriver \- this module relied on HTTPS APIs that do not exist anymore and was thus removed \([https\://github\.com/ansible\-collections/community\.general/pull/8198](https\://github\.com/ansible\-collections/community\.general/pull/8198)\)\. +* webfaction\_\* modules \- these modules relied on HTTPS APIs that do not exist anymore and were thus removed \([https\://github\.com/ansible\-collections/community\.general/pull/8198](https\://github\.com/ansible\-collections/community\.general/pull/8198)\)\. + + +### Security Fixes + +* cobbler\, gitlab\_runners\, icinga2\, linode\, lxd\, nmap\, online\, opennebula\, proxmox\, scaleway\, stackpath\_compute\, virtualbox\, and xen\_orchestra inventory plugin \- make sure all data received from the remote servers is marked as unsafe\, so remote code execution by obtaining texts that can be evaluated as templates is not possible \([https\://www\.die\-welt\.net/2024/03/remote\-code\-execution\-in\-ansible\-dynamic\-inventory\-plugins/](https\://www\.die\-welt\.net/2024/03/remote\-code\-execution\-in\-ansible\-dynamic\-inventory\-plugins/)\, [https\://github\.com/ansible\-collections/community\.general/pull/8098](https\://github\.com/ansible\-collections/community\.general/pull/8098)\)\. +* keycloak\_identity\_provider \- the client secret was not correctly sanitized by the module\. The return values proposed\, existing\, and end\_state\, as well as the diff\, did contain the client secret unmasked \([https\://github\.com/ansible\-collections/community\.general/pull/8355](https\://github\.com/ansible\-collections/community\.general/pull/8355)\)\. + + +### Bugfixes + +* aix\_filesystem \- fix \_validate\_vg not passing VG name to lsvg\_cmd \([https\://github\.com/ansible\-collections/community\.general/issues/8151](https\://github\.com/ansible\-collections/community\.general/issues/8151)\)\. +* aix\_filesystem \- fix issue with empty list items in crfs logic and option order \([https\://github\.com/ansible\-collections/community\.general/pull/8052](https\://github\.com/ansible\-collections/community\.general/pull/8052)\)\. +* apt\_rpm \- the module did not upgrade packages if a newer version exists\. Now the package will be reinstalled if the candidate is newer than the installed version \([https\://github\.com/ansible\-collections/community\.general/issues/7414](https\://github\.com/ansible\-collections/community\.general/issues/7414)\)\.
+* apt\_rpm \- when checking whether packages were installed after running apt\-get \-y install \\, only the last package name was checked \([https\://github\.com/ansible\-collections/community\.general/pull/8263](https\://github\.com/ansible\-collections/community\.general/pull/8263)\)\. +* bitwarden\_secrets\_manager lookup plugin \- implements retry with exponential backoff to avoid lookup errors when Bitwarden\'s API rate limiting is encountered \([https\://github\.com/ansible\-collections/community\.general/issues/8230](https\://github\.com/ansible\-collections/community\.general/issues/8230)\, [https\://github\.com/ansible\-collections/community\.general/pull/8238](https\://github\.com/ansible\-collections/community\.general/pull/8238)\)\. +* cargo \- fix idempotency issues when using a custom installation path for packages \(using the \-\-path parameter\)\. The initial installation runs fine\, but subsequent runs use the get\_installed\(\) function which did not check the given installation location before running cargo install\. This resulted in a false changed state\. Also the removal of packages using state\: absent failed\, as the installation check did not use the given parameter \([https\://github\.com/ansible\-collections/community\.general/pull/7970](https\://github\.com/ansible\-collections/community\.general/pull/7970)\)\. +* cloudflare\_dns \- fix Cloudflare lookup of SSHFP records \([https\://github\.com/ansible\-collections/community\.general/issues/7652](https\://github\.com/ansible\-collections/community\.general/issues/7652)\)\. +* consul\_token \- fix token creation without accessor\_id \([https\://github\.com/ansible\-collections/community\.general/pull/8091](https\://github\.com/ansible\-collections/community\.general/pull/8091)\)\. +* from\_ini filter plugin \- disabling interpolation of ConfigParser to allow converting values with a \% sign \([https\://github\.com/ansible\-collections/community\.general/issues/8183](https\://github\.com/ansible\-collections/community\.general/issues/8183)\, [https\://github\.com/ansible\-collections/community\.general/pull/8185](https\://github\.com/ansible\-collections/community\.general/pull/8185)\)\. +* gitlab\_group\_members \- fix gitlab constants call in gitlab\_group\_members module \([https\://github\.com/ansible\-collections/community\.general/issues/7467](https\://github\.com/ansible\-collections/community\.general/issues/7467)\)\. +* gitlab\_issue \- fix behavior to search GitLab issue\, using search keyword instead of title \([https\://github\.com/ansible\-collections/community\.general/issues/7846](https\://github\.com/ansible\-collections/community\.general/issues/7846)\)\. +* gitlab\_issue\, gitlab\_label\, gitlab\_milestone \- avoid crash during version comparison when the python\-gitlab Python module is not installed \([https\://github\.com/ansible\-collections/community\.general/pull/8158](https\://github\.com/ansible\-collections/community\.general/pull/8158)\)\. +* gitlab\_project\_members \- fix gitlab constants call in gitlab\_project\_members module \([https\://github\.com/ansible\-collections/community\.general/issues/7467](https\://github\.com/ansible\-collections/community\.general/issues/7467)\)\. +* gitlab\_protected\_branches \- fix gitlab constants call in gitlab\_protected\_branches module \([https\://github\.com/ansible\-collections/community\.general/issues/7467](https\://github\.com/ansible\-collections/community\.general/issues/7467)\)\.
+* gitlab\_runner \- fix pagination when checking for existing runners \([https\://github\.com/ansible\-collections/community\.general/pull/7790](https\://github\.com/ansible\-collections/community\.general/pull/7790)\)\. +* gitlab\_user \- fix gitlab constants call in gitlab\_user module \([https\://github\.com/ansible\-collections/community\.general/issues/7467](https\://github\.com/ansible\-collections/community\.general/issues/7467)\)\. +* haproxy \- fix an issue where HAProxy could get stuck in DRAIN mode when the backend was unreachable \([https\://github\.com/ansible\-collections/community\.general/issues/8092](https\://github\.com/ansible\-collections/community\.general/issues/8092)\)\. +* homebrew \- detect already installed formulae and casks using JSON output from brew info \([https\://github\.com/ansible\-collections/community\.general/issues/864](https\://github\.com/ansible\-collections/community\.general/issues/864)\)\. +* homebrew \- the error returned from the brew command was ignored and the module tried to parse empty JSON\. The module now checks for an error and raises it to give an accurate error message to users \([https\://github\.com/ansible\-collections/community\.general/issues/8047](https\://github\.com/ansible\-collections/community\.general/issues/8047)\)\. +* incus connection plugin \- treats inventory\_hostname as a variable instead of a literal in remote connections \([https\://github\.com/ansible\-collections/community\.general/issues/7874](https\://github\.com/ansible\-collections/community\.general/issues/7874)\)\. +* interface\_files \- also consider address\_family when changing option\=method \([https\://github\.com/ansible\-collections/community\.general/issues/7610](https\://github\.com/ansible\-collections/community\.general/issues/7610)\, [https\://github\.com/ansible\-collections/community\.general/pull/7612](https\://github\.com/ansible\-collections/community\.general/pull/7612)\)\. +* inventory plugins \- add unsafe wrapper to avoid marking strings that do not contain \{ or \} as unsafe\, to work around a bug in AWX \([https\://github\.com/ansible\-collections/community\.general/issues/8212](https\://github\.com/ansible\-collections/community\.general/issues/8212)\, [https\://github\.com/ansible\-collections/community\.general/pull/8225](https\://github\.com/ansible\-collections/community\.general/pull/8225)\)\. +* ipa \- fix get version regex in IPA module\_utils \([https\://github\.com/ansible\-collections/community\.general/pull/8175](https\://github\.com/ansible\-collections/community\.general/pull/8175)\)\. +* ipa\_hbacrule \- the module uses a string for ipaenabledflag for new FreeIPA versions while the returned value is a boolean \([https\://github\.com/ansible\-collections/community\.general/pull/7880](https\://github\.com/ansible\-collections/community\.general/pull/7880)\)\. +* ipa\_otptoken \- the module expects ipatokendisabled as a string but the ipatokendisabled value is returned as a boolean \([https\://github\.com/ansible\-collections/community\.general/pull/7795](https\://github\.com/ansible\-collections/community\.general/pull/7795)\)\. +* ipa\_sudorule \- the module uses a string for ipaenabledflag for new FreeIPA versions while the returned value is a boolean \([https\://github\.com/ansible\-collections/community\.general/pull/7880](https\://github\.com/ansible\-collections/community\.general/pull/7880)\)\.
+* iptables\_state \- fix idempotency issues when restoring incomplete iptables dumps \([https\://github\.com/ansible\-collections/community\.general/issues/8029](https\://github\.com/ansible\-collections/community\.general/issues/8029)\)\. +* irc \- replace ssl\.wrap\_socket that was removed from Python 3\.12 with code for creating a proper SSL context \([https\://github\.com/ansible\-collections/community\.general/pull/7542](https\://github\.com/ansible\-collections/community\.general/pull/7542)\)\. +* keycloak\_\* \- fix Keycloak API client to quote / properly \([https\://github\.com/ansible\-collections/community\.general/pull/7641](https\://github\.com/ansible\-collections/community\.general/pull/7641)\)\. +* keycloak\_authz\_permission \- resource payload variable for scope\-based permission was constructed as a string\, when it needs to be a list\, even for a single item \([https\://github\.com/ansible\-collections/community\.general/issues/7151](https\://github\.com/ansible\-collections/community\.general/issues/7151)\)\. +* keycloak\_client \- add sorted defaultClientScopes and optionalClientScopes to normalizations \([https\://github\.com/ansible\-collections/community\.general/pull/8223](https\://github\.com/ansible\-collections/community\.general/pull/8223)\)\. +* keycloak\_client \- fixes issue when metadata is provided in desired state when task is in check mode \([https\://github\.com/ansible\-collections/community\.general/issues/1226](https\://github\.com/ansible\-collections/community\.general/issues/1226)\, [https\://github\.com/ansible\-collections/community\.general/pull/7881](https\://github\.com/ansible\-collections/community\.general/pull/7881)\)\. +* keycloak\_identity\_provider \- mappers processing was not idempotent if the mappers configuration list had not been sorted by name \(in ascending order\)\. Fix resolves the issue by sorting mappers in the desired state using the same key which is used for obtaining existing state \([https\://github\.com/ansible\-collections/community\.general/pull/7418](https\://github\.com/ansible\-collections/community\.general/pull/7418)\)\. +* keycloak\_identity\_provider \- it was not possible to reconfigure \(add\, remove\) mappers once they were created initially\. Removal was ignored\, adding new ones resulted in dropping the pre\-existing unmodified mappers\. Fix resolves the issue by supplying correct input to the internal update call \([https\://github\.com/ansible\-collections/community\.general/pull/7418](https\://github\.com/ansible\-collections/community\.general/pull/7418)\)\. +* keycloak\_realm \- add normalizations for enabledEventTypes and supportedLocales \([https\://github\.com/ansible\-collections/community\.general/pull/8224](https\://github\.com/ansible\-collections/community\.general/pull/8224)\)\. +* keycloak\_user \- when force is set\, but user does not exist\, do not try to delete it \([https\://github\.com/ansible\-collections/community\.general/pull/7696](https\://github\.com/ansible\-collections/community\.general/pull/7696)\)\. +* keycloak\_user\_federation \- fix diff of empty krbPrincipalAttribute \([https\://github\.com/ansible\-collections/community\.general/pull/8320](https\://github\.com/ansible\-collections/community\.general/pull/8320)\)\. +* ldap \- previously the order number \(if present\) was expected to follow an equals sign in the DN\. 
This makes it so the order number string is identified correctly anywhere within the DN \([https\://github\.com/ansible\-collections/community\.general/issues/7646](https\://github\.com/ansible\-collections/community\.general/issues/7646)\)\. +* linode inventory plugin \- add descriptive error message for linode inventory plugin \([https\://github\.com/ansible\-collections/community\.general/pull/8133](https\://github\.com/ansible\-collections/community\.general/pull/8133)\)\. +* log\_entries callback plugin \- replace ssl\.wrap\_socket that was removed from Python 3\.12 with code for creating a proper SSL context \([https\://github\.com/ansible\-collections/community\.general/pull/7542](https\://github\.com/ansible\-collections/community\.general/pull/7542)\)\. +* lvol \- test for output messages in both stdout and stderr \([https\://github\.com/ansible\-collections/community\.general/pull/7601](https\://github\.com/ansible\-collections/community\.general/pull/7601)\, [https\://github\.com/ansible\-collections/community\.general/issues/7182](https\://github\.com/ansible\-collections/community\.general/issues/7182)\)\. +* merge\_variables lookup plugin \- fix cross\-host merge\: provide access to foreign hosts\' variables from the perspective of the host that is performing the merge \([https\://github\.com/ansible\-collections/community\.general/pull/8303](https\://github\.com/ansible\-collections/community\.general/pull/8303)\)\. +* modprobe \- listing module files or modprobe files could trigger a FileNotFoundError if /etc/modprobe\.d or /etc/modules\-load\.d did not exist\. Relevant functions now return empty lists if the directories do not exist to avoid crashing the module \([https\://github\.com/ansible\-collections/community\.general/issues/7717](https\://github\.com/ansible\-collections/community\.general/issues/7717)\)\. +* mssql\_script \- make the module work with Python 2 \([https\://github\.com/ansible\-collections/community\.general/issues/7818](https\://github\.com/ansible\-collections/community\.general/issues/7818)\, [https\://github\.com/ansible\-collections/community\.general/pull/7821](https\://github\.com/ansible\-collections/community\.general/pull/7821)\)\. +* nmcli \- fix connection\.slave\-type being hard\-wired to bond instead of using the slave\_type parameter when the connection type is wifi \([https\://github\.com/ansible\-collections/community\.general/issues/7389](https\://github\.com/ansible\-collections/community\.general/issues/7389)\)\. +* ocapi\_utils\, oci\_utils\, redfish\_utils module utils \- replace type\(\) calls with isinstance\(\) calls \([https\://github\.com/ansible\-collections/community\.general/pull/7501](https\://github\.com/ansible\-collections/community\.general/pull/7501)\)\. +* onepassword lookup plugin \- failed for fields that were in sections and had uppercase letters in the label/ID\. Field lookups are now case insensitive in all cases \([https\://github\.com/ansible\-collections/community\.general/pull/7919](https\://github\.com/ansible\-collections/community\.general/pull/7919)\)\. +* onepassword lookup plugin \- field and section titles are now case insensitive when using op CLI version two or later\. This matches the behavior of version one \([https\://github\.com/ansible\-collections/community\.general/pull/7564](https\://github\.com/ansible\-collections/community\.general/pull/7564)\)\.
+* opentelemetry callback plugin \- close spans always \([https\://github\.com/ansible\-collections/community\.general/pull/8367](https\://github\.com/ansible\-collections/community\.general/pull/8367)\)\. +* opentelemetry callback plugin \- honour the disable\_logs option to avoid storing task results since they are not used regardless \([https\://github\.com/ansible\-collections/community\.general/pull/8373](https\://github\.com/ansible\-collections/community\.general/pull/8373)\)\. +* pacemaker\_cluster \- actually implement check mode\, which the module claims to support\. This means that until now the module also did changes in check mode \([https\://github\.com/ansible\-collections/community\.general/pull/8081](https\://github\.com/ansible\-collections/community\.general/pull/8081)\)\. +* pam\_limits \- when the file does not exist\, do not create it in check mode \([https\://github\.com/ansible\-collections/community\.general/issues/8050](https\://github\.com/ansible\-collections/community\.general/issues/8050)\, [https\://github\.com/ansible\-collections/community\.general/pull/8057](https\://github\.com/ansible\-collections/community\.general/pull/8057)\)\. +* pipx module utils \- change the CLI argument formatter for the pip\_args parameter \([https\://github\.com/ansible\-collections/community\.general/issues/7497](https\://github\.com/ansible\-collections/community\.general/issues/7497)\, [https\://github\.com/ansible\-collections/community\.general/pull/7506](https\://github\.com/ansible\-collections/community\.general/pull/7506)\)\. +* pkgin \- pkgin \(pkgsrc package manager used by SmartOS\) raises erratic exceptions and spurious changed\=true \([https\://github\.com/ansible\-collections/community\.general/pull/7971](https\://github\.com/ansible\-collections/community\.general/pull/7971)\)\. +* proxmox \- fix updating a container config if the setting does not already exist \([https\://github\.com/ansible\-collections/community\.general/pull/7872](https\://github\.com/ansible\-collections/community\.general/pull/7872)\)\. +* proxmox\_kvm \- fixed status check getting from node\-specific API endpoint \([https\://github\.com/ansible\-collections/community\.general/issues/7817](https\://github\.com/ansible\-collections/community\.general/issues/7817)\)\. +* proxmox\_kvm \- running state\=template will first check whether VM is already a template \([https\://github\.com/ansible\-collections/community\.general/pull/7792](https\://github\.com/ansible\-collections/community\.general/pull/7792)\)\. +* proxmox\_pool\_member \- absent state for type VM did not delete VMs from the pools \([https\://github\.com/ansible\-collections/community\.general/pull/7464](https\://github\.com/ansible\-collections/community\.general/pull/7464)\)\. +* puppet \- add option environment\_lang to set the environment language encoding\. Defaults to lang C\. It is recommended to set it to C\.UTF\-8 or en\_US\.UTF\-8 depending on what is available on your system\. 
\([https\://github\.com/ansible\-collections/community\.general/issues/8000](https\://github\.com/ansible\-collections/community\.general/issues/8000)\) +* redfish\_command \- fix usage of message parsing in SimpleUpdate and MultipartHTTPPushUpdate commands to treat the lack of a MessageId as no message \([https\://github\.com/ansible\-collections/community\.general/issues/7465](https\://github\.com/ansible\-collections/community\.general/issues/7465)\, [https\://github\.com/ansible\-collections/community\.general/pull/7471](https\://github\.com/ansible\-collections/community\.general/pull/7471)\)\. +* redfish\_info \- allow for a GET operation invoked by GetUpdateStatus to allow for an empty response body for cases where a service returns 204 No Content \([https\://github\.com/ansible\-collections/community\.general/issues/8003](https\://github\.com/ansible\-collections/community\.general/issues/8003)\)\. +* redfish\_info \- correct uncaught exception when attempting to retrieve Chassis information \([https\://github\.com/ansible\-collections/community\.general/pull/7952](https\://github\.com/ansible\-collections/community\.general/pull/7952)\)\. +* redhat\_subscription \- use the D\-Bus registration on RHEL 7 only on 7\.4 and + greater\; older versions of RHEL 7 do not have it + \([https\://github\.com/ansible\-collections/community\.general/issues/7622](https\://github\.com/ansible\-collections/community\.general/issues/7622)\, + [https\://github\.com/ansible\-collections/community\.general/pull/7624](https\://github\.com/ansible\-collections/community\.general/pull/7624)\)\. +* riak \- support riak admin sub\-command in newer Riak KV versions besides the legacy riak\-admin main command \([https\://github\.com/ansible\-collections/community\.general/pull/8211](https\://github\.com/ansible\-collections/community\.general/pull/8211)\)\. +* statusio\_maintenance \- fix error caused by incorrectly formed API data payload\. The module was raising \"Failed to create maintenance HTTP Error 400 Bad Request\" caused by a bad data type for date/time and deprecated dict keys \([https\://github\.com/ansible\-collections/community\.general/pull/7754](https\://github\.com/ansible\-collections/community\.general/pull/7754)\)\. +* terraform \- fix multiline string handling in complex variables \([https\://github\.com/ansible\-collections/community\.general/pull/7535](https\://github\.com/ansible\-collections/community\.general/pull/7535)\)\. +* to\_ini filter plugin \- disabling interpolation of ConfigParser to allow converting values with a \% sign \([https\://github\.com/ansible\-collections/community\.general/issues/8183](https\://github\.com/ansible\-collections/community\.general/issues/8183)\, [https\://github\.com/ansible\-collections/community\.general/pull/8185](https\://github\.com/ansible\-collections/community\.general/pull/8185)\)\. +* xml \- make module work with lxml 5\.1\.1\, which removed some internals that the module was relying on \([https\://github\.com/ansible\-collections/community\.general/pull/8169](https\://github\.com/ansible\-collections/community\.general/pull/8169)\)\. + + +### New Plugins + + +#### Become + +* community\.general\.run0 \- Systemd\'s run0\. + + +#### Callback + +* community\.general\.default\_without\_diff \- The default ansible callback without diff output\. +* community\.general\.timestamp \- Adds simple timestamp for each header\. + + +#### Connection + +* community\.general\.incus \- Run tasks in Incus instances via the Incus CLI \(see the usage sketch below\)\.
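+
+A minimal usage sketch for the new community.general.incus connection plugin referenced above. It assumes only the standard `ansible_connection` inventory mechanism; the instance name `mycontainer` and the group layout are illustrative and not taken from the plugin documentation.
+
+```yaml
+# inventory.yml (hypothetical example): target an Incus instance through the
+# new community.general.incus connection plugin. "mycontainer" stands for an
+# existing Incus instance name; everything else is standard Ansible inventory.
+all:
+  hosts:
+    mycontainer:
+      ansible_connection: community.general.incus
+```
+
+With such an inventory, `ansible -i inventory.yml mycontainer -m ansible.builtin.ping` would run the module inside the Incus instance via the Incus CLI instead of over SSH.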
+ + +#### Filter + +* community\.general\.from\_ini \- Converts INI text input into a dictionary\. +* community\.general\.lists\_difference \- Difference of lists with a predictive order\. +* community\.general\.lists\_intersect \- Intersection of lists with a predictive order\. +* community\.general\.lists\_symmetric\_difference \- Symmetric Difference of lists with a predictive order\. +* community\.general\.lists\_union \- Union of lists with a predictive order\. +* community\.general\.to\_ini \- Converts a dictionary to the INI file format\. + + +#### Lookup + +* community\.general\.github\_app\_access\_token \- Obtain short\-lived Github App Access tokens\. +* community\.general\.onepassword\_doc \- Fetch documents stored in 1Password\. + + +#### Test + +* community\.general\.fqdn\_valid \- Validates fully\-qualified domain names against RFC 1123\. + + +### New Modules + +* community\.general\.consul\_acl\_bootstrap \- Bootstrap ACLs in Consul\. +* community\.general\.consul\_auth\_method \- Manipulate Consul auth methods\. +* community\.general\.consul\_binding\_rule \- Manipulate Consul binding rules\. +* community\.general\.consul\_token \- Manipulate Consul tokens\. +* community\.general\.django\_command \- Run Django admin commands\. +* community\.general\.dnf\_config\_manager \- Enable or disable dnf repositories using config\-manager\. +* community\.general\.git\_config\_info \- Read git configuration\. +* community\.general\.gitlab\_group\_access\_token \- Manages GitLab group access tokens\. +* community\.general\.gitlab\_issue \- Create\, update\, or delete GitLab issues\. +* community\.general\.gitlab\_label \- Creates/updates/deletes GitLab Labels belonging to project or group\. +* community\.general\.gitlab\_milestone \- Creates/updates/deletes GitLab Milestones belonging to project or group\. +* community\.general\.gitlab\_project\_access\_token \- Manages GitLab project access tokens\. +* community\.general\.keycloak\_client\_rolescope \- Allows administration of Keycloak client roles scope to restrict the usage of certain roles to other specific client applications\. +* community\.general\.keycloak\_component\_info \- Retrieve component info in Keycloak\. +* community\.general\.keycloak\_realm\_rolemapping \- Allows administration of Keycloak realm role mappings into groups with the Keycloak API\. +* community\.general\.nomad\_token \- Manage Nomad ACL tokens\. +* community\.general\.proxmox\_node\_info \- Retrieve information about one or more Proxmox VE nodes\. +* community\.general\.proxmox\_storage\_contents\_info \- List content from a Proxmox VE storage\. +* community\.general\.usb\_facts \- Allows listing information about USB devices\. diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 119e04e170..f200798d93 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,6 +1,823 @@ -Placeholder changelog -===================== +=============================== +Community General Release Notes +=============================== -This file is a placeholder; a version-specific ``CHANGELOG-vX.rst`` will be generated during releases from fragments -under ``changelogs/fragments``. On release branches once a release has been created, consult the branch's version-specific -file for changes that have occurred in that branch. +.. contents:: Topics + +This changelog describes changes after version 8.0.0.
+ +v9.5.4 +====== + +Security Fixes +-------------- + +- keycloak_client - Sanitize ``saml.encryption.private.key`` so it does not show in the logs (https://github.com/ansible-collections/community.general/pull/9621). + +Bugfixes +-------- + +- redhat_subscription - do not try to unsubscribe (i.e. remove subscriptions) + when unregistering a system: newer versions of subscription-manager, as + available in EL 10 and Fedora 41+, do not support entitlements anymore, and + thus unsubscribing will fail + (https://github.com/ansible-collections/community.general/pull/9578). + +v9.5.3 +====== + +Release Summary +--------------- + +Regular bugfix release. + +Minor Changes +------------- + +- proxmox module utils - add method ``api_task_complete`` that can wait for task completion and return error message (https://github.com/ansible-collections/community.general/pull/9256). + +Security Fixes +-------------- + +- keycloak_authentication - API calls did not properly set the ``priority`` during update resulting in incorrectly sorted authentication flows. This apparently only affects Keycloak 25 or newer (https://github.com/ansible-collections/community.general/pull/9263). + +Bugfixes +-------- + +- dig lookup plugin - correctly handle ``NoNameserver`` exception (https://github.com/ansible-collections/community.general/pull/9363, https://github.com/ansible-collections/community.general/issues/9362). +- htpasswd - report changes when file permissions are adjusted (https://github.com/ansible-collections/community.general/issues/9485, https://github.com/ansible-collections/community.general/pull/9490). +- proxmox_disk - fix async method and make ``resize_disk`` method handle errors correctly (https://github.com/ansible-collections/community.general/pull/9256). +- proxmox_template - fix the wrong path called on ``proxmox_template.task_status`` (https://github.com/ansible-collections/community.general/issues/9276, https://github.com/ansible-collections/community.general/pull/9277). +- qubes connection plugin - fix the printing of debug information (https://github.com/ansible-collections/community.general/pull/9334). +- redfish_utils module utils - Fix ``VerifyBiosAttributes`` command on multi system resource nodes (https://github.com/ansible-collections/community.general/pull/9234). + +v9.5.2 +====== + +Release Summary +--------------- + +Regular bugfix release. + +Minor Changes +------------- + +- proxmox inventory plugin - fix urllib3 ``InsecureRequestWarnings`` not being suppressed when a token is used (https://github.com/ansible-collections/community.general/pull/9099). + +Bugfixes +-------- + +- dnf_config_manager - fix hanging when prompting to import GPG keys (https://github.com/ansible-collections/community.general/pull/9124, https://github.com/ansible-collections/community.general/issues/8830). +- dnf_config_manager - forces locale to ``C`` before module starts. If the locale was set to non-English, the output of the ``dnf config-manager`` could not be parsed (https://github.com/ansible-collections/community.general/pull/9157, https://github.com/ansible-collections/community.general/issues/9046). +- flatpak - force the locale language to ``C`` when running the flatpak command (https://github.com/ansible-collections/community.general/pull/9187, https://github.com/ansible-collections/community.general/issues/8883). 
+- github_key - in check mode, a faulty call to ``datetime.strftime(...)`` was being made, which generated an exception (https://github.com/ansible-collections/community.general/issues/9185). +- homebrew_cask - allow ``+`` symbol in Homebrew cask name validation regex (https://github.com/ansible-collections/community.general/pull/9128). +- keycloak_client - fix diff by removing code that turns the attributes dict which contains additional settings into a list (https://github.com/ansible-collections/community.general/pull/9077). +- keycloak_clientscope - fix diff and ``end_state`` by removing the code that turns the attributes dict, which contains additional config items, into a list (https://github.com/ansible-collections/community.general/pull/9082). +- keycloak_clientscope_type - sort the default and optional clientscope lists to improve the diff (https://github.com/ansible-collections/community.general/pull/9202). +- redfish_utils module utils - remove undocumented default applytime (https://github.com/ansible-collections/community.general/pull/9114). +- slack - fail if Slack API response is not OK with error message (https://github.com/ansible-collections/community.general/pull/9198). + +v9.5.1 +====== + +Release Summary +--------------- + +Regular bugfix release. + +Minor Changes +------------- + +- redfish_utils module utils - schedule a BIOS configuration job at next reboot when the BIOS config is changed (https://github.com/ansible-collections/community.general/pull/9012). + +Bugfixes +-------- + +- bitwarden lookup plugin - support BWS v0.3.0 syntax breaking change (https://github.com/ansible-collections/community.general/pull/9028). +- collection_version lookup plugin - use ``importlib`` directly instead of the deprecated ``ansible.module_utils.compat.importlib``, which was removed in ansible-core 2.19 (https://github.com/ansible-collections/community.general/pull/9084). +- gitlab_label - update label's color (https://github.com/ansible-collections/community.general/pull/9010). +- keycloak_clientscope_type - fix detect changes in check mode (https://github.com/ansible-collections/community.general/issues/9092, https://github.com/ansible-collections/community.general/pull/9093). +- keycloak_group - fix crash caused in subgroup creation. The crash was caused by a missing or empty ``subGroups`` property in Keycloak ≥23 (https://github.com/ansible-collections/community.general/issues/8788, https://github.com/ansible-collections/community.general/pull/8979). +- modprobe - fix check mode not being honored for ``persistent`` option (https://github.com/ansible-collections/community.general/issues/9051, https://github.com/ansible-collections/community.general/pull/9052). +- one_host - fix if statements for cases when ``ID=0`` (https://github.com/ansible-collections/community.general/issues/1199, https://github.com/ansible-collections/community.general/pull/8907). +- one_image - fix module failing due to a class method typo (https://github.com/ansible-collections/community.general/pull/9056). +- one_image_info - fix module failing due to a class method typo (https://github.com/ansible-collections/community.general/pull/9056). +- one_vnet - fix module failing due to a variable typo (https://github.com/ansible-collections/community.general/pull/9019).
+- redfish_utils module utils - fix issue with URI parsing to gracefully handle trailing slashes when extracting member identifiers (https://github.com/ansible-collections/community.general/issues/9047, https://github.com/ansible-collections/community.general/pull/9057). + +v9.5.0 +====== + +Release Summary +--------------- + +Regular bugfix and feature release. + +Please note that this is the last feature release for community.general 9.x.y. +From now on, new features will only go into community.general 10.x.y. + +Minor Changes +------------- + +- dig lookup plugin - add ``port`` option to specify DNS server port (https://github.com/ansible-collections/community.general/pull/8966). +- flatpak - improve the parsing of Flatpak application IDs based on official guidelines (https://github.com/ansible-collections/community.general/pull/8909). +- gio_mime - adjust code ahead of the old ``VarDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8855). +- gitlab_deploy_key - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). +- gitlab_group - add many new parameters (https://github.com/ansible-collections/community.general/pull/8908). +- gitlab_group - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). +- gitlab_issue - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). +- gitlab_merge_request - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). +- gitlab_runner - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). +- icinga2_host - replace loop with dict comprehension (https://github.com/ansible-collections/community.general/pull/8876). +- jira - adjust code ahead of the old ``VarDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8856). +- keycloak_client - add ``client-x509`` choice to ``client_authenticator_type`` (https://github.com/ansible-collections/community.general/pull/8973). +- keycloak_user_federation - add the user federation config parameter ``referral`` to the module arguments (https://github.com/ansible-collections/community.general/pull/8954). +- memset_dns_reload - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). +- memset_memstore_info - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). +- memset_server_info - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). +- memset_zone - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). +- memset_zone_domain - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). +- memset_zone_record - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). +- nmcli - add ``conn_enable`` param to reload connection (https://github.com/ansible-collections/community.general/issues/3752, https://github.com/ansible-collections/community.general/issues/8704, https://github.com/ansible-collections/community.general/pull/8897).
+- nmcli - add ``state=up`` and ``state=down`` to enable/disable connections (https://github.com/ansible-collections/community.general/issues/3752, https://github.com/ansible-collections/community.general/issues/8704, https://github.com/ansible-collections/community.general/issues/7152, https://github.com/ansible-collections/community.general/pull/8897). +- nmcli - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). +- npm - add ``force`` parameter to allow ``--force`` (https://github.com/ansible-collections/community.general/pull/8885). +- one_image - add option ``persistent`` to manage image persistence (https://github.com/ansible-collections/community.general/issues/3578, https://github.com/ansible-collections/community.general/pull/8889). +- one_image - extend the XSD schema to make it return a lot more info about the image (https://github.com/ansible-collections/community.general/pull/8889). +- one_image - refactor code to make it more similar to ``one_template`` and ``one_vnet`` (https://github.com/ansible-collections/community.general/pull/8889). +- one_image_info - extend the XSD schema to make it return a lot more info about the image (https://github.com/ansible-collections/community.general/pull/8889). +- one_image_info - refactor code to make it more similar to ``one_template`` and ``one_vnet`` (https://github.com/ansible-collections/community.general/pull/8889). +- open_iscsi - allow login to a portal with multiple targets without specifying any of them (https://github.com/ansible-collections/community.general/pull/8719). +- opennebula.py - add VM ``id`` and VM ``host`` to inventory host data (https://github.com/ansible-collections/community.general/pull/8532). +- passwordstore lookup plugin - add subkey creation/update support (https://github.com/ansible-collections/community.general/pull/8952). +- proxmox inventory plugin - clean up authentication code (https://github.com/ansible-collections/community.general/pull/8917). +- redfish_command - add handling of the ``PasswordChangeRequired`` message from services in the ``UpdateUserPassword`` command to directly modify the user's password if the requested user is the one invoking the operation (https://github.com/ansible-collections/community.general/issues/8652, https://github.com/ansible-collections/community.general/pull/8653). +- redfish_config - remove ``CapacityBytes`` from required parameters of the ``CreateVolume`` command (https://github.com/ansible-collections/community.general/pull/8956). +- redfish_config - add parameter ``storage_none_volume_deletion`` to ``CreateVolume`` command in order to control the automatic deletion of non-RAID volumes (https://github.com/ansible-collections/community.general/pull/8990). +- redfish_info - adds ``RedfishURI`` and ``StorageId`` to Disk inventory (https://github.com/ansible-collections/community.general/pull/8937). +- scaleway_container - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). +- scaleway_container_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). +- scaleway_container_namespace - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). +- scaleway_container_namespace_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). 
+- scaleway_container_registry - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). +- scaleway_container_registry_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). +- scaleway_function - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). +- scaleway_function_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). +- scaleway_function_namespace - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). +- scaleway_function_namespace_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). +- scaleway_user_data - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). +- udm_dns_record - replace loop with ``dict.update()`` (https://github.com/ansible-collections/community.general/pull/8876). + +Deprecated Features +------------------- + +- hipchat - the HipChat service has been discontinued and the self-hosted variant has been End of Life since 2020. The module is therefore deprecated and will be removed from community.general 11.0.0 if nobody provides compelling reasons to still keep it (https://github.com/ansible-collections/community.general/pull/8919). + +Bugfixes +-------- + +- cloudflare_dns - fix changing Cloudflare SRV records (https://github.com/ansible-collections/community.general/issues/8679, https://github.com/ansible-collections/community.general/pull/8948). +- cmd_runner module utils - call to ``get_best_parsable_locales()`` was missing a parameter (https://github.com/ansible-collections/community.general/pull/8929). +- dig lookup plugin - fix using only the last nameserver specified (https://github.com/ansible-collections/community.general/pull/8970). +- django_command - option ``command`` is now split lexically before being passed to the underlying ``PythonRunner`` (https://github.com/ansible-collections/community.general/pull/8944). +- homectl - the module now tries to use ``legacycrypt`` on Python 3.13+ (https://github.com/ansible-collections/community.general/issues/4691, https://github.com/ansible-collections/community.general/pull/8987). +- ini_file - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925). +- ipa_host - add ``force_create``, fix ``enabled`` and ``disabled`` states (https://github.com/ansible-collections/community.general/issues/1094, https://github.com/ansible-collections/community.general/pull/8920). +- ipa_hostgroup - fix ``enabled`` and ``disabled`` states (https://github.com/ansible-collections/community.general/issues/8408, https://github.com/ansible-collections/community.general/pull/8900). +- java_keystore - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925). +- jenkins_plugin - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925). 
+- kdeconfig - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925). +- keycloak_realm - fix change detection in check mode by sorting the lists in the realms beforehand (https://github.com/ansible-collections/community.general/pull/8877). +- keycloak_user_federation - add module argument allowing users to configure the update mode for the parameter ``bindCredential`` (https://github.com/ansible-collections/community.general/pull/8898). +- keycloak_user_federation - minimize change detection by setting ``krbPrincipalAttribute`` to ``''`` in Keycloak responses if missing (https://github.com/ansible-collections/community.general/pull/8785). +- keycloak_user_federation - remove ``lastSync`` parameter from Keycloak responses to minimize diff/changes (https://github.com/ansible-collections/community.general/pull/8812). +- keycloak_userprofile - fix empty response when fetching userprofile component by removing ``parent=parent_id`` filter (https://github.com/ansible-collections/community.general/pull/8923). +- keycloak_userprofile - improve diff by deserializing the fetched ``kc.user.profile.config`` and serializing it only when sending it back (https://github.com/ansible-collections/community.general/pull/8940). +- lxd_container - fix bug introduced in previous commit (https://github.com/ansible-collections/community.general/pull/8895, https://github.com/ansible-collections/community.general/issues/8888). +- one_service - fix service creation after it was deleted with the ``unique`` parameter (https://github.com/ansible-collections/community.general/issues/3137, https://github.com/ansible-collections/community.general/pull/8887). +- pam_limits - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925). +- python_runner module utils - parameter ``path_prefix`` was being handled as a string when it should be a list (https://github.com/ansible-collections/community.general/pull/8944). +- udm_user - the module now tries to use ``legacycrypt`` on Python 3.13+ (https://github.com/ansible-collections/community.general/issues/4690, https://github.com/ansible-collections/community.general/pull/8987). + +New Modules +----------- + +- community.general.ipa_getkeytab - Manage keytab file in FreeIPA. + +v9.4.0 +====== + +Release Summary +--------------- + +Bugfix and feature release. + +Minor Changes +------------- + +- MH module utils - add parameter ``when`` to ``cause_changes`` decorator (https://github.com/ansible-collections/community.general/pull/8766). +- MH module utils - minor refactor in decorators (https://github.com/ansible-collections/community.general/pull/8766). +- alternatives - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). +- apache2_mod_proxy - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). +- apache2_mod_proxy - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). +- consul_acl - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). 
+- copr - added ``includepkgs`` and ``excludepkgs`` parameters to limit the list of packages fetched or excluded from the repository (https://github.com/ansible-collections/community.general/pull/8779). +- credstash lookup plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). +- csv module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). +- deco MH module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). +- etcd3 - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). +- gio_mime - mute the old ``VarDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8776). +- gitlab_group - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). +- gitlab_project - add option ``issues_access_level`` to enable/disable project issues (https://github.com/ansible-collections/community.general/pull/8760). +- gitlab_project - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). +- gitlab_project - sorted parameters in order to avoid future merge conflicts (https://github.com/ansible-collections/community.general/pull/8759). +- hashids filter plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). +- hwc_ecs_instance - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). +- hwc_evs_disk - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). +- hwc_vpc_eip - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). +- hwc_vpc_peering_connect - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). +- hwc_vpc_port - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). +- hwc_vpc_subnet - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). +- imc_rest - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). +- ipa_otptoken - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). +- jira - mute the old ``VarDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8776). +- jira - replace deprecated params when using decorator ``cause_changes`` (https://github.com/ansible-collections/community.general/pull/8791). +- keep_keys filter plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). +- keycloak module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). +- keycloak_client - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). 
+- keycloak_clientscope - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). +- keycloak_identity_provider - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). +- keycloak_user_federation - add module argument allowing users to opt out of the removal of unspecified mappers, for example to keep the Keycloak default mappers (https://github.com/ansible-collections/community.general/pull/8764). +- keycloak_user_federation - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). +- keycloak_user_federation - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). +- keycloak_user_federation - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). +- linode - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). +- lxc_container - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). +- lxd_container - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). +- manageiq_provider - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). +- ocapi_utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). +- one_service - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). +- one_vm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). +- onepassword lookup plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). +- pids - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). +- pipx - added new states ``install_all``, ``uninject``, ``upgrade_shared``, ``pin``, and ``unpin`` (https://github.com/ansible-collections/community.general/pull/8809). +- pipx - added parameter ``global`` to module (https://github.com/ansible-collections/community.general/pull/8793). +- pipx - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). +- pipx_info - added parameter ``global`` to module (https://github.com/ansible-collections/community.general/pull/8793). +- pipx_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). +- pkg5_publisher - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). +- proxmox - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). +- proxmox_disk - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). +- proxmox_kvm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). 
+- proxmox_kvm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). +- redfish_utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). +- redfish_utils module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). +- redis cache plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). +- remove_keys filter plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). +- replace_keys filter plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). +- scaleway - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). +- scaleway module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). +- scaleway_compute - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). +- scaleway_ip - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). +- scaleway_lb - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). +- scaleway_security_group - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). +- scaleway_security_group - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). +- scaleway_user_data - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). +- sensu_silence - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). +- snmp_facts - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). +- sorcery - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). +- ufw - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). +- unsafe plugin utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). +- vardict module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). +- vars MH module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). +- vmadm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). + +Deprecated Features +------------------- + +- MH decorator cause_changes module utils - deprecate parameters ``on_success`` and ``on_failure`` (https://github.com/ansible-collections/community.general/pull/8791). 
+- pipx - support for versions of the command line tool ``pipx`` older than ``1.7.0`` is deprecated and will be removed in community.general 11.0.0 (https://github.com/ansible-collections/community.general/pull/8793). +- pipx_info - support for versions of the command line tool ``pipx`` older than ``1.7.0`` is deprecated and will be removed in community.general 11.0.0 (https://github.com/ansible-collections/community.general/pull/8793). + +Bugfixes +-------- + +- gitlab_group_access_token - fix crash in check mode caused by attempted access to a newly created access token (https://github.com/ansible-collections/community.general/pull/8796). +- gitlab_project - fix ``container_expiration_policy`` not being applied when creating a new project (https://github.com/ansible-collections/community.general/pull/8790). +- gitlab_project - fix crash caused by old Gitlab projects not having a ``container_expiration_policy`` attribute (https://github.com/ansible-collections/community.general/pull/8790). +- gitlab_project_access_token - fix crash in check mode caused by attempted access to a newly created access token (https://github.com/ansible-collections/community.general/pull/8796). +- keycloak_realm_key - fix invalid usage of ``parent_id`` (https://github.com/ansible-collections/community.general/issues/7850, https://github.com/ansible-collections/community.general/pull/8823). +- keycloak_user_federation - fix key error when removing mappers during an update and new mappers are specified in the module args (https://github.com/ansible-collections/community.general/pull/8762). +- keycloak_user_federation - fix the ``UnboundLocalError`` that occurs when an ID is provided for a user federation mapper (https://github.com/ansible-collections/community.general/pull/8831). +- keycloak_user_federation - sort desired and after mapper list by name (analog to before mapper list) to minimize diff and make change detection more accurate (https://github.com/ansible-collections/community.general/pull/8761). +- proxmox inventory plugin - fixed a possible error on concatenating responses from proxmox. In case an API call unexpectedly returned an empty result, the inventory failed with a fatal error. Added check for empty response (https://github.com/ansible-collections/community.general/issues/8798, https://github.com/ansible-collections/community.general/pull/8794). + +New Modules +----------- + +- community.general.keycloak_userprofile - Allows managing Keycloak User Profiles. +- community.general.one_vnet - Manages OpenNebula virtual networks. + +v9.3.0 +====== + +Release Summary +--------------- + +Regular bugfix and feature release. + +Minor Changes +------------- + +- cgroup_memory_recap, hipchat, jabber, log_plays, loganalytics, logentries, logstash, slack, splunk, sumologic, syslog_json callback plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8628). +- chef_databag, consul_kv, cyberarkpassword, dsv, etcd, filetree, hiera, onepassword, onepassword_doc, onepassword_raw, passwordstore, redis, shelvefile, tss lookup plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8626). +- chroot, funcd, incus, iocage, jail, lxc, lxd, qubes, zone connection plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8627). 
+- cobbler, linode, lxd, nmap, online, scaleway, stackpath_compute, virtualbox inventory plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8625). +- doas, dzdo, ksu, machinectl, pbrun, pfexec, pmrun, sesu, sudosu become plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8623). +- gconftool2 - make use of ``ModuleHelper`` features to simplify code (https://github.com/ansible-collections/community.general/pull/8711). +- gitlab_project - add option ``container_expiration_policy`` to schedule container registry cleanup (https://github.com/ansible-collections/community.general/pull/8674). +- gitlab_project - add option ``model_registry_access_level`` to disable model registry (https://github.com/ansible-collections/community.general/pull/8688). +- gitlab_project - add option ``pages_access_level`` to disable project pages (https://github.com/ansible-collections/community.general/pull/8688). +- gitlab_project - add option ``repository_access_level`` to disable project repository (https://github.com/ansible-collections/community.general/pull/8674). +- gitlab_project - add option ``service_desk_enabled`` to disable service desk (https://github.com/ansible-collections/community.general/pull/8688). +- locale_gen - add support for multiple locales (https://github.com/ansible-collections/community.general/issues/8677, https://github.com/ansible-collections/community.general/pull/8682). +- memcached, pickle, redis, yaml cache plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8624). +- opentelemetry callback plugin - fix default value for ``store_spans_in_file`` causing traces to be produced to a file named ``None`` (https://github.com/ansible-collections/community.general/issues/8566, https://github.com/ansible-collections/community.general/pull/8741). +- passwordstore lookup plugin - add the current user to the lockfile file name to address issues on multi-user systems (https://github.com/ansible-collections/community.general/pull/8689). +- pipx - add parameter ``suffix`` to module (https://github.com/ansible-collections/community.general/pull/8675, https://github.com/ansible-collections/community.general/issues/8656). +- pkgng - add option ``use_globs`` (default ``true``) to optionally disable glob patterns (https://github.com/ansible-collections/community.general/issues/8632, https://github.com/ansible-collections/community.general/pull/8633). +- proxmox inventory plugin - add new fact for LXC interface details (https://github.com/ansible-collections/community.general/pull/8713). +- redis, redis_info - add ``client_cert`` and ``client_key`` options to specify path to certificate for Redis authentication (https://github.com/ansible-collections/community.general/pull/8654). + +Bugfixes +-------- + +- gitlab_runner - fix ``paused`` parameter being ignored (https://github.com/ansible-collections/community.general/pull/8648). +- homebrew_cask - fix ``upgrade_all`` returns ``changed`` when nothing upgraded (https://github.com/ansible-collections/community.general/issues/8707, https://github.com/ansible-collections/community.general/pull/8708). +- keycloak_user_federation - get cleartext IDP ``clientSecret`` from full realm info to detect changes to it (https://github.com/ansible-collections/community.general/issues/8294, https://github.com/ansible-collections/community.general/pull/8735). 
+- keycloak_user_federation - remove existing user federation mappers if they are not present in the federation configuration and will not be updated (https://github.com/ansible-collections/community.general/issues/7169, https://github.com/ansible-collections/community.general/pull/8695). +- proxmox - fixed an issue where the new volume handling incorrectly converted ``null`` values into ``"None"`` strings (https://github.com/ansible-collections/community.general/pull/8646). +- proxmox - fixed an issue where volume strings were overwritten instead of appended to in the new ``build_volume()`` method (https://github.com/ansible-collections/community.general/pull/8646). +- proxmox - removed the forced conversion of non-string values to strings to be consistent with the module documentation (https://github.com/ansible-collections/community.general/pull/8646). + +New Modules +----------- + +- community.general.bootc_manage - Bootc Switch and Upgrade. +- community.general.homebrew_services - Services manager for Homebrew. +- community.general.keycloak_realm_keys_metadata_info - Allows obtaining Keycloak realm keys metadata via Keycloak API. + +v9.2.0 +====== + +Release Summary +--------------- + +Regular bugfix and feature release. + +Minor Changes +------------- + +- CmdRunner module utils - the parameter ``force_lang`` now supports the special value ``auto``, which will automatically try to determine the best parsable locale in the system (https://github.com/ansible-collections/community.general/pull/8517). +- proxmox - add ``disk_volume`` and ``mount_volumes`` keys for better readability (https://github.com/ansible-collections/community.general/pull/8542). +- proxmox - translate the old ``disk`` and ``mounts`` keys to the new handling internally (https://github.com/ansible-collections/community.general/pull/8542). +- proxmox_template - small refactor in logic for determining whether a template exists or not (https://github.com/ansible-collections/community.general/pull/8516). +- redfish_* modules - adds ``ciphers`` option for custom cipher selection (https://github.com/ansible-collections/community.general/pull/8533). +- sudosu become plugin - added an option (``alt_method``) to enhance compatibility with more versions of ``su`` (https://github.com/ansible-collections/community.general/pull/8214). +- virtualbox inventory plugin - expose a new parameter ``enable_advanced_group_parsing`` to change how the VirtualBox dynamic inventory parses VM groups (https://github.com/ansible-collections/community.general/issues/8508, https://github.com/ansible-collections/community.general/pull/8510). +- wdc_redfish_command - minor change to handle upgrade file for Redfish WD platforms (https://github.com/ansible-collections/community.general/pull/8444). + +Bugfixes +-------- + +- bitwarden lookup plugin - fix ``KeyError`` in ``search_field`` (https://github.com/ansible-collections/community.general/issues/8549, https://github.com/ansible-collections/community.general/pull/8557). +- keycloak_clientscope - remove IDs from clientscope and its protocol mappers on comparison for changed check (https://github.com/ansible-collections/community.general/pull/8545). +- nsupdate - fix 'index out of range' error when changing NS records by falling back to the authority section of the response (https://github.com/ansible-collections/community.general/issues/8612, https://github.com/ansible-collections/community.general/pull/8614). 
+- proxmox - fix idempotency on creation of mount volumes using Proxmox' special ``:`` syntax (https://github.com/ansible-collections/community.general/issues/8407, https://github.com/ansible-collections/community.general/pull/8542). +- redfish_utils module utils - do not fail when language is not exactly "en" (https://github.com/ansible-collections/community.general/pull/8613). + +New Plugins +----------- + +Filter +~~~~~~ + +- community.general.reveal_ansible_type - Return input type. + +Test +~~~~ + +- community.general.ansible_type - Validate input type. + +v9.1.0 +====== + +Release Summary +--------------- + +Regular feature and bugfix release. + +Minor Changes +------------- + +- CmdRunner module util - argument formats can be specified as plain functions without calling ``cmd_runner_fmt.as_func()`` (https://github.com/ansible-collections/community.general/pull/8479). +- ansible_galaxy_install - add upgrade feature (https://github.com/ansible-collections/community.general/pull/8431, https://github.com/ansible-collections/community.general/issues/8351). +- cargo - add option ``directory``, which allows source directory to be specified (https://github.com/ansible-collections/community.general/pull/8480). +- cmd_runner module utils - add decorator ``cmd_runner_fmt.stack`` (https://github.com/ansible-collections/community.general/pull/8415). +- cmd_runner_fmt module utils - simplify implementation of ``cmd_runner_fmt.as_bool_not()`` (https://github.com/ansible-collections/community.general/pull/8512). +- ipa_dnsrecord - adds ``SSHFP`` record type for managing SSH fingerprints in FreeIPA DNS (https://github.com/ansible-collections/community.general/pull/8404). +- keycloak_client - assign auth flow by name (https://github.com/ansible-collections/community.general/pull/8428). +- openbsd_pkg - adds diff support to show changes in installed package list. This does not yet work for check mode (https://github.com/ansible-collections/community.general/pull/8402). +- proxmox - allow specification of the API port when using proxmox_* (https://github.com/ansible-collections/community.general/issues/8440, https://github.com/ansible-collections/community.general/pull/8441). +- proxmox_vm_info - add ``network`` option to retrieve current network information (https://github.com/ansible-collections/community.general/pull/8471). +- redfish_command - add ``wait`` and ``wait_timeout`` options to allow a user to block a command until a service is accessible after performing the requested command (https://github.com/ansible-collections/community.general/issues/8051, https://github.com/ansible-collections/community.general/pull/8434). +- redfish_info - add command ``CheckAvailability`` to check if a service is accessible (https://github.com/ansible-collections/community.general/issues/8051, https://github.com/ansible-collections/community.general/pull/8434). +- redis_info - adds support for getting cluster info (https://github.com/ansible-collections/community.general/pull/8464). + +Deprecated Features +------------------- + +- CmdRunner module util - setting the value of the ``ignore_none`` parameter within a ``CmdRunner`` context is deprecated and that feature should be removed in community.general 12.0.0 (https://github.com/ansible-collections/community.general/pull/8479). +- git_config - the ``list_all`` option has been deprecated and will be removed in community.general 11.0.0. 
Use the ``community.general.git_config_info`` module instead (https://github.com/ansible-collections/community.general/pull/8453). +- git_config - using ``state=present`` without providing ``value`` is deprecated and will be disallowed in community.general 11.0.0. Use the ``community.general.git_config_info`` module instead to read a value (https://github.com/ansible-collections/community.general/pull/8453). + +Bugfixes +-------- + +- git_config - fix behavior of ``state=absent`` if ``value`` is present (https://github.com/ansible-collections/community.general/issues/8436, https://github.com/ansible-collections/community.general/pull/8452). +- keycloak_realm - add normalizations for ``attributes`` and ``protocol_mappers`` (https://github.com/ansible-collections/community.general/pull/8496). +- launchd - correctly report changed status in check mode (https://github.com/ansible-collections/community.general/pull/8406). +- opennebula inventory plugin - fix invalid reference to IP when inventory runs against NICs with no IPv4 address (https://github.com/ansible-collections/community.general/pull/8489). +- opentelemetry callback - do not save the JSON response when using the ``ansible.builtin.uri`` module (https://github.com/ansible-collections/community.general/pull/8430). +- opentelemetry callback - do not save the content response when using the ``ansible.builtin.slurp`` module (https://github.com/ansible-collections/community.general/pull/8430). +- pacman - do not fail if an empty list of packages has been provided and there is nothing to do (https://github.com/ansible-collections/community.general/pull/8514). + +Known Issues +------------ + +- homectl - the module does not work under Python 3.13 or newer, since it relies on the removed ``crypt`` standard library module (https://github.com/ansible-collections/community.general/issues/4691, https://github.com/ansible-collections/community.general/pull/8497). +- udm_user - the module does not work under Python 3.13 or newer, since it relies on the removed ``crypt`` standard library module (https://github.com/ansible-collections/community.general/issues/4690, https://github.com/ansible-collections/community.general/pull/8497). + +New Plugins +----------- + +Filter +~~~~~~ + +- community.general.keep_keys - Keep specific keys from dictionaries in a list. +- community.general.remove_keys - Remove specific keys from dictionaries in a list. +- community.general.replace_keys - Replace specific keys in a list of dictionaries. + +New Modules +----------- + +- community.general.consul_agent_check - Add, modify, and delete checks within a consul cluster. +- community.general.consul_agent_service - Add, modify and delete services within a consul cluster. +- community.general.django_check - Wrapper for C(django-admin check). +- community.general.django_createcachetable - Wrapper for C(django-admin createcachetable). + +v9.0.1 +====== + +Release Summary +--------------- + +Bugfix release for inclusion in Ansible 10.0.0rc1. + +Minor Changes +------------- + +- ansible_galaxy_install - minor refactor in the module (https://github.com/ansible-collections/community.general/pull/8413). + +Bugfixes +-------- + +- cpanm - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). 
+- django module utils - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). +- gconftool2_info - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). +- homebrew - do not fail when brew prints warnings (https://github.com/ansible-collections/community.general/pull/8406, https://github.com/ansible-collections/community.general/issues/7044). +- hponcfg - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). +- kernel_blacklist - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). +- keycloak_client - fix TypeError when sanitizing the ``saml.signing.private.key`` attribute in the module's diff or state output. The ``sanitize_cr`` function expected a dict where in some cases a list might occur (https://github.com/ansible-collections/community.general/pull/8403). +- locale_gen - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). +- mksysb - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). +- pipx_info - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). +- snap - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). +- snap_alias - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). + +v9.0.0 +====== + +Release Summary +--------------- + +This is release 9.0.0 of ``community.general``, released on 2024-05-20. + +Minor Changes +------------- + +- PythonRunner module utils - specialisation of ``CmdRunner`` to execute Python scripts (https://github.com/ansible-collections/community.general/pull/8289). +- Use offset-aware ``datetime.datetime`` objects (with timezone UTC) instead of offset-naive UTC timestamps, which are deprecated in Python 3.12 (https://github.com/ansible-collections/community.general/pull/8222). +- aix_lvol - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264). +- apt_rpm - add new states ``latest`` and ``present_not_latest``. The value ``latest`` is equivalent to the current behavior of ``present``, which will upgrade a package if a newer version exists. ``present_not_latest`` does what most users would expect ``present`` to do: it does not upgrade if the package is already installed. 
The current behavior of ``present`` will be deprecated in a later version, and eventually changed to that of ``present_not_latest`` (https://github.com/ansible-collections/community.general/issues/8217, https://github.com/ansible-collections/community.general/pull/8247). +- apt_rpm - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264). +- bitwarden lookup plugin - add ``bw_session`` option, to pass session key instead of reading from env (https://github.com/ansible-collections/community.general/pull/7994). +- bitwarden lookup plugin - add support to filter by organization ID (https://github.com/ansible-collections/community.general/pull/8188). +- bitwarden lookup plugin - allow fetching all records of a given collection ID by passing an empty value for ``search_value`` when ``collection_id`` is provided (https://github.com/ansible-collections/community.general/pull/8013). +- bitwarden lookup plugin - when looking for items using an item ID, the item is now accessed directly with ``bw get item`` instead of searching through all items. This doubles the lookup speed (https://github.com/ansible-collections/community.general/pull/7468). +- btrfs_subvolume - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264). +- cmd_runner module_utils - add validation for minimum and maximum length in the value passed to ``cmd_runner_fmt.as_list()`` (https://github.com/ansible-collections/community.general/pull/8288). +- consul_auth_method, consul_binding_rule, consul_policy, consul_role, consul_session, consul_token - added action group ``community.general.consul`` (https://github.com/ansible-collections/community.general/pull/7897). +- consul_policy - added support for diff and check mode (https://github.com/ansible-collections/community.general/pull/7878). +- consul_policy, consul_role, consul_session - removed dependency on ``requests`` and factored out common parts (https://github.com/ansible-collections/community.general/pull/7826, https://github.com/ansible-collections/community.general/pull/7878). +- consul_role - ``node_identities`` now expects a ``node_name`` option to match the Consul API; the old ``name`` is still supported as an alias (https://github.com/ansible-collections/community.general/pull/7878). +- consul_role - ``service_identities`` now expects a ``service_name`` option to match the Consul API; the old ``name`` is still supported as an alias (https://github.com/ansible-collections/community.general/pull/7878). +- consul_role - added support for diff mode (https://github.com/ansible-collections/community.general/pull/7878). +- consul_role - added support for templated policies (https://github.com/ansible-collections/community.general/pull/7878). +- elastic callback plugin - close elastic client to not leak resources (https://github.com/ansible-collections/community.general/pull/7517). +- filesystem - add bcachefs support (https://github.com/ansible-collections/community.general/pull/8126). +- gandi_livedns - adds support for personal access tokens (https://github.com/ansible-collections/community.general/issues/7639, https://github.com/ansible-collections/community.general/pull/8337). +- gconftool2 - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226). 
+- git_config - allow multiple git configs for the same name with the new ``add_mode`` option (https://github.com/ansible-collections/community.general/pull/7260). +- git_config - the ``after`` and ``before`` fields in the ``diff`` of the return value can be a list instead of a string in case more configs with the same key are affected (https://github.com/ansible-collections/community.general/pull/7260). +- git_config - when a value is unset, all configs with the same key are unset (https://github.com/ansible-collections/community.general/pull/7260). +- gitlab modules - add ``ca_path`` option (https://github.com/ansible-collections/community.general/pull/7472). +- gitlab modules - remove duplicate ``gitlab`` package check (https://github.com/ansible-collections/community.general/pull/7486). +- gitlab_deploy_key, gitlab_group_members, gitlab_group_variable, gitlab_hook, gitlab_instance_variable, gitlab_project_badge, gitlab_project_variable, gitlab_user - improve API pagination and compatibility with different versions of ``python-gitlab`` (https://github.com/ansible-collections/community.general/pull/7790). +- gitlab_hook - adds ``releases_events`` parameter for supporting Releases events triggers on GitLab hooks (https://github.com/ansible-collections/community.general/pull/7956). +- gitlab_runner - add support for new runner creation workflow (https://github.com/ansible-collections/community.general/pull/7199). +- homebrew - adds ``force_formula`` parameter to disambiguate a formula from a cask of the same name (https://github.com/ansible-collections/community.general/issues/8274). +- homebrew, homebrew_cask - refactor common argument validation logic into a dedicated ``homebrew`` module utils (https://github.com/ansible-collections/community.general/issues/8323, https://github.com/ansible-collections/community.general/pull/8324). +- icinga2 inventory plugin - add Jinja2 templating support to ``url``, ``user``, and ``password`` parameters (https://github.com/ansible-collections/community.general/issues/7074, https://github.com/ansible-collections/community.general/pull/7996). +- icinga2 inventory plugin - adds new parameter ``group_by_hostgroups`` in order to make grouping by Icinga2 hostgroups optional (https://github.com/ansible-collections/community.general/pull/7998). +- ini_file - add an optional parameter ``section_has_values``. If the target ini file contains more than one ``section``, use ``section_has_values`` to specify which one should be updated (https://github.com/ansible-collections/community.general/pull/7505). +- ini_file - support optional spaces between section names and their surrounding brackets (https://github.com/ansible-collections/community.general/pull/8075). +- installp - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264). +- ipa_config - adds ``passkey`` choice to ``ipauserauthtype`` parameter's choices (https://github.com/ansible-collections/community.general/pull/7588). +- ipa_dnsrecord - adds ability to manage NS record types (https://github.com/ansible-collections/community.general/pull/7737). +- ipa_pwpolicy - refactor module and exchange a sequence of ``if`` statements with a ``for`` loop (https://github.com/ansible-collections/community.general/pull/7723). 
+- ipa_pwpolicy - update module to support ``maxrepeat``, ``maxsequence``, ``dictcheck``, ``usercheck``, ``gracelimit`` parameters in FreeIPA password policies (https://github.com/ansible-collections/community.general/pull/7723). +- ipa_sudorule - adds options to include denied commands or command groups (https://github.com/ansible-collections/community.general/pull/7415). +- ipa_user - adds ``idp`` and ``passkey`` choice to ``ipauserauthtype`` parameter's choices (https://github.com/ansible-collections/community.general/pull/7589). +- irc - add ``validate_certs`` option, and rename ``use_ssl`` to ``use_tls``, while keeping ``use_ssl`` as an alias. The default value for ``validate_certs`` is ``false`` for backwards compatibility. We recommend that every user of this module explicitly set ``use_tls=true`` and ``validate_certs=true`` whenever possible, especially when communicating with IRC servers over the internet (https://github.com/ansible-collections/community.general/pull/7550). +- java_cert - add ``cert_content`` argument (https://github.com/ansible-collections/community.general/pull/8153). +- java_cert - enable ``owner``, ``group``, ``mode``, and other generic file arguments (https://github.com/ansible-collections/community.general/pull/8116). +- kernel_blacklist - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226). +- keycloak module utils - expose error message from Keycloak server for HTTP errors in some specific situations (https://github.com/ansible-collections/community.general/pull/7645). +- keycloak_client, keycloak_clientscope, keycloak_clienttemplate - added ``docker-v2`` protocol support, enhancing alignment with Keycloak's protocol options (https://github.com/ansible-collections/community.general/issues/8215, https://github.com/ansible-collections/community.general/pull/8216). +- keycloak_realm_key - the ``config.algorithm`` option now supports 8 additional key algorithms (https://github.com/ansible-collections/community.general/pull/7698). +- keycloak_realm_key - the ``config.certificate`` option value is no longer defined with ``no_log=True`` (https://github.com/ansible-collections/community.general/pull/7698). +- keycloak_realm_key - the ``provider_id`` option now supports RSA encryption key usage (value ``rsa-enc``) (https://github.com/ansible-collections/community.general/pull/7698). +- keycloak_user_federation - add option for ``krbPrincipalAttribute`` (https://github.com/ansible-collections/community.general/pull/7538). +- keycloak_user_federation - allow custom user storage providers to be set through ``provider_id`` (https://github.com/ansible-collections/community.general/pull/7789). +- ldap_attrs - module now supports diff mode, showing which attributes are changed within an operation (https://github.com/ansible-collections/community.general/pull/8073). +- lvg - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264). +- lvol - change ``pvs`` argument type to list of strings (https://github.com/ansible-collections/community.general/pull/7676, https://github.com/ansible-collections/community.general/issues/7504). +- lvol - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264). 
+- lxd connection plugin - tighten the detection logic for lxd ``Instance not found`` errors, to avoid false detection on unrelated errors such as ``/usr/bin/python3: not found`` (https://github.com/ansible-collections/community.general/pull/7521). +- lxd_container - uses ``/1.0/instances`` API endpoint, if available. Falls back to ``/1.0/containers`` or ``/1.0/virtual-machines``. Fixes issue when using Incus or LXD 5.19 due to migrating to ``/1.0/instances`` endpoint (https://github.com/ansible-collections/community.general/pull/7980). +- macports - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264). +- mail - add ``Message-ID`` header; which is required by some mail servers (https://github.com/ansible-collections/community.general/pull/7740). +- mail module, mail callback plugin - allow to configure the domain name of the Message-ID header with a new ``message_id_domain`` option (https://github.com/ansible-collections/community.general/pull/7765). +- mssql_script - adds transactional (rollback/commit) support via optional boolean param ``transaction`` (https://github.com/ansible-collections/community.general/pull/7976). +- netcup_dns - adds support for record types ``OPENPGPKEY``, ``SMIMEA``, and ``SSHFP`` (https://github.com/ansible-collections/community.general/pull/7489). +- nmcli - add support for new connection type ``loopback`` (https://github.com/ansible-collections/community.general/issues/6572). +- nmcli - adds OpenvSwitch support with new ``type`` values ``ovs-port``, ``ovs-interface``, and ``ovs-bridge``, and new ``slave_type`` value ``ovs-port`` (https://github.com/ansible-collections/community.general/pull/8154). +- nmcli - allow for ``infiniband`` slaves of ``bond`` interface types (https://github.com/ansible-collections/community.general/pull/7569). +- nmcli - allow for the setting of ``MTU`` for ``infiniband`` and ``bond`` interface types (https://github.com/ansible-collections/community.general/pull/7499). +- nmcli - allow setting ``MTU`` for ``bond-slave`` interface types (https://github.com/ansible-collections/community.general/pull/8118). +- onepassword lookup plugin - support 1Password Connect with the opv2 client by setting the connect_host and connect_token parameters (https://github.com/ansible-collections/community.general/pull/7116). +- onepassword_raw lookup plugin - support 1Password Connect with the opv2 client by setting the connect_host and connect_token parameters (https://github.com/ansible-collections/community.general/pull/7116) +- opentelemetry - add support for HTTP trace_exporter and configures the behavior via ``OTEL_EXPORTER_OTLP_TRACES_PROTOCOL`` (https://github.com/ansible-collections/community.general/issues/7888, https://github.com/ansible-collections/community.general/pull/8321). +- opentelemetry - add support for exporting spans in a file via ``ANSIBLE_OPENTELEMETRY_STORE_SPANS_IN_FILE`` (https://github.com/ansible-collections/community.general/issues/7888, https://github.com/ansible-collections/community.general/pull/8363). +- opkg - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226). +- osx_defaults - add option ``check_types`` to enable changing the type of existing defaults on the fly (https://github.com/ansible-collections/community.general/pull/8173). 
+- parted - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264). +- passwordstore - adds ``timestamp`` and ``preserve`` parameters to modify the stored password format (https://github.com/ansible-collections/community.general/pull/7426). +- passwordstore lookup - add ``missing_subkey`` parameter defining the behavior of the lookup when a passwordstore subkey is missing (https://github.com/ansible-collections/community.general/pull/8166). +- pipx - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226). +- pkg5 - add support for non-silent execution (https://github.com/ansible-collections/community.general/issues/8379, https://github.com/ansible-collections/community.general/pull/8382). +- pkgin - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264). +- portage - adds the possibility to explicitly tell portage to write packages to the world file (https://github.com/ansible-collections/community.general/issues/6226, https://github.com/ansible-collections/community.general/pull/8236). +- portinstall - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264). +- proxmox - adds ``startup`` parameters to configure startup order, startup delay and shutdown delay (https://github.com/ansible-collections/community.general/pull/8038). +- proxmox - adds ``template`` value to the ``state`` parameter, allowing conversion of a container to a template (https://github.com/ansible-collections/community.general/pull/7143). +- proxmox - adds ``update`` parameter, allowing update of an already existing container's configuration (https://github.com/ansible-collections/community.general/pull/7540). +- proxmox inventory plugin - adds an option to exclude nodes from the dynamic inventory generation. The new setting is optional; not using this option will behave as usual (https://github.com/ansible-collections/community.general/issues/6714, https://github.com/ansible-collections/community.general/pull/7461). +- proxmox* modules - there is now a ``community.general.proxmox`` module defaults group that can be used to set default options for all Proxmox modules (https://github.com/ansible-collections/community.general/pull/8334). +- proxmox_disk - add ability to manipulate CD-ROM drive (https://github.com/ansible-collections/community.general/pull/7495). +- proxmox_kvm - add parameter ``update_unsafe`` to avoid limitations when updating dangerous values (https://github.com/ansible-collections/community.general/pull/7843). +- proxmox_kvm - adds ``template`` value to the ``state`` parameter, allowing conversion of a VM to a template (https://github.com/ansible-collections/community.general/pull/7143). +- proxmox_kvm - adds ``usb`` parameter for setting USB devices on Proxmox KVM VMs (https://github.com/ansible-collections/community.general/pull/8199). +- proxmox_kvm - support the ``hookscript`` parameter (https://github.com/ansible-collections/community.general/issues/7600). +- proxmox_ostype - it is now possible to specify the ``ostype`` when creating an LXC container (https://github.com/ansible-collections/community.general/pull/7462). 
+- proxmox_vm_info - add ability to retrieve configuration info (https://github.com/ansible-collections/community.general/pull/7485). +- puppet - new feature to set ``--waitforlock`` option (https://github.com/ansible-collections/community.general/pull/8282). +- redfish_command - add command ``ResetToDefaults`` to reset manager to default state (https://github.com/ansible-collections/community.general/issues/8163). +- redfish_config - add command ``SetServiceIdentification`` to set service identification (https://github.com/ansible-collections/community.general/issues/7916). +- redfish_info - add boolean return value ``MultipartHttpPush`` to ``GetFirmwareUpdateCapabilities`` (https://github.com/ansible-collections/community.general/issues/8194, https://github.com/ansible-collections/community.general/pull/8195). +- redfish_info - add command ``GetServiceIdentification`` to get service identification (https://github.com/ansible-collections/community.general/issues/7882). +- redfish_info - adding the ``BootProgress`` property when getting ``Systems`` info (https://github.com/ansible-collections/community.general/pull/7626). +- revbitspss lookup plugin - removed a redundant unicode prefix. The prefix was not necessary for Python 3 and has been cleaned up to streamline the code (https://github.com/ansible-collections/community.general/pull/8087). +- rundeck module utils - allow to pass ``Content-Type`` to API requests (https://github.com/ansible-collections/community.general/pull/7684). +- slackpkg - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264). +- ssh_config - adds ``controlmaster``, ``controlpath`` and ``controlpersist`` parameters (https://github.com/ansible-collections/community.general/pull/7456). +- ssh_config - allow ``accept-new`` as valid value for ``strict_host_key_checking`` (https://github.com/ansible-collections/community.general/pull/8257). +- ssh_config - new feature to set ``AddKeysToAgent`` option to ``yes`` or ``no`` (https://github.com/ansible-collections/community.general/pull/7703). +- ssh_config - new feature to set ``IdentitiesOnly`` option to ``yes`` or ``no`` (https://github.com/ansible-collections/community.general/pull/7704). +- sudoers - add support for the ``NOEXEC`` tag in sudoers rules (https://github.com/ansible-collections/community.general/pull/7983). +- svr4pkg - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264). +- swdepot - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264). +- terraform - add support for ``diff_mode`` for terraform resource_changes (https://github.com/ansible-collections/community.general/pull/7896). +- terraform - fix ``diff_mode`` in state ``absent`` and when terraform ``resource_changes`` does not exist (https://github.com/ansible-collections/community.general/pull/7963). +- xcc_redfish_command - added support for raw POSTs (``command=PostResource`` in ``category=Raw``) without a specific action info (https://github.com/ansible-collections/community.general/pull/7746). +- xfconf - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226). 
+- xfconf_info - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226).
+
+Breaking Changes / Porting Guide
+--------------------------------
+
+- cpanm - the default of the ``mode`` option changed from ``compatibility`` to ``new`` (https://github.com/ansible-collections/community.general/pull/8198).
+- django_manage - the module now requires Django >= 4.1 (https://github.com/ansible-collections/community.general/pull/8198).
+- django_manage - the module will now fail if ``virtualenv`` is specified but no virtual environment exists at that location (https://github.com/ansible-collections/community.general/pull/8198).
+- redfish_command, redfish_config, redfish_info - change the default for ``timeout`` from 10 to 60 (https://github.com/ansible-collections/community.general/pull/8198).
+
+Deprecated Features
+-------------------
+
+- MH DependencyCtxMgr module_utils - deprecate ``module_utils.mh.mixin.deps.DependencyCtxMgr`` in favour of ``module_utils.deps`` (https://github.com/ansible-collections/community.general/pull/8280).
+- ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.AnsibleModule`` (https://github.com/ansible-collections/community.general/pull/8280).
+- ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.DependencyCtxMgr`` (https://github.com/ansible-collections/community.general/pull/8280).
+- ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.StateMixin`` (https://github.com/ansible-collections/community.general/pull/8280).
+- ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.VarDict`` (https://github.com/ansible-collections/community.general/pull/8280).
+- ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.VarMeta`` (https://github.com/ansible-collections/community.general/pull/8280).
+- ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.VarsMixin`` (https://github.com/ansible-collections/community.general/pull/8280).
+- ModuleHelper module_utils - deprecate use of ``VarsMixin`` in favor of using the ``VarDict`` module_utils (https://github.com/ansible-collections/community.general/pull/8226).
+- ModuleHelper vars module_utils - bump deprecation of ``VarMeta``, ``VarDict`` and ``VarsMixin`` to version 11.0.0 (https://github.com/ansible-collections/community.general/pull/8226).
+- apt_rpm - the behavior of ``state=present`` and ``state=installed`` is deprecated and will change in community.general 11.0.0. Right now the module will upgrade a package to the latest version if one of these two states is used. You should explicitly use ``state=latest`` if you want this behavior, and switch to ``state=present_not_latest`` if you do not want to upgrade the package if it is already installed. In community.general 11.0.0 the behavior of ``state=present`` and ``state=installed`` will change to that of ``state=present_not_latest`` (https://github.com/ansible-collections/community.general/issues/8217, https://github.com/ansible-collections/community.general/pull/8285).
+- consul_acl - the module has been deprecated and will be removed in community.general 10.0.0. ``consul_token`` and ``consul_policy`` can be used instead (https://github.com/ansible-collections/community.general/pull/7901).
+- django_manage - the ``ack_venv_creation_deprecation`` option no longer has any effect and will be removed from community.general 11.0.0 (https://github.com/ansible-collections/community.general/pull/8198).
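As a hedged illustration of the apt_rpm deprecation above: the ``state`` values come from the apt_rpm entries in this changelog, while the package name is a placeholder.

```yaml
# Hedged sketch only - ``htop`` is a placeholder package name.
- name: Keep the current behavior explicitly (upgrade if a newer version exists)
  community.general.apt_rpm:
    package: htop
    state: latest

- name: Opt in now to the future meaning of state=present
  community.general.apt_rpm:
    package: htop
    state: present_not_latest
```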
+- gitlab modules - the basic auth method on the GitLab API has been deprecated and will be removed in community.general 10.0.0 (https://github.com/ansible-collections/community.general/pull/8383).
+- hipchat callback plugin - the hipchat service has been discontinued and the self-hosted variant has been end-of-life since 2020. The callback plugin is therefore deprecated and will be removed from community.general 10.0.0 if nobody provides compelling reasons to still keep it (https://github.com/ansible-collections/community.general/issues/8184, https://github.com/ansible-collections/community.general/pull/8189).
+- irc - the defaults ``false`` for ``use_tls`` and ``validate_certs`` have been deprecated and will change to ``true`` in community.general 10.0.0 to improve security. You can already improve security now by explicitly setting them to ``true``. Specifying values now disables the deprecation warning (https://github.com/ansible-collections/community.general/pull/7578).
+
+Removed Features (previously deprecated)
+----------------------------------------
+
+- The deprecated redirects for internal module names have been removed. These internal redirects were extra-long FQCNs like ``community.general.packaging.os.apt_rpm`` that redirected to the short FQCN ``community.general.apt_rpm``. They were originally needed to implement flatmapping; as various tooling started to recommend that users use the long names, flatmapping was removed from the collection and redirects were added for users who already followed these incorrect recommendations (https://github.com/ansible-collections/community.general/pull/7835).
+- ansible_galaxy_install - the ``ack_ansible29`` and ``ack_min_ansiblecore211`` options have been removed. They no longer had any effect (https://github.com/ansible-collections/community.general/pull/8198).
+- cloudflare_dns - remove support for SPF records. These are no longer supported by Cloudflare (https://github.com/ansible-collections/community.general/pull/7782).
+- django_manage - support for the ``command`` values ``cleanup``, ``syncdb``, and ``validate`` was removed. Use ``clearsessions``, ``migrate``, and ``check`` instead, respectively (https://github.com/ansible-collections/community.general/pull/8198).
+- flowdock - this module relied on HTTPS APIs that do not exist anymore and was thus removed (https://github.com/ansible-collections/community.general/pull/8198).
+- mh.mixins.deps module utils - the ``DependencyMixin`` has been removed. Use the ``deps`` module utils instead (https://github.com/ansible-collections/community.general/pull/8198).
+- proxmox - the ``proxmox_default_behavior`` option has been removed (https://github.com/ansible-collections/community.general/pull/8198).
+- rax* modules, rax module utils, rax docs fragment - the Rackspace modules relied on the deprecated package ``pyrax`` and were thus removed (https://github.com/ansible-collections/community.general/pull/8198).
+- redhat module utils - the classes ``Rhsm``, ``RhsmPool``, and ``RhsmPools`` have been removed (https://github.com/ansible-collections/community.general/pull/8198).
+- redhat_subscription - the alias ``autosubscribe`` of the ``auto_attach`` option was removed (https://github.com/ansible-collections/community.general/pull/8198).
+- stackdriver - this module relied on HTTPS APIs that do not exist anymore and was thus removed (https://github.com/ansible-collections/community.general/pull/8198).
+- webfaction_* modules - these modules relied on HTTPS APIs that do not exist anymore and were thus removed (https://github.com/ansible-collections/community.general/pull/8198).
+
+Security Fixes
+--------------
+
+- cobbler, gitlab_runners, icinga2, linode, lxd, nmap, online, opennebula, proxmox, scaleway, stackpath_compute, virtualbox, and xen_orchestra inventory plugins - make sure all data received from the remote servers is marked as unsafe, so remote code execution by obtaining texts that can be evaluated as templates is not possible (https://www.die-welt.net/2024/03/remote-code-execution-in-ansible-dynamic-inventory-plugins/, https://github.com/ansible-collections/community.general/pull/8098).
+- keycloak_identity_provider - the client secret was not correctly sanitized by the module. The return values ``proposed``, ``existing``, and ``end_state``, as well as the diff, contained the client secret unmasked (https://github.com/ansible-collections/community.general/pull/8355).
+
+Bugfixes
+--------
+
+- aix_filesystem - fix ``_validate_vg`` not passing VG name to ``lsvg_cmd`` (https://github.com/ansible-collections/community.general/issues/8151).
+- aix_filesystem - fix issue with empty list items in crfs logic and option order (https://github.com/ansible-collections/community.general/pull/8052).
+- apt-rpm - the module did not upgrade packages if a newer version exists. Now the package will be reinstalled if the candidate is newer than the installed version (https://github.com/ansible-collections/community.general/issues/7414).
+- apt_rpm - when checking whether packages were installed after running ``apt-get -y install ``, only the last package name was checked (https://github.com/ansible-collections/community.general/pull/8263).
+- bitwarden_secrets_manager lookup plugin - implements retry with exponential backoff to avoid lookup errors when Bitwarden's API rate limiting is encountered (https://github.com/ansible-collections/community.general/issues/8230, https://github.com/ansible-collections/community.general/pull/8238).
+- cargo - fix idempotency issues when using a custom installation path for packages (using the ``--path`` parameter). The initial installation runs fine, but subsequent runs use the ``get_installed()`` function, which did not check the given installation location before running ``cargo install``. This resulted in a false ``changed`` state. Also, the removal of packages using ``state: absent`` failed, as the installation check did not use the given parameter (https://github.com/ansible-collections/community.general/pull/7970).
+- cloudflare_dns - fix Cloudflare lookup of SSHFP records (https://github.com/ansible-collections/community.general/issues/7652).
+- consul_token - fix token creation without ``accessor_id`` (https://github.com/ansible-collections/community.general/pull/8091).
+- from_ini filter plugin - disabling interpolation of ``ConfigParser`` to allow converting values with a ``%`` sign (https://github.com/ansible-collections/community.general/issues/8183, https://github.com/ansible-collections/community.general/pull/8185).
+- gitlab_group_members - fix gitlab constants call in ``gitlab_group_members`` module (https://github.com/ansible-collections/community.general/issues/7467).
+- gitlab_issue - fix behavior to search GitLab issue, using ``search`` keyword instead of ``title`` (https://github.com/ansible-collections/community.general/issues/7846).
+- gitlab_issue, gitlab_label, gitlab_milestone - avoid crash during version comparison when the python-gitlab Python module is not installed (https://github.com/ansible-collections/community.general/pull/8158). +- gitlab_project_members - fix gitlab constants call in ``gitlab_project_members`` module (https://github.com/ansible-collections/community.general/issues/7467). +- gitlab_protected_branches - fix gitlab constants call in ``gitlab_protected_branches`` module (https://github.com/ansible-collections/community.general/issues/7467). +- gitlab_runner - fix pagination when checking for existing runners (https://github.com/ansible-collections/community.general/pull/7790). +- gitlab_user - fix gitlab constants call in ``gitlab_user`` module (https://github.com/ansible-collections/community.general/issues/7467). +- haproxy - fix an issue where HAProxy could get stuck in DRAIN mode when the backend was unreachable (https://github.com/ansible-collections/community.general/issues/8092). +- homebrew - detect already installed formulae and casks using JSON output from ``brew info`` (https://github.com/ansible-collections/community.general/issues/864). +- homebrew - error returned from brew command was ignored and tried to parse empty JSON. Fix now checks for an error and raises it to give accurate error message to users (https://github.com/ansible-collections/community.general/issues/8047). +- incus connection plugin - treats ``inventory_hostname`` as a variable instead of a literal in remote connections (https://github.com/ansible-collections/community.general/issues/7874). +- interface_files - also consider ``address_family`` when changing ``option=method`` (https://github.com/ansible-collections/community.general/issues/7610, https://github.com/ansible-collections/community.general/pull/7612). +- inventory plugins - add unsafe wrapper to avoid marking strings that do not contain ``{`` or ``}`` as unsafe, to work around a bug in AWX ((https://github.com/ansible-collections/community.general/issues/8212, https://github.com/ansible-collections/community.general/pull/8225). +- ipa - fix get version regex in IPA module_utils (https://github.com/ansible-collections/community.general/pull/8175). +- ipa_hbacrule - the module uses a string for ``ipaenabledflag`` for new FreeIPA versions while the returned value is a boolean (https://github.com/ansible-collections/community.general/pull/7880). +- ipa_otptoken - the module expect ``ipatokendisabled`` as string but the ``ipatokendisabled`` value is returned as a boolean (https://github.com/ansible-collections/community.general/pull/7795). +- ipa_sudorule - the module uses a string for ``ipaenabledflag`` for new FreeIPA versions while the returned value is a boolean (https://github.com/ansible-collections/community.general/pull/7880). +- iptables_state - fix idempotency issues when restoring incomplete iptables dumps (https://github.com/ansible-collections/community.general/issues/8029). +- irc - replace ``ssl.wrap_socket`` that was removed from Python 3.12 with code for creating a proper SSL context (https://github.com/ansible-collections/community.general/pull/7542). +- keycloak_* - fix Keycloak API client to quote ``/`` properly (https://github.com/ansible-collections/community.general/pull/7641). +- keycloak_authz_permission - resource payload variable for scope-based permission was constructed as a string, when it needs to be a list, even for a single item (https://github.com/ansible-collections/community.general/issues/7151). 
+- keycloak_client - add sorted ``defaultClientScopes`` and ``optionalClientScopes`` to normalizations (https://github.com/ansible-collections/community.general/pull/8223). +- keycloak_client - fixes issue when metadata is provided in desired state when task is in check mode (https://github.com/ansible-collections/community.general/issues/1226, https://github.com/ansible-collections/community.general/pull/7881). +- keycloak_identity_provider - ``mappers`` processing was not idempotent if the mappers configuration list had not been sorted by name (in ascending order). Fix resolves the issue by sorting mappers in the desired state using the same key which is used for obtaining existing state (https://github.com/ansible-collections/community.general/pull/7418). +- keycloak_identity_provider - it was not possible to reconfigure (add, remove) ``mappers`` once they were created initially. Removal was ignored, adding new ones resulted in dropping the pre-existing unmodified mappers. Fix resolves the issue by supplying correct input to the internal update call (https://github.com/ansible-collections/community.general/pull/7418). +- keycloak_realm - add normalizations for ``enabledEventTypes`` and ``supportedLocales`` (https://github.com/ansible-collections/community.general/pull/8224). +- keycloak_user - when ``force`` is set, but user does not exist, do not try to delete it (https://github.com/ansible-collections/community.general/pull/7696). +- keycloak_user_federation - fix diff of empty ``krbPrincipalAttribute`` (https://github.com/ansible-collections/community.general/pull/8320). +- ldap - previously the order number (if present) was expected to follow an equals sign in the DN. This makes it so the order number string is identified correctly anywhere within the DN (https://github.com/ansible-collections/community.general/issues/7646). +- linode inventory plugin - add descriptive error message for linode inventory plugin (https://github.com/ansible-collections/community.general/pull/8133). +- log_entries callback plugin - replace ``ssl.wrap_socket`` that was removed from Python 3.12 with code for creating a proper SSL context (https://github.com/ansible-collections/community.general/pull/7542). +- lvol - test for output messages in both ``stdout`` and ``stderr`` (https://github.com/ansible-collections/community.general/pull/7601, https://github.com/ansible-collections/community.general/issues/7182). +- merge_variables lookup plugin - fixing cross host merge: providing access to foreign hosts variables to the perspective of the host that is performing the merge (https://github.com/ansible-collections/community.general/pull/8303). +- modprobe - listing modules files or modprobe files could trigger a FileNotFoundError if ``/etc/modprobe.d`` or ``/etc/modules-load.d`` did not exist. Relevant functions now return empty lists if the directories do not exist to avoid crashing the module (https://github.com/ansible-collections/community.general/issues/7717). +- mssql_script - make the module work with Python 2 (https://github.com/ansible-collections/community.general/issues/7818, https://github.com/ansible-collections/community.general/pull/7821). +- nmcli - fix ``connection.slave-type`` wired to ``bond`` and not with parameter ``slave_type`` in case of connection type ``wifi`` (https://github.com/ansible-collections/community.general/issues/7389). 
+- ocapi_utils, oci_utils, redfish_utils module utils - replace ``type()`` calls with ``isinstance()`` calls (https://github.com/ansible-collections/community.general/pull/7501). +- onepassword lookup plugin - failed for fields that were in sections and had uppercase letters in the label/ID. Field lookups are now case insensitive in all cases (https://github.com/ansible-collections/community.general/pull/7919). +- onepassword lookup plugin - field and section titles are now case insensitive when using op CLI version two or later. This matches the behavior of version one (https://github.com/ansible-collections/community.general/pull/7564). +- opentelemetry callback plugin - close spans always (https://github.com/ansible-collections/community.general/pull/8367). +- opentelemetry callback plugin - honour the ``disable_logs`` option to avoid storing task results since they are not used regardless (https://github.com/ansible-collections/community.general/pull/8373). +- pacemaker_cluster - actually implement check mode, which the module claims to support. This means that until now the module also did changes in check mode (https://github.com/ansible-collections/community.general/pull/8081). +- pam_limits - when the file does not exist, do not create it in check mode (https://github.com/ansible-collections/community.general/issues/8050, https://github.com/ansible-collections/community.general/pull/8057). +- pipx module utils - change the CLI argument formatter for the ``pip_args`` parameter (https://github.com/ansible-collections/community.general/issues/7497, https://github.com/ansible-collections/community.general/pull/7506). +- pkgin - pkgin (pkgsrc package manager used by SmartOS) raises erratic exceptions and spurious ``changed=true`` (https://github.com/ansible-collections/community.general/pull/7971). +- proxmox - fix updating a container config if the setting does not already exist (https://github.com/ansible-collections/community.general/pull/7872). +- proxmox_kvm - fixed status check getting from node-specific API endpoint (https://github.com/ansible-collections/community.general/issues/7817). +- proxmox_kvm - running ``state=template`` will first check whether VM is already a template (https://github.com/ansible-collections/community.general/pull/7792). +- proxmox_pool_member - absent state for type VM did not delete VMs from the pools (https://github.com/ansible-collections/community.general/pull/7464). +- puppet - add option ``environment_lang`` to set the environment language encoding. Defaults to lang ``C``. It is recommended to set it to ``C.UTF-8`` or ``en_US.UTF-8`` depending on what is available on your system. (https://github.com/ansible-collections/community.general/issues/8000) +- redfish_command - fix usage of message parsing in ``SimpleUpdate`` and ``MultipartHTTPPushUpdate`` commands to treat the lack of a ``MessageId`` as no message (https://github.com/ansible-collections/community.general/issues/7465, https://github.com/ansible-collections/community.general/pull/7471). +- redfish_info - allow for a GET operation invoked by ``GetUpdateStatus`` to allow for an empty response body for cases where a service returns 204 No Content (https://github.com/ansible-collections/community.general/issues/8003). +- redfish_info - correct uncaught exception when attempting to retrieve ``Chassis`` information (https://github.com/ansible-collections/community.general/pull/7952). 
+- redhat_subscription - use the D-Bus registration on RHEL 7 only on 7.4 and + greater; older versions of RHEL 7 do not have it + (https://github.com/ansible-collections/community.general/issues/7622, + https://github.com/ansible-collections/community.general/pull/7624). +- riak - support ``riak admin`` sub-command in newer Riak KV versions beside the legacy ``riak-admin`` main command (https://github.com/ansible-collections/community.general/pull/8211). +- statusio_maintenance - fix error caused by incorrectly formed API data payload. Was raising "Failed to create maintenance HTTP Error 400 Bad Request" caused by bad data type for date/time and deprecated dict keys (https://github.com/ansible-collections/community.general/pull/7754). +- terraform - fix multiline string handling in complex variables (https://github.com/ansible-collections/community.general/pull/7535). +- to_ini filter plugin - disabling interpolation of ``ConfigParser`` to allow converting values with a ``%`` sign (https://github.com/ansible-collections/community.general/issues/8183, https://github.com/ansible-collections/community.general/pull/8185). +- xml - make module work with lxml 5.1.1, which removed some internals that the module was relying on (https://github.com/ansible-collections/community.general/pull/8169). + +New Plugins +----------- + +Become +~~~~~~ + +- community.general.run0 - Systemd's run0. + +Callback +~~~~~~~~ + +- community.general.default_without_diff - The default ansible callback without diff output. +- community.general.timestamp - Adds simple timestamp for each header. + +Connection +~~~~~~~~~~ + +- community.general.incus - Run tasks in Incus instances via the Incus CLI. + +Filter +~~~~~~ + +- community.general.from_ini - Converts INI text input into a dictionary. +- community.general.lists_difference - Difference of lists with a predictive order. +- community.general.lists_intersect - Intersection of lists with a predictive order. +- community.general.lists_symmetric_difference - Symmetric Difference of lists with a predictive order. +- community.general.lists_union - Union of lists with a predictive order. +- community.general.to_ini - Converts a dictionary to the INI file format. + +Lookup +~~~~~~ + +- community.general.github_app_access_token - Obtain short-lived Github App Access tokens. +- community.general.onepassword_doc - Fetch documents stored in 1Password. + +Test +~~~~ + +- community.general.fqdn_valid - Validates fully-qualified domain names against RFC 1123. + +New Modules +----------- + +- community.general.consul_acl_bootstrap - Bootstrap ACLs in Consul. +- community.general.consul_auth_method - Manipulate Consul auth methods. +- community.general.consul_binding_rule - Manipulate Consul binding rules. +- community.general.consul_token - Manipulate Consul tokens. +- community.general.django_command - Run Django admin commands. +- community.general.dnf_config_manager - Enable or disable dnf repositories using config-manager. +- community.general.git_config_info - Read git configuration. +- community.general.gitlab_group_access_token - Manages GitLab group access tokens. +- community.general.gitlab_issue - Create, update, or delete GitLab issues. +- community.general.gitlab_label - Creates/updates/deletes GitLab Labels belonging to project or group. +- community.general.gitlab_milestone - Creates/updates/deletes GitLab Milestones belonging to project or group. +- community.general.gitlab_project_access_token - Manages GitLab project access tokens. 
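As an editorial aside on the new plugins listed above, here is a hedged sketch of the ``from_ini`` filter and the ``fqdn_valid`` test; the file path and hostname are placeholders.

```yaml
# Hedged sketch only - /etc/example.ini and the hostname are placeholders.
- name: Read an INI file into a dictionary with the new filter
  ansible.builtin.debug:
    msg: "{{ lookup('ansible.builtin.file', '/etc/example.ini') | community.general.from_ini }}"

- name: Check a hostname with the new test plugin
  ansible.builtin.assert:
    that:
      - "'host.example.com' is community.general.fqdn_valid"
```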
+- community.general.keycloak_client_rolescope - Allows administration of Keycloak client roles scope to restrict the usage of certain roles to a other specific client applications. +- community.general.keycloak_component_info - Retrive component info in Keycloak. +- community.general.keycloak_realm_rolemapping - Allows administration of Keycloak realm role mappings into groups with the Keycloak API. +- community.general.nomad_token - Manage Nomad ACL tokens. +- community.general.proxmox_node_info - Retrieve information about one or more Proxmox VE nodes. +- community.general.proxmox_storage_contents_info - List content from a Proxmox VE storage. +- community.general.usb_facts - Allows listing information about USB devices. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5363b4daca..55a7098cc2 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -56,6 +56,8 @@ cd ~/dev/ansible_collections/community/general Then you can run `ansible-test` (which is a part of [ansible-core](https://pypi.org/project/ansible-core/)) inside the checkout. The following example commands expect that you have installed Docker or Podman. Note that Podman has only been supported by more recent ansible-core releases. If you are using Docker, the following will work with Ansible 2.9+. +### Sanity tests + The following commands show how to run sanity tests: ```.bash @@ -66,6 +68,8 @@ ansible-test sanity --docker -v ansible-test sanity --docker -v plugins/modules/system/pids.py tests/integration/targets/pids/ ``` +### Unit tests + The following commands show how to run unit tests: ```.bash @@ -79,13 +83,32 @@ ansible-test units --docker -v --python 3.8 ansible-test units --docker -v --python 3.8 tests/unit/plugins/modules/net_tools/test_nmcli.py ``` +### Integration tests + The following commands show how to run integration tests: -```.bash -# Run integration tests for the interfaces_files module in a Docker container using the -# fedora35 operating system image (the supported images depend on your ansible-core version): -ansible-test integration --docker fedora35 -v interfaces_file +#### In Docker +Integration tests on Docker have the following parameters: +- `image_name` (required): The name of the Docker image. To get the list of supported Docker images, run + `ansible-test integration --help` and look for _target docker images_. +- `test_name` (optional): The name of the integration test. + For modules, this equals the short name of the module; for example, `pacman` in case of `community.general.pacman`. + For plugins, the plugin type is added before the plugin's short name, for example `callback_yaml` for the `community.general.yaml` callback. 
+```.bash +# Test all plugins/modules on fedora40 +ansible-test integration -v --docker fedora40 + +# Template +ansible-test integration -v --docker image_name test_name + +# Example community.general.ini_file module on fedora40 Docker image: +ansible-test integration -v --docker fedora40 ini_file +``` + +#### Without isolation + +```.bash # Run integration tests for the flattened lookup **without any isolation**: ansible-test integration -v lookup_flattened ``` diff --git a/README.md b/README.md index b5a6fcfa24..9e4b2ee96c 100644 --- a/README.md +++ b/README.md @@ -6,8 +6,9 @@ SPDX-License-Identifier: GPL-3.0-or-later # Community General Collection -[![Build Status](https://dev.azure.com/ansible/community.general/_apis/build/status/CI?branchName=main)](https://dev.azure.com/ansible/community.general/_build?definitionId=31) -[![EOL CI](https://github.com/ansible-collections/community.general/workflows/EOL%20CI/badge.svg?event=push)](https://github.com/ansible-collections/community.general/actions) +[![Documentation](https://img.shields.io/badge/docs-brightgreen.svg)](https://docs.ansible.com/ansible/latest/collections/community/general/) +[![Build Status](https://dev.azure.com/ansible/community.general/_apis/build/status/CI?branchName=stable-9)](https://dev.azure.com/ansible/community.general/_build?definitionId=31) +[![EOL CI](https://github.com/ansible-collections/community.general/actions/workflows/ansible-test.yml/badge.svg?branch=stable-9)](https://github.com/ansible-collections/community.general/actions) [![Codecov](https://img.shields.io/codecov/c/github/ansible-collections/community.general)](https://codecov.io/gh/ansible-collections/community.general) [![REUSE status](https://api.reuse.software/badge/github.com/ansible-collections/community.general)](https://api.reuse.software/info/github.com/ansible-collections/community.general) @@ -23,9 +24,21 @@ We follow [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/comm If you encounter abusive behavior violating the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html), please refer to the [policy violations](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html#policy-violations) section of the Code of Conduct for information on how to raise a complaint. +## Communication + +* Join the Ansible forum: + * [Get Help](https://forum.ansible.com/c/help/6): get help or help others. This is for questions about modules or plugins in the collection. Please add appropriate tags if you start new discussions. + * [Tag `community-general`](https://forum.ansible.com/tag/community-general): discuss the *collection itself*, instead of specific modules or plugins. + * [Social Spaces](https://forum.ansible.com/c/chat/4): gather and interact with fellow enthusiasts. + * [News & Announcements](https://forum.ansible.com/c/news/5): track project-wide announcements including social events. + +* The Ansible [Bullhorn newsletter](https://docs.ansible.com/ansible/devel/community/communication.html#the-bullhorn): used to announce releases and important changes. + +For more information about communication, see the [Ansible communication guide](https://docs.ansible.com/ansible/devel/community/communication.html). + ## Tested with Ansible -Tested with the current ansible-core 2.13, ansible-core 2.14, ansible-core 2.15, ansible-core 2.16, ansible-core 2.17 releases and the current development version of ansible-core. Ansible-core versions before 2.13.0 are not supported. 
This includes all ansible-base 2.10 and Ansible 2.9 releases. +Tested with the current ansible-core 2.13, ansible-core 2.14, ansible-core 2.15, ansible-core 2.16, ansible-core 2.17, ansible-core 2.18 releases and the current development version of ansible-core. Ansible-core versions before 2.13.0 are not supported. This includes all ansible-base 2.10 and Ansible 2.9 releases. ## External requirements @@ -98,25 +111,13 @@ It is necessary for maintainers of this collection to be subscribed to: They also should be subscribed to Ansible's [The Bullhorn newsletter](https://docs.ansible.com/ansible/devel/community/communication.html#the-bullhorn). -## Communication - -We announce important development changes and releases through Ansible's [The Bullhorn newsletter](https://eepurl.com/gZmiEP). If you are a collection developer, be sure you are subscribed. - -Join us in the `#ansible` (general use questions and support), `#ansible-community` (community and collection development questions), and other [IRC channels](https://docs.ansible.com/ansible/devel/community/communication.html#irc-channels) on [Libera.chat](https://libera.chat). - -We take part in the global quarterly [Ansible Contributor Summit](https://github.com/ansible/community/wiki/Contributor-Summit) virtually or in-person. Track [The Bullhorn newsletter](https://eepurl.com/gZmiEP) and join us. - -For more information about communities, meetings and agendas see [Community Wiki](https://github.com/ansible/community/wiki/Community). - -For more information about communication, refer to Ansible's the [Communication guide](https://docs.ansible.com/ansible/devel/community/communication.html). - ## Publishing New Version See the [Releasing guidelines](https://github.com/ansible/community-docs/blob/main/releasing_collections.rst) to learn how to release this collection. ## Release notes -See the [changelog](https://github.com/ansible-collections/community.general/blob/main/CHANGELOG.md). +See the [changelog](https://github.com/ansible-collections/community.general/blob/stable-9/CHANGELOG.md). ## Roadmap @@ -135,8 +136,8 @@ See [this issue](https://github.com/ansible-collections/community.general/issues This collection is primarily licensed and distributed as a whole under the GNU General Public License v3.0 or later. -See [LICENSES/GPL-3.0-or-later.txt](https://github.com/ansible-collections/community.general/blob/main/COPYING) for the full text. +See [LICENSES/GPL-3.0-or-later.txt](https://github.com/ansible-collections/community.general/blob/stable-9/COPYING) for the full text. -Parts of the collection are licensed under the [BSD 2-Clause license](https://github.com/ansible-collections/community.general/blob/main/LICENSES/BSD-2-Clause.txt), the [MIT license](https://github.com/ansible-collections/community.general/blob/main/LICENSES/MIT.txt), and the [PSF 2.0 license](https://github.com/ansible-collections/community.general/blob/main/LICENSES/PSF-2.0.txt). +Parts of the collection are licensed under the [BSD 2-Clause license](https://github.com/ansible-collections/community.general/blob/stable-9/LICENSES/BSD-2-Clause.txt), the [MIT license](https://github.com/ansible-collections/community.general/blob/stable-9/LICENSES/MIT.txt), and the [PSF 2.0 license](https://github.com/ansible-collections/community.general/blob/stable-9/LICENSES/PSF-2.0.txt). All files have a machine readable `SDPX-License-Identifier:` comment denoting its respective license(s) or an equivalent entry in an accompanying `.license` file. 
Only changelog fragments (which will not be part of a release) are covered by a blanket statement in `.reuse/dep5`. This conforms to the [REUSE specification](https://reuse.software/spec/). diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml index 02bd8e7803..8c5ac88063 100644 --- a/changelogs/changelog.yaml +++ b/changelogs/changelog.yaml @@ -1,2 +1,1565 @@ +--- ancestor: 8.0.0 -releases: {} +releases: + 9.0.0: + changes: + breaking_changes: + - cpanm - the default of the ``mode`` option changed from ``compatibility`` + to ``new`` (https://github.com/ansible-collections/community.general/pull/8198). + - django_manage - the module now requires Django >= 4.1 (https://github.com/ansible-collections/community.general/pull/8198). + - django_manage - the module will now fail if ``virtualenv`` is specified + but no virtual environment exists at that location (https://github.com/ansible-collections/community.general/pull/8198). + - redfish_command, redfish_config, redfish_info - change the default for ``timeout`` + from 10 to 60 (https://github.com/ansible-collections/community.general/pull/8198). + bugfixes: + - aix_filesystem - fix ``_validate_vg`` not passing VG name to ``lsvg_cmd`` + (https://github.com/ansible-collections/community.general/issues/8151). + - aix_filesystem - fix issue with empty list items in crfs logic and option + order (https://github.com/ansible-collections/community.general/pull/8052). + - apt-rpm - the module did not upgrade packages if a newer version exists. + Now the package will be reinstalled if the candidate is newer than the installed + version (https://github.com/ansible-collections/community.general/issues/7414). + - apt_rpm - when checking whether packages were installed after running ``apt-get + -y install ``, only the last package name was checked (https://github.com/ansible-collections/community.general/pull/8263). + - bitwarden_secrets_manager lookup plugin - implements retry with exponential + backoff to avoid lookup errors when Bitwardn's API rate limiting is encountered + (https://github.com/ansible-collections/community.general/issues/8230, https://github.com/ansible-collections/community.general/pull/8238). + - 'cargo - fix idempotency issues when using a custom installation path for + packages (using the ``--path`` parameter). The initial installation runs + fine, but subsequent runs use the ``get_installed()`` function which did + not check the given installation location, before running ``cargo install``. + This resulted in a false ``changed`` state. Also the removal of packeges + using ``state: absent`` failed, as the installation check did not use the + given parameter (https://github.com/ansible-collections/community.general/pull/7970).' + - cloudflare_dns - fix Cloudflare lookup of SHFP records (https://github.com/ansible-collections/community.general/issues/7652). + - consul_token - fix token creation without ``accessor_id`` (https://github.com/ansible-collections/community.general/pull/8091). + - from_ini filter plugin - disabling interpolation of ``ConfigParser`` to + allow converting values with a ``%`` sign (https://github.com/ansible-collections/community.general/issues/8183, + https://github.com/ansible-collections/community.general/pull/8185). + - gitlab_group_members - fix gitlab constants call in ``gitlab_group_members`` + module (https://github.com/ansible-collections/community.general/issues/7467). 
+ - gitlab_issue - fix behavior to search GitLab issue, using ``search`` keyword + instead of ``title`` (https://github.com/ansible-collections/community.general/issues/7846). + - gitlab_issue, gitlab_label, gitlab_milestone - avoid crash during version + comparison when the python-gitlab Python module is not installed (https://github.com/ansible-collections/community.general/pull/8158). + - gitlab_project_members - fix gitlab constants call in ``gitlab_project_members`` + module (https://github.com/ansible-collections/community.general/issues/7467). + - gitlab_protected_branches - fix gitlab constants call in ``gitlab_protected_branches`` + module (https://github.com/ansible-collections/community.general/issues/7467). + - gitlab_runner - fix pagination when checking for existing runners (https://github.com/ansible-collections/community.general/pull/7790). + - gitlab_user - fix gitlab constants call in ``gitlab_user`` module (https://github.com/ansible-collections/community.general/issues/7467). + - haproxy - fix an issue where HAProxy could get stuck in DRAIN mode when + the backend was unreachable (https://github.com/ansible-collections/community.general/issues/8092). + - homebrew - detect already installed formulae and casks using JSON output + from ``brew info`` (https://github.com/ansible-collections/community.general/issues/864). + - homebrew - error returned from brew command was ignored and tried to parse + empty JSON. Fix now checks for an error and raises it to give accurate error + message to users (https://github.com/ansible-collections/community.general/issues/8047). + - incus connection plugin - treats ``inventory_hostname`` as a variable instead + of a literal in remote connections (https://github.com/ansible-collections/community.general/issues/7874). + - interface_files - also consider ``address_family`` when changing ``option=method`` + (https://github.com/ansible-collections/community.general/issues/7610, https://github.com/ansible-collections/community.general/pull/7612). + - inventory plugins - add unsafe wrapper to avoid marking strings that do + not contain ``{`` or ``}`` as unsafe, to work around a bug in AWX ((https://github.com/ansible-collections/community.general/issues/8212, + https://github.com/ansible-collections/community.general/pull/8225). + - ipa - fix get version regex in IPA module_utils (https://github.com/ansible-collections/community.general/pull/8175). + - ipa_hbacrule - the module uses a string for ``ipaenabledflag`` for new FreeIPA + versions while the returned value is a boolean (https://github.com/ansible-collections/community.general/pull/7880). + - ipa_otptoken - the module expect ``ipatokendisabled`` as string but the + ``ipatokendisabled`` value is returned as a boolean (https://github.com/ansible-collections/community.general/pull/7795). + - ipa_sudorule - the module uses a string for ``ipaenabledflag`` for new FreeIPA + versions while the returned value is a boolean (https://github.com/ansible-collections/community.general/pull/7880). + - iptables_state - fix idempotency issues when restoring incomplete iptables + dumps (https://github.com/ansible-collections/community.general/issues/8029). + - irc - replace ``ssl.wrap_socket`` that was removed from Python 3.12 with + code for creating a proper SSL context (https://github.com/ansible-collections/community.general/pull/7542). + - keycloak_* - fix Keycloak API client to quote ``/`` properly (https://github.com/ansible-collections/community.general/pull/7641). 
+ - keycloak_authz_permission - resource payload variable for scope-based permission + was constructed as a string, when it needs to be a list, even for a single + item (https://github.com/ansible-collections/community.general/issues/7151). + - keycloak_client - add sorted ``defaultClientScopes`` and ``optionalClientScopes`` + to normalizations (https://github.com/ansible-collections/community.general/pull/8223). + - keycloak_client - fixes issue when metadata is provided in desired state + when task is in check mode (https://github.com/ansible-collections/community.general/issues/1226, + https://github.com/ansible-collections/community.general/pull/7881). + - keycloak_identity_provider - ``mappers`` processing was not idempotent if + the mappers configuration list had not been sorted by name (in ascending + order). Fix resolves the issue by sorting mappers in the desired state using + the same key which is used for obtaining existing state (https://github.com/ansible-collections/community.general/pull/7418). + - keycloak_identity_provider - it was not possible to reconfigure (add, remove) + ``mappers`` once they were created initially. Removal was ignored, adding + new ones resulted in dropping the pre-existing unmodified mappers. Fix resolves + the issue by supplying correct input to the internal update call (https://github.com/ansible-collections/community.general/pull/7418). + - keycloak_realm - add normalizations for ``enabledEventTypes`` and ``supportedLocales`` + (https://github.com/ansible-collections/community.general/pull/8224). + - keycloak_user - when ``force`` is set, but user does not exist, do not try + to delete it (https://github.com/ansible-collections/community.general/pull/7696). + - keycloak_user_federation - fix diff of empty ``krbPrincipalAttribute`` (https://github.com/ansible-collections/community.general/pull/8320). + - ldap - previously the order number (if present) was expected to follow an + equals sign in the DN. This makes it so the order number string is identified + correctly anywhere within the DN (https://github.com/ansible-collections/community.general/issues/7646). + - linode inventory plugin - add descriptive error message for linode inventory + plugin (https://github.com/ansible-collections/community.general/pull/8133). + - log_entries callback plugin - replace ``ssl.wrap_socket`` that was removed + from Python 3.12 with code for creating a proper SSL context (https://github.com/ansible-collections/community.general/pull/7542). + - lvol - test for output messages in both ``stdout`` and ``stderr`` (https://github.com/ansible-collections/community.general/pull/7601, + https://github.com/ansible-collections/community.general/issues/7182). + - 'merge_variables lookup plugin - fixing cross host merge: providing access + to foreign hosts variables to the perspective of the host that is performing + the merge (https://github.com/ansible-collections/community.general/pull/8303).' + - modprobe - listing modules files or modprobe files could trigger a FileNotFoundError + if ``/etc/modprobe.d`` or ``/etc/modules-load.d`` did not exist. Relevant + functions now return empty lists if the directories do not exist to avoid + crashing the module (https://github.com/ansible-collections/community.general/issues/7717). + - mssql_script - make the module work with Python 2 (https://github.com/ansible-collections/community.general/issues/7818, + https://github.com/ansible-collections/community.general/pull/7821). 
+ - nmcli - fix ``connection.slave-type`` wired to ``bond`` and not with parameter + ``slave_type`` in case of connection type ``wifi`` (https://github.com/ansible-collections/community.general/issues/7389). + - ocapi_utils, oci_utils, redfish_utils module utils - replace ``type()`` + calls with ``isinstance()`` calls (https://github.com/ansible-collections/community.general/pull/7501). + - onepassword lookup plugin - failed for fields that were in sections and + had uppercase letters in the label/ID. Field lookups are now case insensitive + in all cases (https://github.com/ansible-collections/community.general/pull/7919). + - onepassword lookup plugin - field and section titles are now case insensitive + when using op CLI version two or later. This matches the behavior of version + one (https://github.com/ansible-collections/community.general/pull/7564). + - opentelemetry callback plugin - close spans always (https://github.com/ansible-collections/community.general/pull/8367). + - opentelemetry callback plugin - honour the ``disable_logs`` option to avoid + storing task results since they are not used regardless (https://github.com/ansible-collections/community.general/pull/8373). + - pacemaker_cluster - actually implement check mode, which the module claims + to support. This means that until now the module also did changes in check + mode (https://github.com/ansible-collections/community.general/pull/8081). + - pam_limits - when the file does not exist, do not create it in check mode + (https://github.com/ansible-collections/community.general/issues/8050, https://github.com/ansible-collections/community.general/pull/8057). + - pipx module utils - change the CLI argument formatter for the ``pip_args`` + parameter (https://github.com/ansible-collections/community.general/issues/7497, + https://github.com/ansible-collections/community.general/pull/7506). + - pkgin - pkgin (pkgsrc package manager used by SmartOS) raises erratic exceptions + and spurious ``changed=true`` (https://github.com/ansible-collections/community.general/pull/7971). + - proxmox - fix updating a container config if the setting does not already + exist (https://github.com/ansible-collections/community.general/pull/7872). + - proxmox_kvm - fixed status check getting from node-specific API endpoint + (https://github.com/ansible-collections/community.general/issues/7817). + - proxmox_kvm - running ``state=template`` will first check whether VM is + already a template (https://github.com/ansible-collections/community.general/pull/7792). + - proxmox_pool_member - absent state for type VM did not delete VMs from the + pools (https://github.com/ansible-collections/community.general/pull/7464). + - puppet - add option ``environment_lang`` to set the environment language + encoding. Defaults to lang ``C``. It is recommended to set it to ``C.UTF-8`` + or ``en_US.UTF-8`` depending on what is available on your system. (https://github.com/ansible-collections/community.general/issues/8000) + - redfish_command - fix usage of message parsing in ``SimpleUpdate`` and ``MultipartHTTPPushUpdate`` + commands to treat the lack of a ``MessageId`` as no message (https://github.com/ansible-collections/community.general/issues/7465, + https://github.com/ansible-collections/community.general/pull/7471). + - redfish_info - allow for a GET operation invoked by ``GetUpdateStatus`` + to allow for an empty response body for cases where a service returns 204 + No Content (https://github.com/ansible-collections/community.general/issues/8003). 
+ - redfish_info - correct uncaught exception when attempting to retrieve ``Chassis`` + information (https://github.com/ansible-collections/community.general/pull/7952). + - 'redhat_subscription - use the D-Bus registration on RHEL 7 only on 7.4 + and + + greater; older versions of RHEL 7 do not have it + + (https://github.com/ansible-collections/community.general/issues/7622, + + https://github.com/ansible-collections/community.general/pull/7624). + + ' + - riak - support ``riak admin`` sub-command in newer Riak KV versions beside + the legacy ``riak-admin`` main command (https://github.com/ansible-collections/community.general/pull/8211). + - statusio_maintenance - fix error caused by incorrectly formed API data payload. + Was raising "Failed to create maintenance HTTP Error 400 Bad Request" caused + by bad data type for date/time and deprecated dict keys (https://github.com/ansible-collections/community.general/pull/7754). + - terraform - fix multiline string handling in complex variables (https://github.com/ansible-collections/community.general/pull/7535). + - to_ini filter plugin - disabling interpolation of ``ConfigParser`` to allow + converting values with a ``%`` sign (https://github.com/ansible-collections/community.general/issues/8183, + https://github.com/ansible-collections/community.general/pull/8185). + - xml - make module work with lxml 5.1.1, which removed some internals that + the module was relying on (https://github.com/ansible-collections/community.general/pull/8169). + deprecated_features: + - MH DependencyCtxMgr module_utils - deprecate ``module_utils.mh.mixin.deps.DependencyCtxMgr`` + in favour of ``module_utils.deps`` (https://github.com/ansible-collections/community.general/pull/8280). + - ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.AnsibleModule`` + (https://github.com/ansible-collections/community.general/pull/8280). + - ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.DependencyCtxMgr`` + (https://github.com/ansible-collections/community.general/pull/8280). + - ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.StateMixin`` + (https://github.com/ansible-collections/community.general/pull/8280). + - ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.VarDict,`` + (https://github.com/ansible-collections/community.general/pull/8280). + - ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.VarMeta`` + (https://github.com/ansible-collections/community.general/pull/8280). + - ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.VarsMixin`` + (https://github.com/ansible-collections/community.general/pull/8280). + - ModuleHelper module_utils - deprecate use of ``VarsMixin`` in favor of using + the ``VardDict`` module_utils (https://github.com/ansible-collections/community.general/pull/8226). + - ModuleHelper vars module_utils - bump deprecation of ``VarMeta``, ``VarDict`` + and ``VarsMixin`` to version 11.0.0 (https://github.com/ansible-collections/community.general/pull/8226). + - apt_rpm - the behavior of ``state=present`` and ``state=installed`` is deprecated + and will change in community.general 11.0.0. Right now the module will upgrade + a package to the latest version if one of these two states is used. You + should explicitly use ``state=latest`` if you want this behavior, and switch + to ``state=present_not_latest`` if you do not want to upgrade the package + if it is already installed. 
In community.general 11.0.0 the behavior of + ``state=present`` and ``state=installed`` will change to that of ``state=present_not_latest`` + (https://github.com/ansible-collections/community.general/issues/8217, https://github.com/ansible-collections/community.general/pull/8285). + - consul_acl - the module has been deprecated and will be removed in community.general + 10.0.0. ``consul_token`` and ``consul_policy`` can be used instead (https://github.com/ansible-collections/community.general/pull/7901). + - django_manage - the ``ack_venv_creation_deprecation`` option has no more + effect and will be removed from community.general 11.0.0 (https://github.com/ansible-collections/community.general/pull/8198). + - gitlab modules - the basic auth method on GitLab API have been deprecated + and will be removed in community.general 10.0.0 (https://github.com/ansible-collections/community.general/pull/8383). + - hipchat callback plugin - the hipchat service has been discontinued and + the self-hosted variant has been End of Life since 2020. The callback plugin + is therefore deprecated and will be removed from community.general 10.0.0 + if nobody provides compelling reasons to still keep it (https://github.com/ansible-collections/community.general/issues/8184, + https://github.com/ansible-collections/community.general/pull/8189). + - irc - the defaults ``false`` for ``use_tls`` and ``validate_certs`` have + been deprecated and will change to ``true`` in community.general 10.0.0 + to improve security. You can already improve security now by explicitly + setting them to ``true``. Specifying values now disables the deprecation + warning (https://github.com/ansible-collections/community.general/pull/7578). + minor_changes: + - PythonRunner module utils - specialisation of ``CmdRunner`` to execute Python + scripts (https://github.com/ansible-collections/community.general/pull/8289). + - Use offset-aware ``datetime.datetime`` objects (with timezone UTC) instead + of offset-naive UTC timestamps, which are deprecated in Python 3.12 (https://github.com/ansible-collections/community.general/pull/8222). + - aix_lvol - refactor module to pass list of arguments to ``module.run_command()`` + instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264). + - 'apt_rpm - add new states ``latest`` and ``present_not_latest``. The value + ``latest`` is equivalent to the current behavior of ``present``, which will + upgrade a package if a newer version exists. ``present_not_latest`` does + what most users would expect ``present`` to do: it does not upgrade if the + package is already installed. The current behavior of ``present`` will be + deprecated in a later version, and eventually changed to that of ``present_not_latest`` + (https://github.com/ansible-collections/community.general/issues/8217, https://github.com/ansible-collections/community.general/pull/8247).' + - apt_rpm - refactor module to pass list of arguments to ``module.run_command()`` + instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264). + - bitwarden lookup plugin - add ``bw_session`` option, to pass session key + instead of reading from env (https://github.com/ansible-collections/community.general/pull/7994). + - bitwarden lookup plugin - add support to filter by organization ID (https://github.com/ansible-collections/community.general/pull/8188). 
+ - bitwarden lookup plugin - allow fetching all records of a given collection + ID by allowing an empty value for ``search_value`` when ``collection_id`` + is provided (https://github.com/ansible-collections/community.general/pull/8013). + - bitwarden lookup plugin - when looking for items using an item ID, the item + is now accessed directly with ``bw get item`` instead of searching through + all items. This doubles the lookup speed (https://github.com/ansible-collections/community.general/pull/7468). + - btrfs_subvolume - refactor module to pass list of arguments to ``module.run_command()`` + instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264). + - cmd_runner module_utils - add validation for minimum and maximum length + in the value passed to ``cmd_runner_fmt.as_list()`` (https://github.com/ansible-collections/community.general/pull/8288). + - consul_auth_method, consul_binding_rule, consul_policy, consul_role, consul_session, + consul_token - added action group ``community.general.consul`` (https://github.com/ansible-collections/community.general/pull/7897). + - consul_policy - added support for diff and check mode (https://github.com/ansible-collections/community.general/pull/7878). + - consul_policy, consul_role, consul_session - removed dependency on ``requests`` + and factored out common parts (https://github.com/ansible-collections/community.general/pull/7826, + https://github.com/ansible-collections/community.general/pull/7878). + - consul_role - ``node_identities`` now expects a ``node_name`` option to + match the Consul API; the old ``name`` is still supported as an alias (https://github.com/ansible-collections/community.general/pull/7878). + - consul_role - ``service_identities`` now expects a ``service_name`` option + to match the Consul API; the old ``name`` is still supported as an alias (https://github.com/ansible-collections/community.general/pull/7878). + - consul_role - added support for diff mode (https://github.com/ansible-collections/community.general/pull/7878). + - consul_role - added support for templated policies (https://github.com/ansible-collections/community.general/pull/7878). + - elastic callback plugin - close the elastic client to avoid leaking resources (https://github.com/ansible-collections/community.general/pull/7517). + - filesystem - add bcachefs support (https://github.com/ansible-collections/community.general/pull/8126). + - gandi_livedns - adds support for personal access tokens (https://github.com/ansible-collections/community.general/issues/7639, + https://github.com/ansible-collections/community.general/pull/8337). + - gconftool2 - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226). + - git_config - allow multiple git configs for the same name with the new ``add_mode`` + option (https://github.com/ansible-collections/community.general/pull/7260). + - git_config - the ``after`` and ``before`` fields in the ``diff`` of the + return value can be a list instead of a string in case multiple configs with + the same key are affected (https://github.com/ansible-collections/community.general/pull/7260). + - git_config - when a value is unset, all configs with the same key are unset + (https://github.com/ansible-collections/community.general/pull/7260). + - gitlab modules - add ``ca_path`` option (https://github.com/ansible-collections/community.general/pull/7472).
+ - gitlab modules - remove duplicate ``gitlab`` package check (https://github.com/ansible-collections/community.general/pull/7486). + - gitlab_deploy_key, gitlab_group_members, gitlab_group_variable, gitlab_hook, + gitlab_instance_variable, gitlab_project_badge, gitlab_project_variable, + gitlab_user - improve API pagination and compatibility with different versions + of ``python-gitlab`` (https://github.com/ansible-collections/community.general/pull/7790). + - gitlab_hook - adds ``releases_events`` parameter to support Releases + event triggers on GitLab hooks (https://github.com/ansible-collections/community.general/pull/7956). + - gitlab_runner - add support for new runner creation workflow (https://github.com/ansible-collections/community.general/pull/7199). + - homebrew - adds ``force_formula`` parameter to disambiguate a formula from + a cask of the same name (https://github.com/ansible-collections/community.general/issues/8274). + - homebrew, homebrew_cask - refactor common argument validation logic into + a dedicated ``homebrew`` module utils (https://github.com/ansible-collections/community.general/issues/8323, + https://github.com/ansible-collections/community.general/pull/8324). + - icinga2 inventory plugin - add Jinja2 templating support to ``url``, ``user``, + and ``password`` parameters (https://github.com/ansible-collections/community.general/issues/7074, + https://github.com/ansible-collections/community.general/pull/7996). + - icinga2 inventory plugin - adds new parameter ``group_by_hostgroups`` in + order to make grouping by Icinga2 hostgroups optional (https://github.com/ansible-collections/community.general/pull/7998). + - ini_file - add an optional parameter ``section_has_values``. If the target + ini file contains more than one ``section``, use ``section_has_values`` + to specify which one should be updated (https://github.com/ansible-collections/community.general/pull/7505). + - ini_file - support optional spaces between section names and their surrounding + brackets (https://github.com/ansible-collections/community.general/pull/8075). + - installp - refactor module to pass list of arguments to ``module.run_command()`` + instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264). + - ipa_config - adds ``passkey`` to the ``ipauserauthtype`` parameter's + choices (https://github.com/ansible-collections/community.general/pull/7588). + - ipa_dnsrecord - adds ability to manage NS record types (https://github.com/ansible-collections/community.general/pull/7737). + - ipa_pwpolicy - refactor module and replace a sequence of ``if`` statements + with a ``for`` loop (https://github.com/ansible-collections/community.general/pull/7723). + - ipa_pwpolicy - update module to support ``maxrepeat``, ``maxsequence``, + ``dictcheck``, ``usercheck``, ``gracelimit`` parameters in FreeIPA password + policies (https://github.com/ansible-collections/community.general/pull/7723). + - ipa_sudorule - adds options to include denied commands or command groups + (https://github.com/ansible-collections/community.general/pull/7415). + - ipa_user - adds ``idp`` and ``passkey`` to the ``ipauserauthtype`` parameter's + choices (https://github.com/ansible-collections/community.general/pull/7589). + - irc - add ``validate_certs`` option, and rename ``use_ssl`` to ``use_tls``, + while keeping ``use_ssl`` as an alias. The default value for ``validate_certs`` + is ``false`` for backwards compatibility.
We recommend that every user of + this module explicitly set ``use_tls=true`` and ``validate_certs=true`` + whenever possible, especially when communicating with IRC servers over the + internet (https://github.com/ansible-collections/community.general/pull/7550). + - java_cert - add ``cert_content`` argument (https://github.com/ansible-collections/community.general/pull/8153). + - java_cert - enable ``owner``, ``group``, ``mode``, and other generic file + arguments (https://github.com/ansible-collections/community.general/pull/8116). + - kernel_blacklist - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226). + - keycloak module utils - expose error message from Keycloak server for HTTP + errors in some specific situations (https://github.com/ansible-collections/community.general/pull/7645). + - keycloak_client, keycloak_clientscope, keycloak_clienttemplate - added ``docker-v2`` + protocol support, enhancing alignment with Keycloak's protocol options (https://github.com/ansible-collections/community.general/issues/8215, + https://github.com/ansible-collections/community.general/pull/8216). + - keycloak_realm_key - the ``config.algorithm`` option now supports 8 additional + key algorithms (https://github.com/ansible-collections/community.general/pull/7698). + - keycloak_realm_key - the ``config.certificate`` option value is no longer + defined with ``no_log=True`` (https://github.com/ansible-collections/community.general/pull/7698). + - keycloak_realm_key - the ``provider_id`` option now supports RSA encryption + key usage (value ``rsa-enc``) (https://github.com/ansible-collections/community.general/pull/7698). + - keycloak_user_federation - add option for ``krbPrincipalAttribute`` (https://github.com/ansible-collections/community.general/pull/7538). + - keycloak_user_federation - allow custom user storage providers to be set + through ``provider_id`` (https://github.com/ansible-collections/community.general/pull/7789). + - ldap_attrs - module now supports diff mode, showing which attributes are + changed within an operation (https://github.com/ansible-collections/community.general/pull/8073). + - lvg - refactor module to pass list of arguments to ``module.run_command()`` + instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264). + - lvol - change ``pvs`` argument type to list of strings (https://github.com/ansible-collections/community.general/pull/7676, + https://github.com/ansible-collections/community.general/issues/7504). + - lvol - refactor module to pass list of arguments to ``module.run_command()`` + instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264). + - 'lxd connection plugin - tighten the detection logic for lxd ``Instance + not found`` errors, to avoid false detection on unrelated errors such as + ``/usr/bin/python3: not found`` (https://github.com/ansible-collections/community.general/pull/7521).' + - lxd_container - uses ``/1.0/instances`` API endpoint, if available. Falls + back to ``/1.0/containers`` or ``/1.0/virtual-machines``. Fixes issue when + using Incus or LXD 5.19 due to migrating to ``/1.0/instances`` endpoint + (https://github.com/ansible-collections/community.general/pull/7980). + - macports - refactor module to pass list of arguments to ``module.run_command()`` + instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264).
+ - mail - add ``Message-ID`` header, which is required by some mail servers + (https://github.com/ansible-collections/community.general/pull/7740). + - mail module, mail callback plugin - allow configuring the domain name of + the Message-ID header with a new ``message_id_domain`` option (https://github.com/ansible-collections/community.general/pull/7765). + - mssql_script - adds transactional (rollback/commit) support via optional + boolean param ``transaction`` (https://github.com/ansible-collections/community.general/pull/7976). + - netcup_dns - adds support for record types ``OPENPGPKEY``, ``SMIMEA``, and + ``SSHFP`` (https://github.com/ansible-collections/community.general/pull/7489). + - nmcli - add support for new connection type ``loopback`` (https://github.com/ansible-collections/community.general/issues/6572). + - nmcli - adds OpenvSwitch support with new ``type`` values ``ovs-port``, + ``ovs-interface``, and ``ovs-bridge``, and new ``slave_type`` value ``ovs-port`` + (https://github.com/ansible-collections/community.general/pull/8154). + - nmcli - allow for ``infiniband`` slaves of ``bond`` interface types (https://github.com/ansible-collections/community.general/pull/7569). + - nmcli - allow for the setting of ``MTU`` for ``infiniband`` and ``bond`` + interface types (https://github.com/ansible-collections/community.general/pull/7499). + - nmcli - allow setting ``MTU`` for ``bond-slave`` interface types (https://github.com/ansible-collections/community.general/pull/8118). + - onepassword lookup plugin - support 1Password Connect with the opv2 client + by setting the connect_host and connect_token parameters (https://github.com/ansible-collections/community.general/pull/7116). + - onepassword_raw lookup plugin - support 1Password Connect with the opv2 + client by setting the connect_host and connect_token parameters (https://github.com/ansible-collections/community.general/pull/7116). + - opentelemetry - add support for HTTP trace_exporter and configure the behavior + via ``OTEL_EXPORTER_OTLP_TRACES_PROTOCOL`` (https://github.com/ansible-collections/community.general/issues/7888, + https://github.com/ansible-collections/community.general/pull/8321). + - opentelemetry - add support for exporting spans in a file via ``ANSIBLE_OPENTELEMETRY_STORE_SPANS_IN_FILE`` + (https://github.com/ansible-collections/community.general/issues/7888, https://github.com/ansible-collections/community.general/pull/8363). + - opkg - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226). + - osx_defaults - add option ``check_types`` to enable changing the type of + existing defaults on the fly (https://github.com/ansible-collections/community.general/pull/8173). + - parted - refactor module to pass list of arguments to ``module.run_command()`` + instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264). + - passwordstore - adds ``timestamp`` and ``preserve`` parameters to modify + the stored password format (https://github.com/ansible-collections/community.general/pull/7426). + - passwordstore lookup - add ``missing_subkey`` parameter defining the behavior + of the lookup when a passwordstore subkey is missing (https://github.com/ansible-collections/community.general/pull/8166). + - pipx - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226).
+ - pkg5 - add support for non-silent execution (https://github.com/ansible-collections/community.general/issues/8379, + https://github.com/ansible-collections/community.general/pull/8382). + - pkgin - refactor module to pass list of arguments to ``module.run_command()`` + instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264). + - portage - adds the possibility to explicitly tell portage to write packages + to the world file (https://github.com/ansible-collections/community.general/issues/6226, + https://github.com/ansible-collections/community.general/pull/8236). + - portinstall - refactor module to pass list of arguments to ``module.run_command()`` + instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264). + - proxmox - adds ``startup`` parameters to configure startup order, startup + delay and shutdown delay (https://github.com/ansible-collections/community.general/pull/8038). + - proxmox - adds ``template`` value to the ``state`` parameter, allowing conversion + of a container to a template (https://github.com/ansible-collections/community.general/pull/7143). + - proxmox - adds ``update`` parameter, allowing update of an already existing + container's configuration (https://github.com/ansible-collections/community.general/pull/7540). + - proxmox inventory plugin - adds an option to exclude nodes from the dynamic + inventory generation. The new setting is optional; not using this option + will behave as usual (https://github.com/ansible-collections/community.general/issues/6714, + https://github.com/ansible-collections/community.general/pull/7461). + - proxmox* modules - there is now a ``community.general.proxmox`` module defaults + group that can be used to set default options for all Proxmox modules (https://github.com/ansible-collections/community.general/pull/8334). + - proxmox_disk - add ability to manipulate CD-ROM drive (https://github.com/ansible-collections/community.general/pull/7495). + - proxmox_kvm - add parameter ``update_unsafe`` to avoid limitations when + updating dangerous values (https://github.com/ansible-collections/community.general/pull/7843). + - proxmox_kvm - adds ``template`` value to the ``state`` parameter, allowing + conversion of a VM to a template (https://github.com/ansible-collections/community.general/pull/7143). + - proxmox_kvm - adds ``usb`` parameter for setting USB devices on Proxmox KVM + VMs (https://github.com/ansible-collections/community.general/pull/8199). + - proxmox_kvm - support the ``hookscript`` parameter (https://github.com/ansible-collections/community.general/issues/7600). + - proxmox_ostype - it is now possible to specify the ``ostype`` when creating + an LXC container (https://github.com/ansible-collections/community.general/pull/7462). + - proxmox_vm_info - add ability to retrieve configuration info (https://github.com/ansible-collections/community.general/pull/7485). + - puppet - new feature to set ``--waitforlock`` option (https://github.com/ansible-collections/community.general/pull/8282). + - redfish_command - add command ``ResetToDefaults`` to reset manager to default + state (https://github.com/ansible-collections/community.general/issues/8163). + - redfish_config - add command ``SetServiceIdentification`` to set service + identification (https://github.com/ansible-collections/community.general/issues/7916).
+ - redfish_info - add boolean return value ``MultipartHttpPush`` to ``GetFirmwareUpdateCapabilities`` + (https://github.com/ansible-collections/community.general/issues/8194, https://github.com/ansible-collections/community.general/pull/8195). + - redfish_info - add command ``GetServiceIdentification`` to get service identification + (https://github.com/ansible-collections/community.general/issues/7882). + - redfish_info - adding the ``BootProgress`` property when getting ``Systems`` + info (https://github.com/ansible-collections/community.general/pull/7626). + - revbitspss lookup plugin - removed a redundant unicode prefix. The prefix + was not necessary for Python 3 and has been cleaned up to streamline the + code (https://github.com/ansible-collections/community.general/pull/8087). + - rundeck module utils - allow to pass ``Content-Type`` to API requests (https://github.com/ansible-collections/community.general/pull/7684). + - slackpkg - refactor module to pass list of arguments to ``module.run_command()`` + instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264). + - ssh_config - adds ``controlmaster``, ``controlpath`` and ``controlpersist`` + parameters (https://github.com/ansible-collections/community.general/pull/7456). + - ssh_config - allow ``accept-new`` as valid value for ``strict_host_key_checking`` + (https://github.com/ansible-collections/community.general/pull/8257). + - ssh_config - new feature to set ``AddKeysToAgent`` option to ``yes`` or + ``no`` (https://github.com/ansible-collections/community.general/pull/7703). + - ssh_config - new feature to set ``IdentitiesOnly`` option to ``yes`` or + ``no`` (https://github.com/ansible-collections/community.general/pull/7704). + - sudoers - add support for the ``NOEXEC`` tag in sudoers rules (https://github.com/ansible-collections/community.general/pull/7983). + - svr4pkg - refactor module to pass list of arguments to ``module.run_command()`` + instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264). + - swdepot - refactor module to pass list of arguments to ``module.run_command()`` + instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264). + - terraform - add support for ``diff_mode`` for terraform resource_changes + (https://github.com/ansible-collections/community.general/pull/7896). + - terraform - fix ``diff_mode`` in state ``absent`` and when terraform ``resource_changes`` + does not exist (https://github.com/ansible-collections/community.general/pull/7963). + - xcc_redfish_command - added support for raw POSTs (``command=PostResource`` + in ``category=Raw``) without a specific action info (https://github.com/ansible-collections/community.general/pull/7746). + - xfconf - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226). + - xfconf_info - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226). + release_summary: This is release 9.0.0 of ``community.general``, released on + 2024-05-20. + removed_features: + - The deprecated redirects for internal module names have been removed. These + internal redirects were extra-long FQCNs like ``community.general.packaging.os.apt_rpm`` + that redirect to the short FQCN ``community.general.apt_rpm``. 
They were + originally needed to implement flatmapping; as various tooling started to + recommend users to use the long names, flatmapping was removed from the collection + and redirects were added for users who had already followed these incorrect + recommendations (https://github.com/ansible-collections/community.general/pull/7835). + - ansible_galaxy_install - the ``ack_ansible29`` and ``ack_min_ansiblecore211`` + options have been removed. They no longer had any effect (https://github.com/ansible-collections/community.general/pull/8198). + - cloudflare_dns - remove support for SPF records. These are no longer supported + by CloudFlare (https://github.com/ansible-collections/community.general/pull/7782). + - django_manage - support for the ``command`` values ``cleanup``, ``syncdb``, + and ``validate`` was removed. Use ``clearsessions``, ``migrate``, and ``check`` + instead, respectively (https://github.com/ansible-collections/community.general/pull/8198). + - flowdock - this module relied on HTTPS APIs that do not exist anymore and + was thus removed (https://github.com/ansible-collections/community.general/pull/8198). + - mh.mixins.deps module utils - the ``DependencyMixin`` has been removed. + Use the ``deps`` module utils instead (https://github.com/ansible-collections/community.general/pull/8198). + - proxmox - the ``proxmox_default_behavior`` option has been removed (https://github.com/ansible-collections/community.general/pull/8198). + - rax* modules, rax module utils, rax docs fragment - the Rackspace modules + relied on the deprecated package ``pyrax`` and were thus removed (https://github.com/ansible-collections/community.general/pull/8198). + - redhat module utils - the classes ``Rhsm``, ``RhsmPool``, and ``RhsmPools`` + have been removed (https://github.com/ansible-collections/community.general/pull/8198). + - redhat_subscription - the alias ``autosubscribe`` of the ``auto_attach`` + option was removed (https://github.com/ansible-collections/community.general/pull/8198). + - stackdriver - this module relied on HTTPS APIs that do not exist anymore + and was thus removed (https://github.com/ansible-collections/community.general/pull/8198). + - webfaction_* modules - these modules relied on HTTPS APIs that do not exist + anymore and were thus removed (https://github.com/ansible-collections/community.general/pull/8198). + security_fixes: + - cobbler, gitlab_runners, icinga2, linode, lxd, nmap, online, opennebula, + proxmox, scaleway, stackpath_compute, virtualbox, and xen_orchestra inventory + plugins - make sure all data received from the remote servers is marked as + unsafe, so that remote code execution by obtaining texts that can be evaluated + as templates is not possible (https://www.die-welt.net/2024/03/remote-code-execution-in-ansible-dynamic-inventory-plugins/, + https://github.com/ansible-collections/community.general/pull/8098). + - keycloak_identity_provider - the client secret was not correctly sanitized + by the module. The return values ``proposed``, ``existing``, and ``end_state``, + as well as the diff, contained the client secret unmasked (https://github.com/ansible-collections/community.general/pull/8355).
+ fragments: + - 000-redhat_subscription-dbus-on-7.4-plus.yaml + - 5588-support-1password-connect.yml + - 6572-nmcli-add-support-loopback-type.yml + - 7143-proxmox-template.yml + - 7151-fix-keycloak_authz_permission-incorrect-resource-payload.yml + - 7199-gitlab-runner-new-creation-workflow.yml + - 7242-multi-values-for-same-name-in-git-config.yml + - 7389-nmcli-issue-with-creating-a-wifi-bridge-slave.yml + - 7418-kc_identity_provider-mapper-reconfiguration-fixes.yml + - 7426-add-timestamp-and-preserve-options-for-passwordstore.yaml + - 7456-add-ssh-control-master.yml + - 7461-proxmox-inventory-add-exclude-nodes.yaml + - 7462-Add-ostype-parameter-in-LXC-container-clone-of-ProxmoxVE.yaml + - 7464-fix-vm-removal-in-proxmox_pool_member.yml + - 7465-redfish-firmware-update-message-id-hardening.yml + - 7467-fix-gitlab-constants-calls.yml + - 7472-gitlab-add-ca-path-option.yml + - 7485-proxmox_vm_info-config.yml + - 7486-gitlab-refactor-package-check.yml + - 7489-netcup-dns-record-types.yml + - 7495-proxmox_disk-manipulate-cdrom.yml + - 7499-allow-mtu-setting-on-bond-and-infiniband-interfaces.yml + - 7501-type.yml + - 7505-ini_file-section_has.yml + - 7506-pipx-pipargs.yml + - 7517-elastic-close-client.yaml + - 7535-terraform-fix-multiline-string-handling-in-complex-variables.yml + - 7538-add-krbprincipalattribute-option.yml + - 7540-proxmox-update-config.yml + - 7542-irc-logentries-ssl.yml + - 7550-irc-use_tls-validate_certs.yml + - 7564-onepassword-lookup-case-insensitive.yaml + - 7569-infiniband-slave-support.yml + - 7577-fix-apt_rpm-module.yml + - 7578-irc-tls.yml + - 7588-ipa-config-new-choice-passkey-to-ipauserauthtype.yml + - 7589-ipa-config-new-choices-idp-and-passkey-to-ipauserauthtype.yml + - 7600-proxmox_kvm-hookscript.yml + - 7601-lvol-fix.yml + - 7612-interface_file-method.yml + - 7626-redfish-info-add-boot-progress-property.yml + - 7641-fix-keycloak-api-client-to-quote-properly.yml + - 7645-Keycloak-print-error-msg-from-server.yml + - 7646-fix-order-number-detection-in-dn.yml + - 7653-fix-cloudflare-lookup.yml + - 7676-lvol-pvs-as-list.yml + - 7683-added-contenttype-parameter.yml + - 7696-avoid-attempt-to-delete-non-existing-user.yml + - 7698-improvements-to-keycloak_realm_key.yml + - 7703-ssh_config_add_keys_to_agent_option.yml + - 7704-ssh_config_identities_only_option.yml + - 7717-prevent-modprobe-error.yml + - 7723-ipa-pwpolicy-update-pwpolicy-module.yml + - 7737-add-ipa-dnsrecord-ns-type.yml + - 7740-add-message-id-header-to-mail-module.yml + - 7746-raw_post-without-actions.yml + - 7754-fixed-payload-format.yml + - 7765-mail-message-id.yml + - 7782-cloudflare_dns-spf.yml + - 7789-keycloak-user-federation-custom-provider-type.yml + - 7790-gitlab-runner-api-pagination.yml + - 7791-proxmox_kvm-state-template-will-check-status-first.yaml + - 7797-ipa-fix-otp-idempotency.yml + - 7821-mssql_script-py2.yml + - 7826-consul-modules-refactoring.yaml + - 7843-proxmox_kvm-update_unsafe.yml + - 7847-gitlab-issue-title.yml + - 7870-homebrew-cask-installed-detection.yml + - 7872-proxmox_fix-update-if-setting-doesnt-exist.yaml + - 7874-incus_connection_treats_inventory_hostname_as_literal_in_remotes.yml + - 7880-ipa-fix-sudo-and-hbcalrule-idempotence.yml + - 7881-fix-keycloak-client-ckeckmode.yml + - 7882-add-redfish-get-service-identification.yml + - 7896-add-terraform-diff-mode.yml + - 7897-consul-action-group.yaml + - 7901-consul-acl-deprecation.yaml + - 7916-add-redfish-set-service-identification.yml + - 7919-onepassword-fieldname-casing.yaml + - 7951-fix-redfish_info-exception.yml + - 
7953-proxmox_kvm-fix_status_check.yml + - 7956-adding-releases_events-option-to-gitlab_hook-module.yaml + - 7963-fix-terraform-diff-absent.yml + - 7970-fix-cargo-path-idempotency.yaml + - 7976-add-mssql_script-transactional-support.yml + - 7983-sudoers-add-support-noexec.yml + - 7994-bitwarden-session-arg.yaml + - 7996-add-templating-support-to-icinga2-inventory.yml + - 7998-icinga2-inventory-group_by_hostgroups-parameter.yml + - 8003-redfish-get-update-status-empty-response.yml + - 8013-bitwarden-full-collection-item-list.yaml + - 8029-iptables-state-restore-check-mode.yml + - 8038-proxmox-startup.yml + - 8048-fix-homebrew-module-error-reporting-on-become-true.yaml + - 8057-pam_limits-check-mode.yml + - 8073-ldap-attrs-diff.yml + - 8075-optional-space-around-section-names.yaml + - 8087-removed-redundant-unicode-prefixes.yml + - 8091-consul-token-fixes.yaml + - 8100-haproxy-drain-fails-on-down-backend.yml + - 8116-java_cert-enable-owner-group-mode-args.yml + - 8118-fix-bond-slave-honoring-mtu.yml + - 8126-filesystem-bcachefs-support.yaml + - 8133-add-error-message-for-linode-inventory-plugin.yaml + - 8151-fix-lsvg_cmd-failed.yml + - 8153-java_cert-add-cert_content-arg.yml + - 8154-add-ovs-commands-to-nmcli-module.yml + - 8158-gitlab-version-check.yml + - 8163-redfish-implementing-reset-to-defaults.yml + - 8166-password-store-lookup-missing-subkey.yml + - 8169-lxml.yml + - 8173-osx_defaults-check_type.yml + - 8175-get_ipa_version_regex.yml + - 8183-from_ini_to_ini.yml + - 8188-bitwarden-add-organization_id.yml + - 8194-redfish-add-multipart-to-capabilities.yml + - 8199-added-usb-support-to-proxmox-module.yml + - 8211-riak-admin-sub-command-support.yml + - 8215-add-docker-v2-protocol.yml + - 8222-datetime.yml + - 8223-keycloak_client-additional-normalizations.yaml + - 8224-keycloak_realm-add-normalizations.yaml + - 8225-unsafe.yml + - 8226-mh-vardict.yml + - 8236-portage-select-feature.yml + - 8238-bitwarden-secrets-manager-rate-limit-retry-with-backoff.yml + - 8247-apt_rpm-latest.yml + - 8257-ssh-config-hostkey-support-accept-new.yaml + - 8263-apt_rpm-install-check.yml + - 8264-run_command.yml + - 8274-homebrew-force-formula.yml + - 8280-mh-deprecations.yml + - 8281-puppet-waitforlock.yaml + - 8285-apt_rpm-state-deprecate.yml + - 8288-cmdrunner-fmt-list-len-limits.yml + - 8289-python-runner.yml + - 8290-gandi-livedns-personal-access-token.yml + - 8303-fix-rendering-foreign-variables.yaml + - 8320-keycloak_user_federation-fix-diff-krbPrincipalAttribute.yaml + - 8321-fix-opentelemetry-callback.yml + - 8323-refactor-homebrew-logic-module-utils.yml + - 8334-proxmox-action-group.yml + - 8355-keycloak-idp-sanitize.yaml + - 8363-opentelemetry-export-to-a-file.yml + - 8367-fix-close-span-if-no-logs.yaml + - 8373-honour-disable-logs.yaml + - 8379-verbose-mode-pkg5.yml + - 8383-deprecate-gitlab-basic-auth.yml + - 9.0.0.yml + - add-ipa-sudorule-deny-cmd.yml + - aix_filesystem-crfs-issue.yml + - bitwarden-lookup-performance.yaml + - hipchat.yml + - internal-redirects.yml + - inventory-rce.yml + - lxd-instance-not-found-avoid-false-positives.yml + - lxd-instances-api-endpoint-added.yml + - pacemaker-cluster.yml + - pkgin.yml + - puppet_lang_force.yml + - remove_deprecated.yml + modules: + - description: Bootstrap ACLs in Consul. + name: consul_acl_bootstrap + namespace: '' + - description: Manipulate Consul auth methods. + name: consul_auth_method + namespace: '' + - description: Manipulate Consul binding rules. + name: consul_binding_rule + namespace: '' + - description: Manipulate Consul tokens. 
+ name: consul_token + namespace: '' + - description: Run Django admin commands. + name: django_command + namespace: '' + - description: Enable or disable dnf repositories using config-manager. + name: dnf_config_manager + namespace: '' + - description: Read git configuration. + name: git_config_info + namespace: '' + - description: Manages GitLab group access tokens. + name: gitlab_group_access_token + namespace: '' + - description: Create, update, or delete GitLab issues. + name: gitlab_issue + namespace: '' + - description: Creates/updates/deletes GitLab Labels belonging to project or + group. + name: gitlab_label + namespace: '' + - description: Creates/updates/deletes GitLab Milestones belonging to project + or group. + name: gitlab_milestone + namespace: '' + - description: Manages GitLab project access tokens. + name: gitlab_project_access_token + namespace: '' + - description: Allows administration of Keycloak client roles scope to restrict + the usage of certain roles to other specific client applications. + name: keycloak_client_rolescope + namespace: '' + - description: Retrieve component info in Keycloak. + name: keycloak_component_info + namespace: '' + - description: Allows administration of Keycloak realm role mappings into groups + with the Keycloak API. + name: keycloak_realm_rolemapping + namespace: '' + - description: Manage Nomad ACL tokens. + name: nomad_token + namespace: '' + - description: Retrieve information about one or more Proxmox VE nodes. + name: proxmox_node_info + namespace: '' + - description: List content from a Proxmox VE storage. + name: proxmox_storage_contents_info + namespace: '' + - description: Allows listing information about USB devices. + name: usb_facts + namespace: '' + plugins: + become: + - description: Systemd's run0. + name: run0 + namespace: null + callback: + - description: The default ansible callback without diff output. + name: default_without_diff + namespace: null + - description: Adds simple timestamp for each header. + name: timestamp + namespace: null + connection: + - description: Run tasks in Incus instances via the Incus CLI. + name: incus + namespace: null + filter: + - description: Converts INI text input into a dictionary. + name: from_ini + namespace: null + - description: Difference of lists with a predictive order. + name: lists_difference + namespace: null + - description: Intersection of lists with a predictive order. + name: lists_intersect + namespace: null + - description: Symmetric Difference of lists with a predictive order. + name: lists_symmetric_difference + namespace: null + - description: Union of lists with a predictive order. + name: lists_union + namespace: null + - description: Converts a dictionary to the INI file format. + name: to_ini + namespace: null + lookup: + - description: Obtain short-lived GitHub App Access tokens. + name: github_app_access_token + namespace: null + - description: Fetch documents stored in 1Password. + name: onepassword_doc + namespace: null + test: + - description: Validates fully-qualified domain names against RFC 1123. + name: fqdn_valid + namespace: null + release_date: '2024-05-20' + 9.0.1: + changes: + bugfixes: + - cpanm - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, + https://github.com/ansible-collections/community.general/pull/8411).
+ - django module utils - use new ``VarDict`` to prevent deprecation warning + (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). + - gconftool2_info - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, + https://github.com/ansible-collections/community.general/pull/8411). + - homebrew - do not fail when brew prints warnings (https://github.com/ansible-collections/community.general/pull/8406, + https://github.com/ansible-collections/community.general/issues/7044). + - hponcfg - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, + https://github.com/ansible-collections/community.general/pull/8411). + - kernel_blacklist - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, + https://github.com/ansible-collections/community.general/pull/8411). + - keycloak_client - fix TypeError when sanitizing the ``saml.signing.private.key`` + attribute in the module's diff or state output. The ``sanitize_cr`` function + expected a dict where in some cases a list might occur (https://github.com/ansible-collections/community.general/pull/8403). + - locale_gen - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, + https://github.com/ansible-collections/community.general/pull/8411). + - mksysb - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, + https://github.com/ansible-collections/community.general/pull/8411). + - pipx_info - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, + https://github.com/ansible-collections/community.general/pull/8411). + - snap - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, + https://github.com/ansible-collections/community.general/pull/8411). + - snap_alias - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, + https://github.com/ansible-collections/community.general/pull/8411). + minor_changes: + - ansible_galaxy_install - minor refactor in the module (https://github.com/ansible-collections/community.general/pull/8413). + release_summary: Bugfix release for inclusion in Ansible 10.0.0rc1. + fragments: + - 8403-fix-typeerror-in-keycloak-client.yaml + - 8406-fix-homebrew-cask-warning.yaml + - 8411-locale-gen-vardict.yml + - 8413-galaxy-refactor.yml + - 9.0.1.yml + release_date: '2024-05-27' + 9.1.0: + changes: + bugfixes: + - git_config - fix behavior of ``state=absent`` if ``value`` is present (https://github.com/ansible-collections/community.general/issues/8436, + https://github.com/ansible-collections/community.general/pull/8452). + - keycloak_realm - add normalizations for ``attributes`` and ``protocol_mappers`` + (https://github.com/ansible-collections/community.general/pull/8496). + - launchd - correctly report changed status in check mode (https://github.com/ansible-collections/community.general/pull/8406). + - opennebula inventory plugin - fix invalid reference to IP when inventory + runs against NICs with no IPv4 address (https://github.com/ansible-collections/community.general/pull/8489).
+ - opentelemetry callback - do not save the JSON response when using the ``ansible.builtin.uri`` + module (https://github.com/ansible-collections/community.general/pull/8430). + - opentelemetry callback - do not save the content response when using the + ``ansible.builtin.slurp`` module (https://github.com/ansible-collections/community.general/pull/8430). + - pacman - do not fail if an empty list of packages has been provided and there + is nothing to do (https://github.com/ansible-collections/community.general/pull/8514). + deprecated_features: + - CmdRunner module util - setting the value of the ``ignore_none`` parameter + within a ``CmdRunner`` context is deprecated and that feature should be + removed in community.general 12.0.0 (https://github.com/ansible-collections/community.general/pull/8479). + - git_config - the ``list_all`` option has been deprecated and will be removed + in community.general 11.0.0. Use the ``community.general.git_config_info`` + module instead (https://github.com/ansible-collections/community.general/pull/8453). + - git_config - using ``state=present`` without providing ``value`` is deprecated + and will be disallowed in community.general 11.0.0. Use the ``community.general.git_config_info`` + module instead to read a value (https://github.com/ansible-collections/community.general/pull/8453). + known_issues: + - homectl - the module does not work under Python 3.13 or newer, since it + relies on the removed ``crypt`` standard library module (https://github.com/ansible-collections/community.general/issues/4691, + https://github.com/ansible-collections/community.general/pull/8497). + - udm_user - the module does not work under Python 3.13 or newer, since it + relies on the removed ``crypt`` standard library module (https://github.com/ansible-collections/community.general/issues/4690, + https://github.com/ansible-collections/community.general/pull/8497). + minor_changes: + - CmdRunner module util - argument formats can be specified as plain functions + without calling ``cmd_runner_fmt.as_func()`` (https://github.com/ansible-collections/community.general/pull/8479). + - ansible_galaxy_install - add upgrade feature (https://github.com/ansible-collections/community.general/pull/8431, + https://github.com/ansible-collections/community.general/issues/8351). + - cargo - add option ``directory``, which allows the source directory to be specified + (https://github.com/ansible-collections/community.general/pull/8480). + - cmd_runner module utils - add decorator ``cmd_runner_fmt.stack`` (https://github.com/ansible-collections/community.general/pull/8415). + - cmd_runner_fmt module utils - simplify implementation of ``cmd_runner_fmt.as_bool_not()`` + (https://github.com/ansible-collections/community.general/pull/8512). + - ipa_dnsrecord - adds ``SSHFP`` record type for managing SSH fingerprints + in FreeIPA DNS (https://github.com/ansible-collections/community.general/pull/8404). + - keycloak_client - assign auth flow by name (https://github.com/ansible-collections/community.general/pull/8428). + - openbsd_pkg - adds diff support to show changes in the installed package list. + This does not yet work for check mode (https://github.com/ansible-collections/community.general/pull/8402). + - proxmox - allow specification of the API port when using proxmox_* (https://github.com/ansible-collections/community.general/issues/8440, + https://github.com/ansible-collections/community.general/pull/8441).
+ - proxmox_vm_info - add ``network`` option to retrieve current network information + (https://github.com/ansible-collections/community.general/pull/8471). + - redfish_command - add ``wait`` and ``wait_timeout`` options to allow a user + to block a command until a service is accessible after performing the requested + command (https://github.com/ansible-collections/community.general/issues/8051, + https://github.com/ansible-collections/community.general/pull/8434). + - redfish_info - add command ``CheckAvailability`` to check if a service is + accessible (https://github.com/ansible-collections/community.general/issues/8051, + https://github.com/ansible-collections/community.general/pull/8434). + - redis_info - adds support for getting cluster info (https://github.com/ansible-collections/community.general/pull/8464). + release_summary: Regular feature and bugfix release. + fragments: + - 8051-Redfish-Wait-For-Service.yml + - 8402-add-diif-mode-openbsd-pkg.yml + - 8404-ipa_dnsrecord_sshfp.yml + - 8415-cmd-runner-stack.yml + - 8428-assign-auth-flow-by-name-keycloak-client.yaml + - 8430-fix-opentelemetry-when-using-logs-with-uri-or-slurp-tasks.yaml + - 8431-galaxy-upgrade.yml + - 8440-allow-api-port-specification.yaml + - 8452-git_config-absent.yml + - 8453-git_config-deprecate-read.yml + - 8464-redis-add-cluster-info.yml + - 8471-proxmox-vm-info-network.yml + - 8476-launchd-check-mode-changed.yaml + - 8479-cmdrunner-improvements.yml + - 8480-directory-feature-cargo.yml + - 8489-fix-opennebula-inventory-crash-when-nic-has-no-ip.yml + - 8496-keycloak_clientscope-add-normalizations.yaml + - 8497-crypt.yml + - 8512-as-bool-not.yml + - 8514-pacman-empty.yml + - 9.1.0.yml + modules: + - description: Add, modify, and delete checks within a consul cluster. + name: consul_agent_check + namespace: '' + - description: Add, modify and delete services within a consul cluster. + name: consul_agent_service + namespace: '' + - description: Wrapper for C(django-admin check). + name: django_check + namespace: '' + - description: Wrapper for C(django-admin createcachetable). + name: django_createcachetable + namespace: '' + plugins: + filter: + - description: Keep specific keys from dictionaries in a list. + name: keep_keys + namespace: null + - description: Remove specific keys from dictionaries in a list. + name: remove_keys + namespace: null + - description: Replace specific keys in a list of dictionaries. + name: replace_keys + namespace: null + release_date: '2024-06-17' + 9.2.0: + changes: + bugfixes: + - bitwarden lookup plugin - fix ``KeyError`` in ``search_field`` (https://github.com/ansible-collections/community.general/issues/8549, + https://github.com/ansible-collections/community.general/pull/8557). + - keycloak_clientscope - remove IDs from clientscope and its protocol mappers + on comparison for changed check (https://github.com/ansible-collections/community.general/pull/8545). + - nsupdate - fix 'index out of range' error when changing NS records by falling + back to authority section of the response (https://github.com/ansible-collections/community.general/issues/8612, + https://github.com/ansible-collections/community.general/pull/8614). + - proxmox - fix idempotency on creation of mount volumes using Proxmox' special + ``:`` syntax (https://github.com/ansible-collections/community.general/issues/8407, + https://github.com/ansible-collections/community.general/pull/8542). 
+ - redfish_utils module utils - do not fail when language is not exactly "en" + (https://github.com/ansible-collections/community.general/pull/8613). + minor_changes: + - CmdRunner module utils - the parameter ``force_lang`` now supports the special + value ``auto`` which will automatically try and determine the best parsable + locale in the system (https://github.com/ansible-collections/community.general/pull/8517). + - proxmox - add ``disk_volume`` and ``mount_volumes`` keys for better readability + (https://github.com/ansible-collections/community.general/pull/8542). + - proxmox - translate the old ``disk`` and ``mounts`` keys to the new handling + internally (https://github.com/ansible-collections/community.general/pull/8542). + - proxmox_template - small refactor in logic for determining whether a template + exists or not (https://github.com/ansible-collections/community.general/pull/8516). + - redfish_* modules - adds ``ciphers`` option for custom cipher selection + (https://github.com/ansible-collections/community.general/pull/8533). + - sudosu become plugin - added an option (``alt_method``) to enhance compatibility + with more versions of ``su`` (https://github.com/ansible-collections/community.general/pull/8214). + - virtualbox inventory plugin - expose a new parameter ``enable_advanced_group_parsing`` + to change how the VirtualBox dynamic inventory parses VM groups (https://github.com/ansible-collections/community.general/issues/8508, + https://github.com/ansible-collections/community.general/pull/8510). + - wdc_redfish_command - minor change to handle upgrade file for Redfish WD + platforms (https://github.com/ansible-collections/community.general/pull/8444). + release_summary: Regular bugfix and feature release. + fragments: + - 8214-sudosu-not-working-on-some-BSD-machines.yml + - 8444-fix-redfish-gen2-upgrade.yaml + - 8508-virtualbox-inventory.yml + - 8516-proxmox-template-refactor.yml + - 8517-cmd-runner-lang-auto.yml + - 8533-add-ciphers-option.yml + - 8542-fix-proxmox-volume-handling.yml + - 8545-keycloak-clientscope-remove-id-on-compare.yml + - 8557-fix-bug-with-bitwarden.yml + - 8613-redfish_utils-language.yaml + - 8614-nsupdate-index-out-of-range.yml + - 9.2.0.yml + plugins: + filter: + - description: Return input type. + name: reveal_ansible_type + namespace: null + test: + - description: Validate input type. + name: ansible_type + namespace: null + release_date: '2024-07-15' + 9.3.0: + changes: + bugfixes: + - gitlab_runner - fix ``paused`` parameter being ignored (https://github.com/ansible-collections/community.general/pull/8648). + - homebrew_cask - fix ``upgrade_all`` returns ``changed`` when nothing upgraded + (https://github.com/ansible-collections/community.general/issues/8707, https://github.com/ansible-collections/community.general/pull/8708). + - keycloak_user_federation - get cleartext IDP ``clientSecret`` from full + realm info to detect changes to it (https://github.com/ansible-collections/community.general/issues/8294, + https://github.com/ansible-collections/community.general/pull/8735). + - keycloak_user_federation - remove existing user federation mappers if they + are not present in the federation configuration and will not be updated + (https://github.com/ansible-collections/community.general/issues/7169, https://github.com/ansible-collections/community.general/pull/8695). 
+ - proxmox - fixed an issue where the new volume handling incorrectly converted + ``null`` values into ``"None"`` strings (https://github.com/ansible-collections/community.general/pull/8646). + - proxmox - fixed an issue where volume strings were overwritten instead + of appended to in the new ``build_volume()`` method (https://github.com/ansible-collections/community.general/pull/8646). + - proxmox - removed the forced conversion of non-string values to strings + to be consistent with the module documentation (https://github.com/ansible-collections/community.general/pull/8646). + minor_changes: + - cgroup_memory_recap, hipchat, jabber, log_plays, loganalytics, logentries, + logstash, slack, splunk, sumologic, syslog_json callback plugins - make + sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8628). + - chef_databag, consul_kv, cyberarkpassword, dsv, etcd, filetree, hiera, onepassword, + onepassword_doc, onepassword_raw, passwordstore, redis, shelvefile, tss + lookup plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8626). + - chroot, funcd, incus, iocage, jail, lxc, lxd, qubes, zone connection plugins + - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8627). + - cobbler, linode, lxd, nmap, online, scaleway, stackpath_compute, virtualbox + inventory plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8625). + - doas, dzdo, ksu, machinectl, pbrun, pfexec, pmrun, sesu, sudosu become plugins + - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8623). + - gconftool2 - make use of ``ModuleHelper`` features to simplify code (https://github.com/ansible-collections/community.general/pull/8711). + - gitlab_project - add option ``container_expiration_policy`` to schedule + container registry cleanup (https://github.com/ansible-collections/community.general/pull/8674). + - gitlab_project - add option ``model_registry_access_level`` to disable model + registry (https://github.com/ansible-collections/community.general/pull/8688). + - gitlab_project - add option ``pages_access_level`` to disable project pages + (https://github.com/ansible-collections/community.general/pull/8688). + - gitlab_project - add option ``repository_access_level`` to disable project + repository (https://github.com/ansible-collections/community.general/pull/8674). + - gitlab_project - add option ``service_desk_enabled`` to disable service + desk (https://github.com/ansible-collections/community.general/pull/8688). + - locale_gen - add support for multiple locales (https://github.com/ansible-collections/community.general/issues/8677, + https://github.com/ansible-collections/community.general/pull/8682). + - memcached, pickle, redis, yaml cache plugins - make sure that all options + are typed (https://github.com/ansible-collections/community.general/pull/8624). + - opentelemetry callback plugin - fix default value for ``store_spans_in_file`` + causing traces to be produced to a file named ``None`` (https://github.com/ansible-collections/community.general/issues/8566, + https://github.com/ansible-collections/community.general/pull/8741). + - passwordstore lookup plugin - add the current user to the lockfile file + name to address issues on multi-user systems (https://github.com/ansible-collections/community.general/pull/8689).
+ - pipx - add parameter ``suffix`` to module (https://github.com/ansible-collections/community.general/pull/8675, + https://github.com/ansible-collections/community.general/issues/8656). + - pkgng - add option ``use_globs`` (default ``true``) to optionally disable + glob patterns (https://github.com/ansible-collections/community.general/issues/8632, + https://github.com/ansible-collections/community.general/pull/8633). + - proxmox inventory plugin - add new fact for LXC interface details (https://github.com/ansible-collections/community.general/pull/8713). + - redis, redis_info - add ``client_cert`` and ``client_key`` options to specify + path to certificate for Redis authentication (https://github.com/ansible-collections/community.general/pull/8654). + release_summary: Regular bugfix and feature release. + fragments: + - 8623-become-types.yml + - 8624-cache-types.yml + - 8625-inventory-types.yml + - 8626-lookup-types.yml + - 8627-connection-types.yml + - 8628-callback-types.yml + - 8632-pkgng-add-option-use_globs.yml + - 8646-fix-bug-in-proxmox-volumes.yml + - 8648-fix-gitlab-runner-paused.yaml + - 8654-add-redis-tls-params.yml + - 8674-add-gitlab-project-cleanup-policy.yml + - 8675-pipx-install-suffix.yml + - 8682-locale-gen-multiple.yaml + - 8688-gitlab_project-add-new-params.yml + - 8689-passwordstore-lock-naming.yml + - 8695-keycloak_user_federation-mapper-removal.yml + - 8708-homebrew_cask-fix-upgrade-all.yml + - 8711-gconftool2-refactor.yml + - 8713-proxmox_lxc_interfaces.yml + - 8735-keycloak_identity_provider-get-cleartext-secret-from-realm-info.yml + - 8741-fix-opentelemetry-callback.yml + - 9.3.0.yml + modules: + - description: Bootc Switch and Upgrade. + name: bootc_manage + namespace: '' + - description: Services manager for Homebrew. + name: homebrew_services + namespace: '' + - description: Allows obtaining Keycloak realm keys metadata via Keycloak API. + name: keycloak_realm_keys_metadata_info + namespace: '' + release_date: '2024-08-12' + 9.4.0: + changes: + bugfixes: + - gitlab_group_access_token - fix crash in check mode caused by attempted + access to a newly created access token (https://github.com/ansible-collections/community.general/pull/8796). + - gitlab_project - fix ``container_expiration_policy`` not being applied when + creating a new project (https://github.com/ansible-collections/community.general/pull/8790). + - gitlab_project - fix crash caused by old Gitlab projects not having a ``container_expiration_policy`` + attribute (https://github.com/ansible-collections/community.general/pull/8790). + - gitlab_project_access_token - fix crash in check mode caused by attempted + access to a newly created access token (https://github.com/ansible-collections/community.general/pull/8796). + - keycloak_realm_key - fix invalid usage of ``parent_id`` (https://github.com/ansible-collections/community.general/issues/7850, + https://github.com/ansible-collections/community.general/pull/8823). + - keycloak_user_federation - fix key error when removing mappers during an + update and new mappers are specified in the module args (https://github.com/ansible-collections/community.general/pull/8762). + - keycloak_user_federation - fix the ``UnboundLocalError`` that occurs when + an ID is provided for a user federation mapper (https://github.com/ansible-collections/community.general/pull/8831). 
+ - keycloak_user_federation - sort desired and after mapper list by name (analogous + to the before mapper list) to minimize diff and make change detection more accurate + (https://github.com/ansible-collections/community.general/pull/8761). + - proxmox inventory plugin - fixed a possible error when concatenating responses + from Proxmox. In case an API call unexpectedly returned an empty result, + the inventory failed with a fatal error. Added a check for empty responses + (https://github.com/ansible-collections/community.general/issues/8798, https://github.com/ansible-collections/community.general/pull/8794). + deprecated_features: + - MH decorator cause_changes module utils - deprecate parameters ``on_success`` + and ``on_failure`` (https://github.com/ansible-collections/community.general/pull/8791). + - 'pipx - support for versions of the command line tool ``pipx`` older than + ``1.7.0`` is deprecated and will be removed in community.general 11.0.0 + (https://github.com/ansible-collections/community.general/pull/8793). + + ' + - 'pipx_info - support for versions of the command line tool ``pipx`` older + than ``1.7.0`` is deprecated and will be removed in community.general 11.0.0 + (https://github.com/ansible-collections/community.general/pull/8793). + + ' + minor_changes: + - MH module utils - add parameter ``when`` to ``cause_changes`` decorator + (https://github.com/ansible-collections/community.general/pull/8766). + - MH module utils - minor refactor in decorators (https://github.com/ansible-collections/community.general/pull/8766). + - alternatives - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - apache2_mod_proxy - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8814). + - apache2_mod_proxy - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8833). + - consul_acl - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - copr - added ``includepkgs`` and ``excludepkgs`` parameters to limit the + list of packages fetched or excluded from the repository (https://github.com/ansible-collections/community.general/pull/8779). + - credstash lookup plugin - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8822). + - csv module utils - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8814). + - deco MH module utils - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8822). + - etcd3 - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). + - gio_mime - mute the old ``VarDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8776). + - gitlab_group - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). + - gitlab_project - add option ``issues_access_level`` to enable/disable project + issues (https://github.com/ansible-collections/community.general/pull/8760). + - gitlab_project - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
+ - gitlab_project - sorted parameters in order to avoid future merge conflicts + (https://github.com/ansible-collections/community.general/pull/8759). + - hashids filter plugin - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8814). + - hwc_ecs_instance - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8822). + - hwc_evs_disk - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). + - hwc_vpc_eip - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). + - hwc_vpc_peering_connect - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8822). + - hwc_vpc_port - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). + - hwc_vpc_subnet - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). + - imc_rest - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - ipa_otptoken - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). + - jira - mute the old ``VarDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8776). + - jira - replace deprecated params when using decorator ``cause_changes`` + (https://github.com/ansible-collections/community.general/pull/8791). + - keep_keys filter plugin - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8814). + - keycloak module utils - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8822). + - keycloak_client - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8814). + - keycloak_clientscope - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8814). + - keycloak_identity_provider - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8814). + - keycloak_user_federation - add module argument allowing users to opt out + of the removal of unspecified mappers, for example to keep the Keycloak + default mappers (https://github.com/ansible-collections/community.general/pull/8764). + - keycloak_user_federation - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8814). + - keycloak_user_federation - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8822). + - keycloak_user_federation - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8833). + - linode - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). + - lxc_container - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). 
+ - lxd_container - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). + - manageiq_provider - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8814). + - ocapi_utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - one_service - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). + - one_vm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). + - onepassword lookup plugin - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8833). + - pids - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - pipx - added new states ``install_all``, ``uninject``, ``upgrade_shared``, + ``pin``, and ``unpin`` (https://github.com/ansible-collections/community.general/pull/8809). + - pipx - added parameter ``global`` to module (https://github.com/ansible-collections/community.general/pull/8793). + - pipx - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - pipx_info - added parameter ``global`` to module (https://github.com/ansible-collections/community.general/pull/8793). + - pipx_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - pkg5_publisher - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - proxmox - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). + - proxmox_disk - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). + - proxmox_kvm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). + - proxmox_kvm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). + - redfish_utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - redfish_utils module utils - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8822). + - redis cache plugin - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8833). + - remove_keys filter plugin - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8814). + - replace_keys filter plugin - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8814). + - scaleway - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - scaleway module utils - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8822). 
+ - scaleway_compute - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8833). + - scaleway_ip - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - scaleway_lb - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - scaleway_security_group - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8822). + - scaleway_security_group - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8833). + - scaleway_user_data - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8833). + - sensu_silence - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - snmp_facts - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - sorcery - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). + - ufw - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). + - unsafe plugin utils - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8814). + - vardict module utils - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8814). + - vars MH module utils - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8814). + - vmadm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). + release_summary: Bugfix and feature release. + fragments: + - 8738-limit-packages-for-copr.yml + - 8759-gitlab_project-sort-params.yml + - 8760-gitlab_project-add-issues-access-level.yml + - 8761-keycloak_user_federation-sort-desired-and-after-mappers-by-name.yml + - 8762-keycloac_user_federation-fix-key-error-when-updating.yml + - 8764-keycloak_user_federation-make-mapper-removal-optout.yml + - 8766-mh-deco-improve.yml + - 8776-mute-vardict-deprecation.yml + - 8790-gitlab_project-fix-cleanup-policy-on-project-create.yml + - 8791-mh-cause-changes-param-depr.yml + - 8793-pipx-global.yml + - 8794-Fixing-possible-concatination-error.yaml + - 8796-gitlab-access-token-check-mode.yml + - 8809-pipx-new-params.yml + - 8814-dict-comprehension.yml + - 8822-dict-comprehension.yml + - 8823-keycloak-realm-key.yml + - 8831-fix-error-when-mapper-id-is-provided.yml + - 8833-dict-comprehension.yml + - 9.4.0.yml + modules: + - description: Allows managing Keycloak User Profiles. + name: keycloak_userprofile + namespace: '' + - description: Manages OpenNebula virtual networks. + name: one_vnet + namespace: '' + release_date: '2024-09-09' + 9.5.0: + changes: + bugfixes: + - cloudflare_dns - fix changing Cloudflare SRV records (https://github.com/ansible-collections/community.general/issues/8679, + https://github.com/ansible-collections/community.general/pull/8948). 
+ - cmd_runner module utils - call to ``get_best_parsable_locales()`` was missing + a parameter (https://github.com/ansible-collections/community.general/pull/8929). + - dig lookup plugin - fix using only the last nameserver specified (https://github.com/ansible-collections/community.general/pull/8970). + - django_command - option ``command`` is now split lexically before being passed + to the underlying PythonRunner (https://github.com/ansible-collections/community.general/pull/8944). + - homectl - the module now tries to use ``legacycrypt`` on Python 3.13+ (https://github.com/ansible-collections/community.general/issues/4691, + https://github.com/ansible-collections/community.general/pull/8987). + - ini_file - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, + https://github.com/ansible-collections/community.general/pull/8925). + - ipa_host - add ``force_create``, fix ``enabled`` and ``disabled`` states + (https://github.com/ansible-collections/community.general/issues/1094, https://github.com/ansible-collections/community.general/pull/8920). + - ipa_hostgroup - fix ``enabled`` and ``disabled`` states (https://github.com/ansible-collections/community.general/issues/8408, + https://github.com/ansible-collections/community.general/pull/8900). + - java_keystore - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, + https://github.com/ansible-collections/community.general/pull/8925). + - jenkins_plugin - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, + https://github.com/ansible-collections/community.general/pull/8925). + - kdeconfig - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, + https://github.com/ansible-collections/community.general/pull/8925). + - keycloak_realm - fix change detection in check mode by sorting the lists + in the realms beforehand (https://github.com/ansible-collections/community.general/pull/8877). + - keycloak_user_federation - add module argument allowing users to configure + the update mode for the parameter ``bindCredential`` (https://github.com/ansible-collections/community.general/pull/8898). + - keycloak_user_federation - minimize change detection by setting ``krbPrincipalAttribute`` + to ``''`` in Keycloak responses if missing (https://github.com/ansible-collections/community.general/pull/8785). + - keycloak_user_federation - remove ``lastSync`` parameter from Keycloak responses + to minimize diff/changes (https://github.com/ansible-collections/community.general/pull/8812). + - keycloak_userprofile - fix empty response when fetching userprofile component + by removing ``parent=parent_id`` filter (https://github.com/ansible-collections/community.general/pull/8923). + - keycloak_userprofile - improve diff by deserializing the fetched ``kc.user.profile.config`` + and serializing it only when sending it back (https://github.com/ansible-collections/community.general/pull/8940). + - lxd_container - fix bug introduced in previous commit (https://github.com/ansible-collections/community.general/pull/8895, + https://github.com/ansible-collections/community.general/issues/8888). + - one_service - fix service creation after it was deleted with ``unique`` + parameter (https://github.com/ansible-collections/community.general/issues/3137, + https://github.com/ansible-collections/community.general/pull/8887). 
+ - pam_limits - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, + https://github.com/ansible-collections/community.general/pull/8925). + - python_runner module utils - parameter ``path_prefix`` was being handled + as a string when it should be a list (https://github.com/ansible-collections/community.general/pull/8944). + - udm_user - the module now tries to use ``legacycrypt`` on Python 3.13+ (https://github.com/ansible-collections/community.general/issues/4690, + https://github.com/ansible-collections/community.general/pull/8987). + deprecated_features: + - hipchat - the HipChat service has been discontinued and the self-hosted + variant has been End of Life since 2020. The module is therefore deprecated + and will be removed from community.general 11.0.0 if nobody provides compelling + reasons to still keep it (https://github.com/ansible-collections/community.general/pull/8919). + minor_changes: + - dig lookup plugin - add ``port`` option to specify DNS server port (https://github.com/ansible-collections/community.general/pull/8966). + - flatpak - improve the parsing of Flatpak application IDs based on official + guidelines (https://github.com/ansible-collections/community.general/pull/8909). + - gio_mime - adjust code ahead of the old ``VarDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8855). + - gitlab_deploy_key - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). + - gitlab_group - add many new parameters (https://github.com/ansible-collections/community.general/pull/8908). + - gitlab_group - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). + - gitlab_issue - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). + - gitlab_merge_request - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). + - gitlab_runner - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). + - icinga2_host - replace loop with dict comprehension (https://github.com/ansible-collections/community.general/pull/8876). + - jira - adjust code ahead of the old ``VarDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8856). + - keycloak_client - add ``client-x509`` choice to ``client_authenticator_type`` + (https://github.com/ansible-collections/community.general/pull/8973). + - keycloak_user_federation - add the user federation config parameter ``referral`` + to the module arguments (https://github.com/ansible-collections/community.general/pull/8954). + - memset_dns_reload - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). + - memset_memstore_info - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). + - memset_server_info - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). + - memset_zone - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). + - memset_zone_domain - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). + - memset_zone_record - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). 
+ - nmcli - add ``conn_enable`` param to reload connection (https://github.com/ansible-collections/community.general/issues/3752, + https://github.com/ansible-collections/community.general/issues/8704, https://github.com/ansible-collections/community.general/pull/8897). + - nmcli - add ``state=up`` and ``state=down`` to enable/disable connections + (https://github.com/ansible-collections/community.general/issues/3752, https://github.com/ansible-collections/community.general/issues/8704, + https://github.com/ansible-collections/community.general/issues/7152, https://github.com/ansible-collections/community.general/pull/8897). + - nmcli - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). + - npm - add ``force`` parameter to allow ``--force`` (https://github.com/ansible-collections/community.general/pull/8885). + - one_image - add option ``persistent`` to manage image persistence (https://github.com/ansible-collections/community.general/issues/3578, + https://github.com/ansible-collections/community.general/pull/8889). + - one_image - extend the XSD schema to make it return a lot more info about the image + (https://github.com/ansible-collections/community.general/pull/8889). + - one_image - refactor code to make it more similar to ``one_template`` and + ``one_vnet`` (https://github.com/ansible-collections/community.general/pull/8889). + - one_image_info - extend the XSD schema to make it return a lot more info about + the image (https://github.com/ansible-collections/community.general/pull/8889). + - one_image_info - refactor code to make it more similar to ``one_template`` + and ``one_vnet`` (https://github.com/ansible-collections/community.general/pull/8889). + - open_iscsi - allow login to a portal with multiple targets without specifying + any of them (https://github.com/ansible-collections/community.general/pull/8719). + - opennebula.py - add VM ``id`` and VM ``host`` to inventory host data (https://github.com/ansible-collections/community.general/pull/8532). + - passwordstore lookup plugin - add subkey creation/update support (https://github.com/ansible-collections/community.general/pull/8952). + - proxmox inventory plugin - clean up authentication code (https://github.com/ansible-collections/community.general/pull/8917). + - redfish_command - add handling of the ``PasswordChangeRequired`` message + from services in the ``UpdateUserPassword`` command to directly modify the + user's password if the requested user is the one invoking the operation + (https://github.com/ansible-collections/community.general/issues/8652, https://github.com/ansible-collections/community.general/pull/8653). + - redfish_config - remove ``CapacityBytes`` from required parameters of the + ``CreateVolume`` command (https://github.com/ansible-collections/community.general/pull/8956). + - redfish_config - add parameter ``storage_none_volume_deletion`` to ``CreateVolume`` + command in order to control the automatic deletion of non-RAID volumes (https://github.com/ansible-collections/community.general/pull/8990). + - redfish_info - adds ``RedfishURI`` and ``StorageId`` to Disk inventory (https://github.com/ansible-collections/community.general/pull/8937). + - scaleway_container - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8858). + - scaleway_container_info - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8858). 
+ - scaleway_container_namespace - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8858). + - scaleway_container_namespace_info - replace Python 2.6 construct with dict + comprehensions (https://github.com/ansible-collections/community.general/pull/8858). + - scaleway_container_registry - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8858). + - scaleway_container_registry_info - replace Python 2.6 construct with dict + comprehensions (https://github.com/ansible-collections/community.general/pull/8858). + - scaleway_function - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8858). + - scaleway_function_info - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8858). + - scaleway_function_namespace - replace Python 2.6 construct with dict comprehensions + (https://github.com/ansible-collections/community.general/pull/8858). + - scaleway_function_namespace_info - replace Python 2.6 construct with dict + comprehensions (https://github.com/ansible-collections/community.general/pull/8858). + - scaleway_user_data - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). + - udm_dns_record - replace loop with ``dict.update()`` (https://github.com/ansible-collections/community.general/pull/8876). + release_summary: 'Regular bugfix and feature release. + + + Please note that this is the last feature release for community.general 9.x.y. + + From now on, new features will only go into community.general 10.x.y.' + fragments: + - 8532-expand-opennuebula-inventory-data.yml + - 8652-Redfish-Password-Change-Required.yml + - 8679-fix-cloudflare-srv.yml + - 8719-openiscsi-add-multiple-targets.yaml + - 8785-keycloak_user_federation-set-krbPrincipalAttribute-to-empty-string-if-missing.yaml + - 8812-keycloak-user-federation-remove-lastSync-param-from-kc-responses.yml + - 8855-gio_mime_vardict.yml + - 8856-jira_vardict.yml + - 8858-dict-comprehension.yml + - 8876-dict-items-loop.yml + - 8877-keycloak_realm-sort-lists-before-change-detection.yaml + - 8885-add-force-flag-for-nmp.yml + - 8887-fix-one_service-unique.yml + - 8889-refactor-one-image-modules.yml + - 8895-fix-comprehension.yaml + - 8897-nmcli-add-reload-and-up-down.yml + - 8898-add-arg-to-exclude-bind-credential-from-change-check.yaml + - 8900-ipa-hostgroup-fix-states.yml + - 8908-add-gitlab-group-params.yml + - 8909-flatpak-improve-name-parsing.yaml + - 8917-proxmox-clean-auth.yml + - 8920-ipa-host-fix-state.yml + - 8923-keycloak_userprofile-fix-empty-response-when-fetching-userprofile.yml + - 8925-atomic.yml + - 8929-cmd_runner-bugfix.yml + - 8937-add-StorageId-RedfishURI-to-disk-facts.yml + - 8940-keycloak_userprofile-improve-diff.yml + - 8944-django-command-fix.yml + - 8952-password-store-lookup-create-subkey-support.yml + - 8954-keycloak-user-federation-add-referral-parameter.yml + - 8956-remove-capacitybytes-from-the-required-parameters_list.yml + - 8966-dig-add-port-option.yml + - 8970-fix-dig-multi-nameservers.yml + - 8973-keycloak_client-add-x509-auth.yml + - 8987-legacycrypt.yml + - 8990.yml + - 9.5.0.yml + - deprecate-hipchat.yml + modules: + - description: Manage keytab file in FreeIPA. 
+ name: ipa_getkeytab + namespace: '' + release_date: '2024-10-07' + 9.5.1: + changes: + bugfixes: + - bitwarden lookup plugin - support BWS v0.3.0 syntax breaking change (https://github.com/ansible-collections/community.general/pull/9028). + - collection_version lookup plugin - use ``importlib`` directly instead of + the deprecated ``ansible.module_utils.compat.importlib``, which was removed in ansible-core 2.19 + (https://github.com/ansible-collections/community.general/pull/9084). + - gitlab_label - update the label's color (https://github.com/ansible-collections/community.general/pull/9010). + - keycloak_clientscope_type - fix change detection in check mode (https://github.com/ansible-collections/community.general/issues/9092, + https://github.com/ansible-collections/community.general/pull/9093). + - "keycloak_group - fix crash caused in subgroup creation. The crash was caused\ + \ by a missing or empty ``subGroups`` property in Keycloak \u226523 (https://github.com/ansible-collections/community.general/issues/8788,\ + \ https://github.com/ansible-collections/community.general/pull/8979)." + - modprobe - fix check mode not being honored for ``persistent`` option (https://github.com/ansible-collections/community.general/issues/9051, + https://github.com/ansible-collections/community.general/pull/9052). + - one_host - fix if statements for cases when ``ID=0`` (https://github.com/ansible-collections/community.general/issues/1199, + https://github.com/ansible-collections/community.general/pull/8907). + - one_image - fix module failing due to a class method typo (https://github.com/ansible-collections/community.general/pull/9056). + - one_image_info - fix module failing due to a class method typo (https://github.com/ansible-collections/community.general/pull/9056). + - one_vnet - fix module failing due to a variable typo (https://github.com/ansible-collections/community.general/pull/9019). + - redfish_utils module utils - fix issue with URI parsing to gracefully handle + trailing slashes when extracting member identifiers (https://github.com/ansible-collections/community.general/issues/9047, + https://github.com/ansible-collections/community.general/pull/9057). + minor_changes: + - redfish_utils module utils - schedule a BIOS configuration job at next reboot + when the BIOS config is changed (https://github.com/ansible-collections/community.general/pull/9012). + release_summary: Regular bugfix release. + fragments: + - 8907-fix-one-host-id.yml + - 8979-keycloak_group-fix-subgroups.yml + - 9.5.1.yml + - 9010-edit-gitlab-label-color.yaml + - 9012-dell-pwrbutton-requires-a-job-initiated-at-reboot.yml + - 9019-onevnet-bugfix.yml + - 9028-bitwarden-secrets-manager-syntax-fix.yml + - 9047-redfish-uri-parsing.yml + - 9052-modprobe-bugfix.yml + - 9056-fix-one_image-modules.yml + - 9084-collection_version-importlib.yml + - 9092-keycloak-clientscope-type-fix-check-mode.yml + release_date: '2024-11-03' + 9.5.2: + changes: + bugfixes: + - dnf_config_manager - fix hanging when prompting to import GPG keys (https://github.com/ansible-collections/community.general/pull/9124, + https://github.com/ansible-collections/community.general/issues/8830). + - dnf_config_manager - forces the locale to ``C`` before the module starts. If the + locale was set to non-English, the output of ``dnf config-manager`` + could not be parsed (https://github.com/ansible-collections/community.general/pull/9157, + https://github.com/ansible-collections/community.general/issues/9046). 
+ - flatpak - force the locale language to ``C`` when running the flatpak command + (https://github.com/ansible-collections/community.general/pull/9187, https://github.com/ansible-collections/community.general/issues/8883). + - github_key - in check mode, a faulty call to ``datetime.strftime(...)`` + was being made, which generated an exception (https://github.com/ansible-collections/community.general/issues/9185). + - homebrew_cask - allow ``+`` symbol in Homebrew cask name validation regex + (https://github.com/ansible-collections/community.general/pull/9128). + - keycloak_client - fix diff by removing code that turns the attributes dict, + which contains additional settings, into a list (https://github.com/ansible-collections/community.general/pull/9077). + - keycloak_clientscope - fix diff and ``end_state`` by removing the code that + turns the attributes dict, which contains additional config items, into + a list (https://github.com/ansible-collections/community.general/pull/9082). + - keycloak_clientscope_type - sort the default and optional clientscope lists + to improve the diff (https://github.com/ansible-collections/community.general/pull/9202). + - redfish_utils module utils - remove undocumented default applytime (https://github.com/ansible-collections/community.general/pull/9114). + - slack - fail with an error message if the Slack API response is not OK (https://github.com/ansible-collections/community.general/pull/9198). + minor_changes: + - proxmox inventory plugin - fix urllib3 ``InsecureRequestWarnings`` not being + suppressed when a token is used (https://github.com/ansible-collections/community.general/pull/9099). + release_summary: Regular bugfix release. + fragments: + - 9.5.2.yml + - 9077-keycloak_client-fix-attributes-dict-turned-into-list.yml + - 9082-keycloak_clientscope-fix-attributes-dict-turned-into-list.yml + - 9099-proxmox-fix-insecure.yml + - 9114-redfish-utils-update-remove-default-applytime.yml + - 9124-dnf_config_manager.yml + - 9128-homebrew_cask-name-regex-fix.yml + - 9157-fix-dnf_config_manager-locale.yml + - 9186-fix-broken-check-mode-in-github-key.yml + - 9187-flatpak-lang.yml + - 9198-fail-if-slack-api-response-is-not-ok-with-error-message.yml + - 9202-keycloak_clientscope_type-sort-lists.yml + release_date: '2024-12-02' + 9.5.3: + changes: + bugfixes: + - dig lookup plugin - correctly handle ``NoNameserver`` exception (https://github.com/ansible-collections/community.general/pull/9363, + https://github.com/ansible-collections/community.general/issues/9362). + - htpasswd - report changes when file permissions are adjusted (https://github.com/ansible-collections/community.general/issues/9485, + https://github.com/ansible-collections/community.general/pull/9490). + - proxmox_disk - fix async method and make ``resize_disk`` method handle errors + correctly (https://github.com/ansible-collections/community.general/pull/9256). + - proxmox_template - fix the wrong path called on ``proxmox_template.task_status`` + (https://github.com/ansible-collections/community.general/issues/9276, https://github.com/ansible-collections/community.general/pull/9277). + - qubes connection plugin - fix the printing of debug information (https://github.com/ansible-collections/community.general/pull/9334). + - redfish_utils module utils - fix ``VerifyBiosAttributes`` command on multi-system + resource nodes (https://github.com/ansible-collections/community.general/pull/9234). 
+ minor_changes: + - proxmox module utils - add method ``api_task_complete`` that can wait for + task completion and return error message (https://github.com/ansible-collections/community.general/pull/9256). + release_summary: Regular bugfix release. + security_fixes: + - keycloak_authentication - API calls did not properly set the ``priority`` + during update resulting in incorrectly sorted authentication flows. This + apparently only affects Keycloak 25 or newer (https://github.com/ansible-collections/community.general/pull/9263). + fragments: + - 9.5.3.yml + - 9234-fix-verify-bios-attributes-multi-system.yml + - 9256-proxmox_disk-fix-async-method-of-resize_disk.yml + - 9263-kc_authentication-api-priority.yaml + - 9277-proxmox_template-fix-the-wrong-path-called-on-proxmox_template.task_status.yaml + - 9334-qubes-conn.yml + - 9363-dig-nonameservers.yml + - 9490-htpasswd-permissions.yml + release_date: '2024-12-31' + 9.5.4: + changes: + bugfixes: + - 'redhat_subscription - do not try to unsubscribe (i.e. remove subscriptions) + + when unregistering a system: newer versions of subscription-manager, as + + available in EL 10 and Fedora 41+, do not support entitlements anymore, + and + + thus unsubscribing will fail + + (https://github.com/ansible-collections/community.general/pull/9578). + + ' + security_fixes: + - keycloak_client - Sanitize ``saml.encryption.private.key`` so it does not + show in the logs (https://github.com/ansible-collections/community.general/pull/9621). + fragments: + - 9578-redhat_subscription-no-remove-on-unregister.yml + - 9621-keycloak_client-sanitize-saml-encryption-key.yml + release_date: '2025-01-27' diff --git a/changelogs/config.yaml b/changelogs/config.yaml index 2cef6e26f4..32ffe27f2b 100644 --- a/changelogs/config.yaml +++ b/changelogs/config.yaml @@ -18,23 +18,25 @@ output_formats: prelude_section_name: release_summary prelude_section_title: Release Summary sections: -- - major_changes - - Major Changes -- - minor_changes - - Minor Changes -- - breaking_changes - - Breaking Changes / Porting Guide -- - deprecated_features - - Deprecated Features -- - removed_features - - Removed Features (previously deprecated) -- - security_fixes - - Security Fixes -- - bugfixes - - Bugfixes -- - known_issues - - Known Issues + - - major_changes + - Major Changes + - - minor_changes + - Minor Changes + - - breaking_changes + - Breaking Changes / Porting Guide + - - deprecated_features + - Deprecated Features + - - removed_features + - Removed Features (previously deprecated) + - - security_fixes + - Security Fixes + - - bugfixes + - Bugfixes + - - known_issues + - Known Issues title: Community General trivial_section_name: trivial use_fqcn: true add_plugin_period: true +changelog_nice_yaml: true +changelog_sort: version diff --git a/changelogs/fragments/000-redhat_subscription-dbus-on-7.4-plus.yaml b/changelogs/fragments/000-redhat_subscription-dbus-on-7.4-plus.yaml deleted file mode 100644 index 64390308d7..0000000000 --- a/changelogs/fragments/000-redhat_subscription-dbus-on-7.4-plus.yaml +++ /dev/null @@ -1,6 +0,0 @@ -bugfixes: - - | - redhat_subscription - use the D-Bus registration on RHEL 7 only on 7.4 and - greater; older versions of RHEL 7 do not have it - (https://github.com/ansible-collections/community.general/issues/7622, - https://github.com/ansible-collections/community.general/pull/7624). 
diff --git a/changelogs/fragments/5588-support-1password-connect.yml b/changelogs/fragments/5588-support-1password-connect.yml deleted file mode 100644 index bec2300d3f..0000000000 --- a/changelogs/fragments/5588-support-1password-connect.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - onepassword lookup plugin - support 1Password Connect with the opv2 client by setting the connect_host and connect_token parameters (https://github.com/ansible-collections/community.general/pull/7116). - - onepassword_raw lookup plugin - support 1Password Connect with the opv2 client by setting the connect_host and connect_token parameters (https://github.com/ansible-collections/community.general/pull/7116) diff --git a/changelogs/fragments/6572-nmcli-add-support-loopback-type.yml b/changelogs/fragments/6572-nmcli-add-support-loopback-type.yml deleted file mode 100644 index 4382851d68..0000000000 --- a/changelogs/fragments/6572-nmcli-add-support-loopback-type.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - nmcli - add support for new connection type ``loopback`` (https://github.com/ansible-collections/community.general/issues/6572). diff --git a/changelogs/fragments/7143-proxmox-template.yml b/changelogs/fragments/7143-proxmox-template.yml deleted file mode 100644 index 89d44594d3..0000000000 --- a/changelogs/fragments/7143-proxmox-template.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - proxmox - adds ``template`` value to the ``state`` parameter, allowing conversion of container to a template (https://github.com/ansible-collections/community.general/pull/7143). - - proxmox_kvm - adds ``template`` value to the ``state`` parameter, allowing conversion of a VM to a template (https://github.com/ansible-collections/community.general/pull/7143). diff --git a/changelogs/fragments/7151-fix-keycloak_authz_permission-incorrect-resource-payload.yml b/changelogs/fragments/7151-fix-keycloak_authz_permission-incorrect-resource-payload.yml deleted file mode 100644 index 2fa50a47ee..0000000000 --- a/changelogs/fragments/7151-fix-keycloak_authz_permission-incorrect-resource-payload.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - keycloak_authz_permission - resource payload variable for scope-based permission was constructed as a string, when it needs to be a list, even for a single item (https://github.com/ansible-collections/community.general/issues/7151). diff --git a/changelogs/fragments/7199-gitlab-runner-new-creation-workflow.yml b/changelogs/fragments/7199-gitlab-runner-new-creation-workflow.yml deleted file mode 100644 index d4c5f96f9d..0000000000 --- a/changelogs/fragments/7199-gitlab-runner-new-creation-workflow.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - gitlab_runner - add support for new runner creation workflow (https://github.com/ansible-collections/community.general/pull/7199). diff --git a/changelogs/fragments/7242-multi-values-for-same-name-in-git-config.yml b/changelogs/fragments/7242-multi-values-for-same-name-in-git-config.yml deleted file mode 100644 index be3dfdcac9..0000000000 --- a/changelogs/fragments/7242-multi-values-for-same-name-in-git-config.yml +++ /dev/null @@ -1,4 +0,0 @@ -minor_changes: - - "git_config - allow multiple git configs for the same name with the new ``add_mode`` option (https://github.com/ansible-collections/community.general/pull/7260)." 
- - "git_config - the ``after`` and ``before`` fields in the ``diff`` of the return value can be a list instead of a string in case more configs with the same key are affected (https://github.com/ansible-collections/community.general/pull/7260)." - - "git_config - when a value is unset, all configs with the same key are unset (https://github.com/ansible-collections/community.general/pull/7260)." diff --git a/changelogs/fragments/7389-nmcli-issue-with-creating-a-wifi-bridge-slave.yml b/changelogs/fragments/7389-nmcli-issue-with-creating-a-wifi-bridge-slave.yml deleted file mode 100644 index f5f07dc230..0000000000 --- a/changelogs/fragments/7389-nmcli-issue-with-creating-a-wifi-bridge-slave.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - nmcli - fix ``connection.slave-type`` wired to ``bond`` and not with parameter ``slave_type`` in case of connection type ``wifi`` (https://github.com/ansible-collections/community.general/issues/7389). \ No newline at end of file diff --git a/changelogs/fragments/7418-kc_identity_provider-mapper-reconfiguration-fixes.yml b/changelogs/fragments/7418-kc_identity_provider-mapper-reconfiguration-fixes.yml deleted file mode 100644 index 30f3673499..0000000000 --- a/changelogs/fragments/7418-kc_identity_provider-mapper-reconfiguration-fixes.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - keycloak_identity_provider - it was not possible to reconfigure (add, remove) ``mappers`` once they were created initially. Removal was ignored, adding new ones resulted in dropping the pre-existing unmodified mappers. Fix resolves the issue by supplying correct input to the internal update call (https://github.com/ansible-collections/community.general/pull/7418). - - keycloak_identity_provider - ``mappers`` processing was not idempotent if the mappers configuration list had not been sorted by name (in ascending order). Fix resolves the issue by sorting mappers in the desired state using the same key which is used for obtaining existing state (https://github.com/ansible-collections/community.general/pull/7418). \ No newline at end of file diff --git a/changelogs/fragments/7426-add-timestamp-and-preserve-options-for-passwordstore.yaml b/changelogs/fragments/7426-add-timestamp-and-preserve-options-for-passwordstore.yaml deleted file mode 100644 index 59e22b450f..0000000000 --- a/changelogs/fragments/7426-add-timestamp-and-preserve-options-for-passwordstore.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - passwordstore - adds ``timestamp`` and ``preserve`` parameters to modify the stored password format (https://github.com/ansible-collections/community.general/pull/7426). \ No newline at end of file diff --git a/changelogs/fragments/7456-add-ssh-control-master.yml b/changelogs/fragments/7456-add-ssh-control-master.yml deleted file mode 100644 index de6399e2bd..0000000000 --- a/changelogs/fragments/7456-add-ssh-control-master.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ssh_config - adds ``controlmaster``, ``controlpath`` and ``controlpersist`` parameters (https://github.com/ansible-collections/community.general/pull/7456). diff --git a/changelogs/fragments/7461-proxmox-inventory-add-exclude-nodes.yaml b/changelogs/fragments/7461-proxmox-inventory-add-exclude-nodes.yaml deleted file mode 100644 index 40391342f7..0000000000 --- a/changelogs/fragments/7461-proxmox-inventory-add-exclude-nodes.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - proxmox inventory plugin - adds an option to exclude nodes from the dynamic inventory generation. 
The new setting is optional, not using this option will behave as usual (https://github.com/ansible-collections/community.general/issues/6714, https://github.com/ansible-collections/community.general/pull/7461). diff --git a/changelogs/fragments/7462-Add-ostype-parameter-in-LXC-container-clone-of-ProxmoxVE.yaml b/changelogs/fragments/7462-Add-ostype-parameter-in-LXC-container-clone-of-ProxmoxVE.yaml deleted file mode 100644 index 20a9b1d144..0000000000 --- a/changelogs/fragments/7462-Add-ostype-parameter-in-LXC-container-clone-of-ProxmoxVE.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - proxmox_ostype - it is now possible to specify the ``ostype`` when creating an LXC container (https://github.com/ansible-collections/community.general/pull/7462). diff --git a/changelogs/fragments/7464-fix-vm-removal-in-proxmox_pool_member.yml b/changelogs/fragments/7464-fix-vm-removal-in-proxmox_pool_member.yml deleted file mode 100644 index b42abc88c0..0000000000 --- a/changelogs/fragments/7464-fix-vm-removal-in-proxmox_pool_member.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - proxmox_pool_member - absent state for type VM did not delete VMs from the pools (https://github.com/ansible-collections/community.general/pull/7464). diff --git a/changelogs/fragments/7465-redfish-firmware-update-message-id-hardening.yml b/changelogs/fragments/7465-redfish-firmware-update-message-id-hardening.yml deleted file mode 100644 index 01a98c2225..0000000000 --- a/changelogs/fragments/7465-redfish-firmware-update-message-id-hardening.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - redfish_command - fix usage of message parsing in ``SimpleUpdate`` and ``MultipartHTTPPushUpdate`` commands to treat the lack of a ``MessageId`` as no message (https://github.com/ansible-collections/community.general/issues/7465, https://github.com/ansible-collections/community.general/pull/7471). diff --git a/changelogs/fragments/7467-fix-gitlab-constants-calls.yml b/changelogs/fragments/7467-fix-gitlab-constants-calls.yml deleted file mode 100644 index 77466f75e6..0000000000 --- a/changelogs/fragments/7467-fix-gitlab-constants-calls.yml +++ /dev/null @@ -1,5 +0,0 @@ -bugfixes: - - gitlab_group_members - fix gitlab constants call in ``gitlab_group_members`` module (https://github.com/ansible-collections/community.general/issues/7467). - - gitlab_project_members - fix gitlab constants call in ``gitlab_project_members`` module (https://github.com/ansible-collections/community.general/issues/7467). - - gitlab_protected_branches - fix gitlab constants call in ``gitlab_protected_branches`` module (https://github.com/ansible-collections/community.general/issues/7467). - - gitlab_user - fix gitlab constants call in ``gitlab_user`` module (https://github.com/ansible-collections/community.general/issues/7467). diff --git a/changelogs/fragments/7472-gitlab-add-ca-path-option.yml b/changelogs/fragments/7472-gitlab-add-ca-path-option.yml deleted file mode 100644 index 48c041ea31..0000000000 --- a/changelogs/fragments/7472-gitlab-add-ca-path-option.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - gitlab modules - add ``ca_path`` option (https://github.com/ansible-collections/community.general/pull/7472). 
diff --git a/changelogs/fragments/7485-proxmox_vm_info-config.yml b/changelogs/fragments/7485-proxmox_vm_info-config.yml deleted file mode 100644 index ca2fd3dc57..0000000000 --- a/changelogs/fragments/7485-proxmox_vm_info-config.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - proxmox_vm_info - add ability to retrieve configuration info (https://github.com/ansible-collections/community.general/pull/7485). diff --git a/changelogs/fragments/7486-gitlab-refactor-package-check.yml b/changelogs/fragments/7486-gitlab-refactor-package-check.yml deleted file mode 100644 index 25b52ac45c..0000000000 --- a/changelogs/fragments/7486-gitlab-refactor-package-check.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - gitlab modules - remove duplicate ``gitlab`` package check (https://github.com/ansible-collections/community.general/pull/7486). diff --git a/changelogs/fragments/7489-netcup-dns-record-types.yml b/changelogs/fragments/7489-netcup-dns-record-types.yml deleted file mode 100644 index b065a4d239..0000000000 --- a/changelogs/fragments/7489-netcup-dns-record-types.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - netcup_dns - adds support for record types ``OPENPGPKEY``, ``SMIMEA``, and ``SSHFP`` (https://github.com/ansible-collections/community.general/pull/7489). \ No newline at end of file diff --git a/changelogs/fragments/7495-proxmox_disk-manipulate-cdrom.yml b/changelogs/fragments/7495-proxmox_disk-manipulate-cdrom.yml deleted file mode 100644 index f3a5b27609..0000000000 --- a/changelogs/fragments/7495-proxmox_disk-manipulate-cdrom.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - proxmox_disk - add ability to manipulate CD-ROM drive (https://github.com/ansible-collections/community.general/pull/7495). diff --git a/changelogs/fragments/7499-allow-mtu-setting-on-bond-and-infiniband-interfaces.yml b/changelogs/fragments/7499-allow-mtu-setting-on-bond-and-infiniband-interfaces.yml deleted file mode 100644 index f12aa55760..0000000000 --- a/changelogs/fragments/7499-allow-mtu-setting-on-bond-and-infiniband-interfaces.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - nmcli - allow for the setting of ``MTU`` for ``infiniband`` and ``bond`` interface types (https://github.com/ansible-collections/community.general/pull/7499). diff --git a/changelogs/fragments/7501-type.yml b/changelogs/fragments/7501-type.yml deleted file mode 100644 index 994c31ce5a..0000000000 --- a/changelogs/fragments/7501-type.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "ocapi_utils, oci_utils, redfish_utils module utils - replace ``type()`` calls with ``isinstance()`` calls (https://github.com/ansible-collections/community.general/pull/7501)." diff --git a/changelogs/fragments/7505-ini_file-section_has.yml b/changelogs/fragments/7505-ini_file-section_has.yml deleted file mode 100644 index 0424764fd0..0000000000 --- a/changelogs/fragments/7505-ini_file-section_has.yml +++ /dev/null @@ -1,5 +0,0 @@ -minor_changes: - - "ini_file - add an optional parameter ``section_has_values``. If the - target ini file contains more than one ``section``, use ``section_has_values`` - to specify which one should be updated - (https://github.com/ansible-collections/community.general/pull/7505)." 
diff --git a/changelogs/fragments/7506-pipx-pipargs.yml b/changelogs/fragments/7506-pipx-pipargs.yml deleted file mode 100644 index fb5cb52e6f..0000000000 --- a/changelogs/fragments/7506-pipx-pipargs.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - pipx module utils - change the CLI argument formatter for the ``pip_args`` parameter (https://github.com/ansible-collections/community.general/issues/7497, https://github.com/ansible-collections/community.general/pull/7506). diff --git a/changelogs/fragments/7517-elastic-close-client.yaml b/changelogs/fragments/7517-elastic-close-client.yaml deleted file mode 100644 index ee383d26a6..0000000000 --- a/changelogs/fragments/7517-elastic-close-client.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - elastic callback plugin - close elastic client to not leak resources (https://github.com/ansible-collections/community.general/pull/7517). diff --git a/changelogs/fragments/7535-terraform-fix-multiline-string-handling-in-complex-variables.yml b/changelogs/fragments/7535-terraform-fix-multiline-string-handling-in-complex-variables.yml deleted file mode 100644 index b991522dd6..0000000000 --- a/changelogs/fragments/7535-terraform-fix-multiline-string-handling-in-complex-variables.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "terraform - fix multiline string handling in complex variables (https://github.com/ansible-collections/community.general/pull/7535)." diff --git a/changelogs/fragments/7538-add-krbprincipalattribute-option.yml b/changelogs/fragments/7538-add-krbprincipalattribute-option.yml deleted file mode 100644 index e2e2ce61c2..0000000000 --- a/changelogs/fragments/7538-add-krbprincipalattribute-option.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - keycloak_user_federation - add option for ``krbPrincipalAttribute`` (https://github.com/ansible-collections/community.general/pull/7538). diff --git a/changelogs/fragments/7540-proxmox-update-config.yml b/changelogs/fragments/7540-proxmox-update-config.yml deleted file mode 100644 index d89c26115f..0000000000 --- a/changelogs/fragments/7540-proxmox-update-config.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - proxmox - adds ``update`` parameter, allowing update of an already existing containers configuration (https://github.com/ansible-collections/community.general/pull/7540). diff --git a/changelogs/fragments/7542-irc-logentries-ssl.yml b/changelogs/fragments/7542-irc-logentries-ssl.yml deleted file mode 100644 index 6897087dfb..0000000000 --- a/changelogs/fragments/7542-irc-logentries-ssl.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - "log_entries callback plugin - replace ``ssl.wrap_socket`` that was removed from Python 3.12 with code for creating a proper SSL context (https://github.com/ansible-collections/community.general/pull/7542)." - - "irc - replace ``ssl.wrap_socket`` that was removed from Python 3.12 with code for creating a proper SSL context (https://github.com/ansible-collections/community.general/pull/7542)." diff --git a/changelogs/fragments/7550-irc-use_tls-validate_certs.yml b/changelogs/fragments/7550-irc-use_tls-validate_certs.yml deleted file mode 100644 index 0c99d8fd6f..0000000000 --- a/changelogs/fragments/7550-irc-use_tls-validate_certs.yml +++ /dev/null @@ -1,5 +0,0 @@ -minor_changes: - - "irc - add ``validate_certs`` option, and rename ``use_ssl`` to ``use_tls``, while keeping ``use_ssl`` as an alias. - The default value for ``validate_certs`` is ``false`` for backwards compatibility. 
We recommend to every user of - this module to explicitly set ``use_tls=true`` and `validate_certs=true`` whenever possible, especially when - communicating to IRC servers over the internet (https://github.com/ansible-collections/community.general/pull/7550)." diff --git a/changelogs/fragments/7564-onepassword-lookup-case-insensitive.yaml b/changelogs/fragments/7564-onepassword-lookup-case-insensitive.yaml deleted file mode 100644 index d2eaf2ff11..0000000000 --- a/changelogs/fragments/7564-onepassword-lookup-case-insensitive.yaml +++ /dev/null @@ -1,4 +0,0 @@ -bugfixes: - - >- - onepassword lookup plugin - field and section titles are now case insensitive when using - op CLI version two or later. This matches the behavior of version one (https://github.com/ansible-collections/community.general/pull/7564). diff --git a/changelogs/fragments/7569-infiniband-slave-support.yml b/changelogs/fragments/7569-infiniband-slave-support.yml deleted file mode 100644 index f54460842d..0000000000 --- a/changelogs/fragments/7569-infiniband-slave-support.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - nmcli - allow for ``infiniband`` slaves of ``bond`` interface types (https://github.com/ansible-collections/community.general/pull/7569). diff --git a/changelogs/fragments/7577-fix-apt_rpm-module.yml b/changelogs/fragments/7577-fix-apt_rpm-module.yml deleted file mode 100644 index ef55eb5bd2..0000000000 --- a/changelogs/fragments/7577-fix-apt_rpm-module.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - apt-rpm - the module did not upgrade packages if a newer version exists. Now the package will be reinstalled if the candidate is newer than the installed version (https://github.com/ansible-collections/community.general/issues/7414). diff --git a/changelogs/fragments/7578-irc-tls.yml b/changelogs/fragments/7578-irc-tls.yml deleted file mode 100644 index a7fcbbca29..0000000000 --- a/changelogs/fragments/7578-irc-tls.yml +++ /dev/null @@ -1,4 +0,0 @@ -deprecated_features: - - "irc - the defaults ``false`` for ``use_tls`` and ``validate_certs`` have been deprecated and will change to ``true`` in community.general 10.0.0 - to improve security. You can already improve security now by explicitly setting them to ``true``. Specifying values now disables the deprecation - warning (https://github.com/ansible-collections/community.general/pull/7578)." diff --git a/changelogs/fragments/7588-ipa-config-new-choice-passkey-to-ipauserauthtype.yml b/changelogs/fragments/7588-ipa-config-new-choice-passkey-to-ipauserauthtype.yml deleted file mode 100644 index c9d83c761a..0000000000 --- a/changelogs/fragments/7588-ipa-config-new-choice-passkey-to-ipauserauthtype.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ipa_config - adds ``passkey`` choice to ``ipauserauthtype`` parameter's choices (https://github.com/ansible-collections/community.general/pull/7588). diff --git a/changelogs/fragments/7589-ipa-config-new-choices-idp-and-passkey-to-ipauserauthtype.yml b/changelogs/fragments/7589-ipa-config-new-choices-idp-and-passkey-to-ipauserauthtype.yml deleted file mode 100644 index bf584514ae..0000000000 --- a/changelogs/fragments/7589-ipa-config-new-choices-idp-and-passkey-to-ipauserauthtype.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ipa_user - adds ``idp`` and ``passkey`` choice to ``ipauserauthtype`` parameter's choices (https://github.com/ansible-collections/community.general/pull/7589). 
diff --git a/changelogs/fragments/7600-proxmox_kvm-hookscript.yml b/changelogs/fragments/7600-proxmox_kvm-hookscript.yml deleted file mode 100644 index 5d79e71657..0000000000 --- a/changelogs/fragments/7600-proxmox_kvm-hookscript.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "proxmox_kvm - support the ``hookscript`` parameter (https://github.com/ansible-collections/community.general/issues/7600)." diff --git a/changelogs/fragments/7601-lvol-fix.yml b/changelogs/fragments/7601-lvol-fix.yml deleted file mode 100644 index b83fe15683..0000000000 --- a/changelogs/fragments/7601-lvol-fix.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - lvol - test for output messages in both ``stdout`` and ``stderr`` (https://github.com/ansible-collections/community.general/pull/7601, https://github.com/ansible-collections/community.general/issues/7182). diff --git a/changelogs/fragments/7612-interface_file-method.yml b/changelogs/fragments/7612-interface_file-method.yml deleted file mode 100644 index 38fcb71503..0000000000 --- a/changelogs/fragments/7612-interface_file-method.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "interface_files - also consider ``address_family`` when changing ``option=method`` (https://github.com/ansible-collections/community.general/issues/7610, https://github.com/ansible-collections/community.general/pull/7612)." diff --git a/changelogs/fragments/7626-redfish-info-add-boot-progress-property.yml b/changelogs/fragments/7626-redfish-info-add-boot-progress-property.yml deleted file mode 100644 index 919383686b..0000000000 --- a/changelogs/fragments/7626-redfish-info-add-boot-progress-property.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - redfish_info - adding the ``BootProgress`` property when getting ``Systems`` info (https://github.com/ansible-collections/community.general/pull/7626). diff --git a/changelogs/fragments/7641-fix-keycloak-api-client-to-quote-properly.yml b/changelogs/fragments/7641-fix-keycloak-api-client-to-quote-properly.yml deleted file mode 100644 index c11cbf3b06..0000000000 --- a/changelogs/fragments/7641-fix-keycloak-api-client-to-quote-properly.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - keycloak_* - fix Keycloak API client to quote ``/`` properly (https://github.com/ansible-collections/community.general/pull/7641). diff --git a/changelogs/fragments/7645-Keycloak-print-error-msg-from-server.yml b/changelogs/fragments/7645-Keycloak-print-error-msg-from-server.yml deleted file mode 100644 index 509ab0fd81..0000000000 --- a/changelogs/fragments/7645-Keycloak-print-error-msg-from-server.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - keycloak module utils - expose error message from Keycloak server for HTTP errors in some specific situations (https://github.com/ansible-collections/community.general/pull/7645). \ No newline at end of file diff --git a/changelogs/fragments/7646-fix-order-number-detection-in-dn.yml b/changelogs/fragments/7646-fix-order-number-detection-in-dn.yml deleted file mode 100644 index f2d2379872..0000000000 --- a/changelogs/fragments/7646-fix-order-number-detection-in-dn.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - ldap - previously the order number (if present) was expected to follow an equals sign in the DN. This makes it so the order number string is identified correctly anywhere within the DN (https://github.com/ansible-collections/community.general/issues/7646). 
diff --git a/changelogs/fragments/7653-fix-cloudflare-lookup.yml b/changelogs/fragments/7653-fix-cloudflare-lookup.yml deleted file mode 100644 index f370a1c1d1..0000000000 --- a/changelogs/fragments/7653-fix-cloudflare-lookup.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - cloudflare_dns - fix Cloudflare lookup of SHFP records (https://github.com/ansible-collections/community.general/issues/7652). diff --git a/changelogs/fragments/7676-lvol-pvs-as-list.yml b/changelogs/fragments/7676-lvol-pvs-as-list.yml deleted file mode 100644 index aa28fff59d..0000000000 --- a/changelogs/fragments/7676-lvol-pvs-as-list.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - lvol - change ``pvs`` argument type to list of strings (https://github.com/ansible-collections/community.general/pull/7676, https://github.com/ansible-collections/community.general/issues/7504). diff --git a/changelogs/fragments/7683-added-contenttype-parameter.yml b/changelogs/fragments/7683-added-contenttype-parameter.yml deleted file mode 100644 index 52f4b6b0c5..0000000000 --- a/changelogs/fragments/7683-added-contenttype-parameter.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - rundeck module utils - allow to pass ``Content-Type`` to API requests (https://github.com/ansible-collections/community.general/pull/7684). \ No newline at end of file diff --git a/changelogs/fragments/7696-avoid-attempt-to-delete-non-existing-user.yml b/changelogs/fragments/7696-avoid-attempt-to-delete-non-existing-user.yml deleted file mode 100644 index db57d68233..0000000000 --- a/changelogs/fragments/7696-avoid-attempt-to-delete-non-existing-user.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - keycloak_user - when ``force`` is set, but user does not exist, do not try to delete it (https://github.com/ansible-collections/community.general/pull/7696). diff --git a/changelogs/fragments/7698-improvements-to-keycloak_realm_key.yml b/changelogs/fragments/7698-improvements-to-keycloak_realm_key.yml deleted file mode 100644 index 0cd996c510..0000000000 --- a/changelogs/fragments/7698-improvements-to-keycloak_realm_key.yml +++ /dev/null @@ -1,4 +0,0 @@ -minor_changes: - - keycloak_realm_key - the ``provider_id`` option now supports RSA encryption key usage (value ``rsa-enc``) (https://github.com/ansible-collections/community.general/pull/7698). - - keycloak_realm_key - the ``config.algorithm`` option now supports 8 additional key algorithms (https://github.com/ansible-collections/community.general/pull/7698). - - keycloak_realm_key - the ``config.certificate`` option value is no longer defined with ``no_log=True`` (https://github.com/ansible-collections/community.general/pull/7698). \ No newline at end of file diff --git a/changelogs/fragments/7703-ssh_config_add_keys_to_agent_option.yml b/changelogs/fragments/7703-ssh_config_add_keys_to_agent_option.yml deleted file mode 100644 index 99893a0ff3..0000000000 --- a/changelogs/fragments/7703-ssh_config_add_keys_to_agent_option.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ssh_config - new feature to set ``AddKeysToAgent`` option to ``yes`` or ``no`` (https://github.com/ansible-collections/community.general/pull/7703). 
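The lvol change above (``pvs`` is now a list of strings) can be exercised as in this minimal sketch; the volume group, logical volume, and device names are placeholders.

```yaml
# Sketch only - VG, LV, and device names are placeholders.
- name: Create a logical volume spanning two physical volumes
  community.general.lvol:
    vg: data
    lv: logs
    size: 10g
    pvs:
      - /dev/sdb1
      - /dev/sdc1
```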
diff --git a/changelogs/fragments/7704-ssh_config_identities_only_option.yml b/changelogs/fragments/7704-ssh_config_identities_only_option.yml deleted file mode 100644 index 9efa10b70f..0000000000 --- a/changelogs/fragments/7704-ssh_config_identities_only_option.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ssh_config - new feature to set ``IdentitiesOnly`` option to ``yes`` or ``no`` (https://github.com/ansible-collections/community.general/pull/7704). diff --git a/changelogs/fragments/7717-prevent-modprobe-error.yml b/changelogs/fragments/7717-prevent-modprobe-error.yml deleted file mode 100644 index bfef30e67b..0000000000 --- a/changelogs/fragments/7717-prevent-modprobe-error.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - modprobe - listing modules files or modprobe files could trigger a FileNotFoundError if ``/etc/modprobe.d`` or ``/etc/modules-load.d`` did not exist. Relevant functions now return empty lists if the directories do not exist to avoid crashing the module (https://github.com/ansible-collections/community.general/issues/7717). diff --git a/changelogs/fragments/7723-ipa-pwpolicy-update-pwpolicy-module.yml b/changelogs/fragments/7723-ipa-pwpolicy-update-pwpolicy-module.yml deleted file mode 100644 index bffd40efcd..0000000000 --- a/changelogs/fragments/7723-ipa-pwpolicy-update-pwpolicy-module.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - ipa_pwpolicy - update module to support ``maxrepeat``, ``maxsequence``, ``dictcheck``, ``usercheck``, ``gracelimit`` parameters in FreeIPA password policies (https://github.com/ansible-collections/community.general/pull/7723). - - ipa_pwpolicy - refactor module and exchange a sequence ``if`` statements with a ``for`` loop (https://github.com/ansible-collections/community.general/pull/7723). diff --git a/changelogs/fragments/7737-add-ipa-dnsrecord-ns-type.yml b/changelogs/fragments/7737-add-ipa-dnsrecord-ns-type.yml deleted file mode 100644 index 534d96e123..0000000000 --- a/changelogs/fragments/7737-add-ipa-dnsrecord-ns-type.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ipa_dnsrecord - adds ability to manage NS record types (https://github.com/ansible-collections/community.general/pull/7737). diff --git a/changelogs/fragments/7740-add-message-id-header-to-mail-module.yml b/changelogs/fragments/7740-add-message-id-header-to-mail-module.yml deleted file mode 100644 index 1c142b62ef..0000000000 --- a/changelogs/fragments/7740-add-message-id-header-to-mail-module.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - mail - add ``Message-ID`` header; which is required by some mail servers (https://github.com/ansible-collections/community.general/pull/7740). diff --git a/changelogs/fragments/7746-raw_post-without-actions.yml b/changelogs/fragments/7746-raw_post-without-actions.yml deleted file mode 100644 index 10dc110c5e..0000000000 --- a/changelogs/fragments/7746-raw_post-without-actions.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - xcc_redfish_command - added support for raw POSTs (``command=PostResource`` in ``category=Raw``) without a specific action info (https://github.com/ansible-collections/community.general/pull/7746). diff --git a/changelogs/fragments/7754-fixed-payload-format.yml b/changelogs/fragments/7754-fixed-payload-format.yml deleted file mode 100644 index 01458053e5..0000000000 --- a/changelogs/fragments/7754-fixed-payload-format.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - statusio_maintenance - fix error caused by incorrectly formed API data payload. 
Was raising "Failed to create maintenance HTTP Error 400 Bad Request" caused by bad data type for date/time and deprecated dict keys (https://github.com/ansible-collections/community.general/pull/7754). \ No newline at end of file diff --git a/changelogs/fragments/7765-mail-message-id.yml b/changelogs/fragments/7765-mail-message-id.yml deleted file mode 100644 index 54af767ecf..0000000000 --- a/changelogs/fragments/7765-mail-message-id.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "mail module, mail callback plugin - allow to configure the domain name of the Message-ID header with a new ``message_id_domain`` option (https://github.com/ansible-collections/community.general/pull/7765)." diff --git a/changelogs/fragments/7782-cloudflare_dns-spf.yml b/changelogs/fragments/7782-cloudflare_dns-spf.yml deleted file mode 100644 index 83e7fe79bb..0000000000 --- a/changelogs/fragments/7782-cloudflare_dns-spf.yml +++ /dev/null @@ -1,2 +0,0 @@ -removed_features: - - "cloudflare_dns - remove support for SPF records. These are no longer supported by CloudFlare (https://github.com/ansible-collections/community.general/pull/7782)." diff --git a/changelogs/fragments/7789-keycloak-user-federation-custom-provider-type.yml b/changelogs/fragments/7789-keycloak-user-federation-custom-provider-type.yml deleted file mode 100644 index dd20a4ea18..0000000000 --- a/changelogs/fragments/7789-keycloak-user-federation-custom-provider-type.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - keycloak_user_federation - allow custom user storage providers to be set through ``provider_id`` (https://github.com/ansible-collections/community.general/pull/7789). diff --git a/changelogs/fragments/7790-gitlab-runner-api-pagination.yml b/changelogs/fragments/7790-gitlab-runner-api-pagination.yml deleted file mode 100644 index 59a65ea8ef..0000000000 --- a/changelogs/fragments/7790-gitlab-runner-api-pagination.yml +++ /dev/null @@ -1,8 +0,0 @@ -bugfixes: - - gitlab_runner - fix pagination when checking for existing runners (https://github.com/ansible-collections/community.general/pull/7790). - -minor_changes: - - gitlab_deploy_key, gitlab_group_members, gitlab_group_variable, gitlab_hook, - gitlab_instance_variable, gitlab_project_badge, gitlab_project_variable, - gitlab_user - improve API pagination and compatibility with different versions - of ``python-gitlab`` (https://github.com/ansible-collections/community.general/pull/7790). diff --git a/changelogs/fragments/7791-proxmox_kvm-state-template-will-check-status-first.yaml b/changelogs/fragments/7791-proxmox_kvm-state-template-will-check-status-first.yaml deleted file mode 100644 index 1e061ce6af..0000000000 --- a/changelogs/fragments/7791-proxmox_kvm-state-template-will-check-status-first.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - proxmox_kvm - running ``state=template`` will first check whether VM is already a template (https://github.com/ansible-collections/community.general/pull/7792). diff --git a/changelogs/fragments/7797-ipa-fix-otp-idempotency.yml b/changelogs/fragments/7797-ipa-fix-otp-idempotency.yml deleted file mode 100644 index 43fd4f5251..0000000000 --- a/changelogs/fragments/7797-ipa-fix-otp-idempotency.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - ipa_otptoken - the module expect ``ipatokendisabled`` as string but the ``ipatokendisabled`` value is returned as a boolean (https://github.com/ansible-collections/community.general/pull/7795). 
diff --git a/changelogs/fragments/7821-mssql_script-py2.yml b/changelogs/fragments/7821-mssql_script-py2.yml deleted file mode 100644 index 79de688628..0000000000 --- a/changelogs/fragments/7821-mssql_script-py2.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "mssql_script - make the module work with Python 2 (https://github.com/ansible-collections/community.general/issues/7818, https://github.com/ansible-collections/community.general/pull/7821)." diff --git a/changelogs/fragments/7826-consul-modules-refactoring.yaml b/changelogs/fragments/7826-consul-modules-refactoring.yaml deleted file mode 100644 index a51352d88e..0000000000 --- a/changelogs/fragments/7826-consul-modules-refactoring.yaml +++ /dev/null @@ -1,7 +0,0 @@ -minor_changes: - - 'consul_policy, consul_role, consul_session - removed dependency on ``requests`` and factored out common parts (https://github.com/ansible-collections/community.general/pull/7826, https://github.com/ansible-collections/community.general/pull/7878).' - - consul_policy - added support for diff and check mode (https://github.com/ansible-collections/community.general/pull/7878). - - consul_role - added support for diff mode (https://github.com/ansible-collections/community.general/pull/7878). - - consul_role - added support for templated policies (https://github.com/ansible-collections/community.general/pull/7878). - - consul_role - ``service_identities`` now expects a ``service_name`` option to match the Consul API, the old ``name`` is still supported as alias (https://github.com/ansible-collections/community.general/pull/7878). - - consul_role - ``node_identities`` now expects a ``node_name`` option to match the Consul API, the old ``name`` is still supported as alias (https://github.com/ansible-collections/community.general/pull/7878). \ No newline at end of file diff --git a/changelogs/fragments/7843-proxmox_kvm-update_unsafe.yml b/changelogs/fragments/7843-proxmox_kvm-update_unsafe.yml deleted file mode 100644 index dcb1ebb218..0000000000 --- a/changelogs/fragments/7843-proxmox_kvm-update_unsafe.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - proxmox_kvm - add parameter ``update_unsafe`` to avoid limitations when updating dangerous values (https://github.com/ansible-collections/community.general/pull/7843). diff --git a/changelogs/fragments/7847-gitlab-issue-title.yml b/changelogs/fragments/7847-gitlab-issue-title.yml deleted file mode 100644 index c8b8e49905..0000000000 --- a/changelogs/fragments/7847-gitlab-issue-title.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - gitlab_issue - fix behavior to search GitLab issue, using ``search`` keyword instead of ``title`` (https://github.com/ansible-collections/community.general/issues/7846). diff --git a/changelogs/fragments/7870-homebrew-cask-installed-detection.yml b/changelogs/fragments/7870-homebrew-cask-installed-detection.yml deleted file mode 100644 index 1c70c9a2d4..0000000000 --- a/changelogs/fragments/7870-homebrew-cask-installed-detection.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - homebrew - detect already installed formulae and casks using JSON output from ``brew info`` (https://github.com/ansible-collections/community.general/issues/864). 
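To show the renamed ``service_identities`` suboption from the consul_role refactoring above, here is a minimal sketch; the role and service names are placeholders, and per the fragment the old ``name`` key remains available as an alias.

```yaml
# Sketch only - role and service names are placeholders.
- name: Ensure a Consul role with a service identity
  community.general.consul_role:
    name: web-service-role
    service_identities:
      - service_name: web   # matches the Consul API; "name" still works as an alias
```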
diff --git a/changelogs/fragments/7872-proxmox_fix-update-if-setting-doesnt-exist.yaml b/changelogs/fragments/7872-proxmox_fix-update-if-setting-doesnt-exist.yaml deleted file mode 100644 index 82b4fe31d9..0000000000 --- a/changelogs/fragments/7872-proxmox_fix-update-if-setting-doesnt-exist.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - proxmox - fix updating a container config if the setting does not already exist (https://github.com/ansible-collections/community.general/pull/7872). diff --git a/changelogs/fragments/7874-incus_connection_treats_inventory_hostname_as_literal_in_remotes.yml b/changelogs/fragments/7874-incus_connection_treats_inventory_hostname_as_literal_in_remotes.yml deleted file mode 100644 index 83d302e9b9..0000000000 --- a/changelogs/fragments/7874-incus_connection_treats_inventory_hostname_as_literal_in_remotes.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "incus connection plugin - treats ``inventory_hostname`` as a variable instead of a literal in remote connections (https://github.com/ansible-collections/community.general/issues/7874)." diff --git a/changelogs/fragments/7880-ipa-fix-sudo-and-hbcalrule-idempotence.yml b/changelogs/fragments/7880-ipa-fix-sudo-and-hbcalrule-idempotence.yml deleted file mode 100644 index cb2caa3780..0000000000 --- a/changelogs/fragments/7880-ipa-fix-sudo-and-hbcalrule-idempotence.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - ipa_sudorule - the module uses a string for ``ipaenabledflag`` for new FreeIPA versions while the returned value is a boolean (https://github.com/ansible-collections/community.general/pull/7880). - - ipa_hbacrule - the module uses a string for ``ipaenabledflag`` for new FreeIPA versions while the returned value is a boolean (https://github.com/ansible-collections/community.general/pull/7880). diff --git a/changelogs/fragments/7881-fix-keycloak-client-ckeckmode.yml b/changelogs/fragments/7881-fix-keycloak-client-ckeckmode.yml deleted file mode 100644 index 485950c11c..0000000000 --- a/changelogs/fragments/7881-fix-keycloak-client-ckeckmode.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - keycloak_client - fixes issue when metadata is provided in desired state when task is in check mode (https://github.com/ansible-collections/community.general/issues/1226, https://github.com/ansible-collections/community.general/pull/7881). \ No newline at end of file diff --git a/changelogs/fragments/7882-add-redfish-get-service-identification.yml b/changelogs/fragments/7882-add-redfish-get-service-identification.yml deleted file mode 100644 index 463c9a2bc5..0000000000 --- a/changelogs/fragments/7882-add-redfish-get-service-identification.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - redfish_info - add command ``GetServiceIdentification`` to get service identification (https://github.com/ansible-collections/community.general/issues/7882). diff --git a/changelogs/fragments/7896-add-terraform-diff-mode.yml b/changelogs/fragments/7896-add-terraform-diff-mode.yml deleted file mode 100644 index 7c0834efa5..0000000000 --- a/changelogs/fragments/7896-add-terraform-diff-mode.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - terraform - add support for ``diff_mode`` for terraform resource_changes (https://github.com/ansible-collections/community.general/pull/7896). 
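The terraform ``diff_mode`` support above is driven by Ansible's regular diff mode (the task-level ``diff`` keyword or ``--diff`` on the command line); the project path in this sketch is a placeholder.

```yaml
# Sketch only - the project path is a placeholder.
- name: Apply a Terraform project and report resource changes in diff mode
  community.general.terraform:
    project_path: /srv/terraform/app
    state: present
  diff: true
```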
diff --git a/changelogs/fragments/7897-consul-action-group.yaml b/changelogs/fragments/7897-consul-action-group.yaml deleted file mode 100644 index 1764e1970d..0000000000 --- a/changelogs/fragments/7897-consul-action-group.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - consul_auth_method, consul_binding_rule, consul_policy, consul_role, consul_session, consul_token - added action group ``community.general.consul`` (https://github.com/ansible-collections/community.general/pull/7897). diff --git a/changelogs/fragments/7901-consul-acl-deprecation.yaml b/changelogs/fragments/7901-consul-acl-deprecation.yaml deleted file mode 100644 index 9480b04ce9..0000000000 --- a/changelogs/fragments/7901-consul-acl-deprecation.yaml +++ /dev/null @@ -1,3 +0,0 @@ -deprecated_features: - - "consul_acl - the module has been deprecated and will be removed in community.general 10.0.0. ``consul_token`` and ``consul_policy`` - can be used instead (https://github.com/ansible-collections/community.general/pull/7901)." \ No newline at end of file diff --git a/changelogs/fragments/7916-add-redfish-set-service-identification.yml b/changelogs/fragments/7916-add-redfish-set-service-identification.yml deleted file mode 100644 index 2b1f2ca7b3..0000000000 --- a/changelogs/fragments/7916-add-redfish-set-service-identification.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - redfish_config - add command ``SetServiceIdentification`` to set service identification (https://github.com/ansible-collections/community.general/issues/7916). diff --git a/changelogs/fragments/7919-onepassword-fieldname-casing.yaml b/changelogs/fragments/7919-onepassword-fieldname-casing.yaml deleted file mode 100644 index 9119f896f0..0000000000 --- a/changelogs/fragments/7919-onepassword-fieldname-casing.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - onepassword lookup plugin - failed for fields that were in sections and had uppercase letters in the label/ID. Field lookups are now case insensitive in all cases (https://github.com/ansible-collections/community.general/pull/7919). diff --git a/changelogs/fragments/7951-fix-redfish_info-exception.yml b/changelogs/fragments/7951-fix-redfish_info-exception.yml deleted file mode 100644 index cd5707da4b..0000000000 --- a/changelogs/fragments/7951-fix-redfish_info-exception.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "redfish_info - correct uncaught exception when attempting to retrieve ``Chassis`` information (https://github.com/ansible-collections/community.general/pull/7952)." diff --git a/changelogs/fragments/7953-proxmox_kvm-fix_status_check.yml b/changelogs/fragments/7953-proxmox_kvm-fix_status_check.yml deleted file mode 100644 index 10f8e6d26a..0000000000 --- a/changelogs/fragments/7953-proxmox_kvm-fix_status_check.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - proxmox_kvm - fixed status check getting from node-specific API endpoint (https://github.com/ansible-collections/community.general/issues/7817). diff --git a/changelogs/fragments/7956-adding-releases_events-option-to-gitlab_hook-module.yaml b/changelogs/fragments/7956-adding-releases_events-option-to-gitlab_hook-module.yaml deleted file mode 100644 index 30186804d4..0000000000 --- a/changelogs/fragments/7956-adding-releases_events-option-to-gitlab_hook-module.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - gitlab_hook - adds ``releases_events`` parameter for supporting Releases events triggers on GitLab hooks (https://github.com/ansible-collections/community.general/pull/7956). 
\ No newline at end of file diff --git a/changelogs/fragments/7963-fix-terraform-diff-absent.yml b/changelogs/fragments/7963-fix-terraform-diff-absent.yml deleted file mode 100644 index 4e2cf53c9b..0000000000 --- a/changelogs/fragments/7963-fix-terraform-diff-absent.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - terraform - fix ``diff_mode`` in state ``absent`` and when terraform ``resource_changes`` does not exist (https://github.com/ansible-collections/community.general/pull/7963). diff --git a/changelogs/fragments/7970-fix-cargo-path-idempotency.yaml b/changelogs/fragments/7970-fix-cargo-path-idempotency.yaml deleted file mode 100644 index 143247bc91..0000000000 --- a/changelogs/fragments/7970-fix-cargo-path-idempotency.yaml +++ /dev/null @@ -1,10 +0,0 @@ -bugfixes: - - "cargo - fix idempotency issues when using a custom installation path - for packages (using the ``--path`` parameter). - The initial installation runs fine, but subsequent runs use the - ``get_installed()`` function which did not check the given installation - location, before running ``cargo install``. This resulted in a false - ``changed`` state. - Also the removal of packages using ``state: absent`` failed, as the - installation check did not use the given parameter - (https://github.com/ansible-collections/community.general/pull/7970)." diff --git a/changelogs/fragments/7976-add-mssql_script-transactional-support.yml b/changelogs/fragments/7976-add-mssql_script-transactional-support.yml deleted file mode 100644 index dc6f335247..0000000000 --- a/changelogs/fragments/7976-add-mssql_script-transactional-support.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - mssql_script - adds transactional (rollback/commit) support via optional boolean param ``transaction`` (https://github.com/ansible-collections/community.general/pull/7976). diff --git a/changelogs/fragments/7983-sudoers-add-support-noexec.yml b/changelogs/fragments/7983-sudoers-add-support-noexec.yml deleted file mode 100644 index f58e6f7ec8..0000000000 --- a/changelogs/fragments/7983-sudoers-add-support-noexec.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - sudoers - add support for the ``NOEXEC`` tag in sudoers rules (https://github.com/ansible-collections/community.general/pull/7983). diff --git a/changelogs/fragments/7994-bitwarden-session-arg.yaml b/changelogs/fragments/7994-bitwarden-session-arg.yaml deleted file mode 100644 index 36f9622ac0..0000000000 --- a/changelogs/fragments/7994-bitwarden-session-arg.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "bitwarden lookup plugin - add ``bw_session`` option, to pass session key instead of reading from env (https://github.com/ansible-collections/community.general/pull/7994)." diff --git a/changelogs/fragments/7996-add-templating-support-to-icinga2-inventory.yml b/changelogs/fragments/7996-add-templating-support-to-icinga2-inventory.yml deleted file mode 100644 index 9998583b83..0000000000 --- a/changelogs/fragments/7996-add-templating-support-to-icinga2-inventory.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - icinga2 inventory plugin - add Jinja2 templating support to ``url``, ``user``, and ``password`` parameters (https://github.com/ansible-collections/community.general/issues/7074, https://github.com/ansible-collections/community.general/pull/7996).
\ No newline at end of file diff --git a/changelogs/fragments/7998-icinga2-inventory-group_by_hostgroups-parameter.yml b/changelogs/fragments/7998-icinga2-inventory-group_by_hostgroups-parameter.yml deleted file mode 100644 index 1170a108fd..0000000000 --- a/changelogs/fragments/7998-icinga2-inventory-group_by_hostgroups-parameter.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - icinga2 inventory plugin - adds new parameter ``group_by_hostgroups`` in order to make grouping by Icinga2 hostgroups optional (https://github.com/ansible-collections/community.general/pull/7998). \ No newline at end of file diff --git a/changelogs/fragments/8003-redfish-get-update-status-empty-response.yml b/changelogs/fragments/8003-redfish-get-update-status-empty-response.yml deleted file mode 100644 index 21796e7a0e..0000000000 --- a/changelogs/fragments/8003-redfish-get-update-status-empty-response.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - redfish_info - allow for a GET operation invoked by ``GetUpdateStatus`` to allow for an empty response body for cases where a service returns 204 No Content (https://github.com/ansible-collections/community.general/issues/8003). diff --git a/changelogs/fragments/8013-bitwarden-full-collection-item-list.yaml b/changelogs/fragments/8013-bitwarden-full-collection-item-list.yaml deleted file mode 100644 index 7337233aea..0000000000 --- a/changelogs/fragments/8013-bitwarden-full-collection-item-list.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "bitwarden lookup plugin - allows to fetch all records of a given collection ID, by allowing to pass an empty value for ``search_value`` when ``collection_id`` is provided (https://github.com/ansible-collections/community.general/pull/8013)." diff --git a/changelogs/fragments/8029-iptables-state-restore-check-mode.yml b/changelogs/fragments/8029-iptables-state-restore-check-mode.yml deleted file mode 100644 index 900ea50988..0000000000 --- a/changelogs/fragments/8029-iptables-state-restore-check-mode.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - iptables_state - fix idempotency issues when restoring incomplete iptables dumps (https://github.com/ansible-collections/community.general/issues/8029). diff --git a/changelogs/fragments/8038-proxmox-startup.yml b/changelogs/fragments/8038-proxmox-startup.yml deleted file mode 100644 index f8afbc0c4e..0000000000 --- a/changelogs/fragments/8038-proxmox-startup.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - proxmox - adds ``startup`` parameters to configure startup order, startup delay and shutdown delay (https://github.com/ansible-collections/community.general/pull/8038). diff --git a/changelogs/fragments/8048-fix-homebrew-module-error-reporting-on-become-true.yaml b/changelogs/fragments/8048-fix-homebrew-module-error-reporting-on-become-true.yaml deleted file mode 100644 index 9954be302a..0000000000 --- a/changelogs/fragments/8048-fix-homebrew-module-error-reporting-on-become-true.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - homebrew - error returned from brew command was ignored and tried to parse empty JSON. Fix now checks for an error and raises it to give accurate error message to users (https://github.com/ansible-collections/community.general/issues/8047). 
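Combining the two icinga2 inventory changes above (templated ``url``/``user``/``password`` and the new ``group_by_hostgroups`` toggle), an inventory source might look like the following sketch; the endpoint and the environment variable names are assumptions.

```yaml
# Sketch of an icinga2 inventory source (for example icinga2.yml) - endpoint and env var names are placeholders.
plugin: community.general.icinga2
url: "{{ lookup('ansible.builtin.env', 'ICINGA_URL') }}"
user: "{{ lookup('ansible.builtin.env', 'ICINGA_USER') }}"
password: "{{ lookup('ansible.builtin.env', 'ICINGA_PASSWORD') }}"
group_by_hostgroups: false
```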
diff --git a/changelogs/fragments/8057-pam_limits-check-mode.yml b/changelogs/fragments/8057-pam_limits-check-mode.yml deleted file mode 100644 index f6f034e9b8..0000000000 --- a/changelogs/fragments/8057-pam_limits-check-mode.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "pam_limits - when the file does not exist, do not create it in check mode (https://github.com/ansible-collections/community.general/issues/8050, https://github.com/ansible-collections/community.general/pull/8057)." diff --git a/changelogs/fragments/8073-ldap-attrs-diff.yml b/changelogs/fragments/8073-ldap-attrs-diff.yml deleted file mode 100644 index 071fc2919e..0000000000 --- a/changelogs/fragments/8073-ldap-attrs-diff.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ldap_attrs - module now supports diff mode, showing which attributes are changed within an operation (https://github.com/ansible-collections/community.general/pull/8073). \ No newline at end of file diff --git a/changelogs/fragments/8075-optional-space-around-section-names.yaml b/changelogs/fragments/8075-optional-space-around-section-names.yaml deleted file mode 100644 index 2e44555f08..0000000000 --- a/changelogs/fragments/8075-optional-space-around-section-names.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "ini_file - support optional spaces between section names and their surrounding brackets (https://github.com/ansible-collections/community.general/pull/8075)." diff --git a/changelogs/fragments/8087-removed-redundant-unicode-prefixes.yml b/changelogs/fragments/8087-removed-redundant-unicode-prefixes.yml deleted file mode 100644 index 1224ebdfa2..0000000000 --- a/changelogs/fragments/8087-removed-redundant-unicode-prefixes.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "revbitspss lookup plugin - removed a redundant unicode prefix. The prefix was not necessary for Python 3 and has been cleaned up to streamline the code (https://github.com/ansible-collections/community.general/pull/8087)." diff --git a/changelogs/fragments/8091-consul-token-fixes.yaml b/changelogs/fragments/8091-consul-token-fixes.yaml deleted file mode 100644 index c734623588..0000000000 --- a/changelogs/fragments/8091-consul-token-fixes.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "consul_token - fix token creation without ``accessor_id`` (https://github.com/ansible-collections/community.general/pull/8091)." \ No newline at end of file diff --git a/changelogs/fragments/8100-haproxy-drain-fails-on-down-backend.yml b/changelogs/fragments/8100-haproxy-drain-fails-on-down-backend.yml deleted file mode 100644 index 58f1478914..0000000000 --- a/changelogs/fragments/8100-haproxy-drain-fails-on-down-backend.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "haproxy - fix an issue where HAProxy could get stuck in DRAIN mode when the backend was unreachable (https://github.com/ansible-collections/community.general/issues/8092)." diff --git a/changelogs/fragments/8116-java_cert-enable-owner-group-mode-args.yml b/changelogs/fragments/8116-java_cert-enable-owner-group-mode-args.yml deleted file mode 100644 index f36c145d74..0000000000 --- a/changelogs/fragments/8116-java_cert-enable-owner-group-mode-args.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - java_cert - enable ``owner``, ``group``, ``mode``, and other generic file arguments (https://github.com/ansible-collections/community.general/pull/8116). 
\ No newline at end of file diff --git a/changelogs/fragments/8118-fix-bond-slave-honoring-mtu.yml b/changelogs/fragments/8118-fix-bond-slave-honoring-mtu.yml deleted file mode 100644 index 47f8af9ac3..0000000000 --- a/changelogs/fragments/8118-fix-bond-slave-honoring-mtu.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - nmcli - allow setting ``MTU`` for ``bond-slave`` interface types (https://github.com/ansible-collections/community.general/pull/8118). diff --git a/changelogs/fragments/8126-filesystem-bcachefs-support.yaml b/changelogs/fragments/8126-filesystem-bcachefs-support.yaml deleted file mode 100644 index 32ff5c64da..0000000000 --- a/changelogs/fragments/8126-filesystem-bcachefs-support.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - filesystem - add bcachefs support (https://github.com/ansible-collections/community.general/pull/8126). diff --git a/changelogs/fragments/8133-add-error-message-for-linode-inventory-plugin.yaml b/changelogs/fragments/8133-add-error-message-for-linode-inventory-plugin.yaml deleted file mode 100644 index 755d7ed4fe..0000000000 --- a/changelogs/fragments/8133-add-error-message-for-linode-inventory-plugin.yaml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - linode inventory plugin - add descriptive error message for linode inventory plugin (https://github.com/ansible-collections/community.general/pull/8133). - diff --git a/changelogs/fragments/8151-fix-lsvg_cmd-failed.yml b/changelogs/fragments/8151-fix-lsvg_cmd-failed.yml deleted file mode 100644 index 0eeee752df..0000000000 --- a/changelogs/fragments/8151-fix-lsvg_cmd-failed.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - aix_filesystem - fix ``_validate_vg`` not passing VG name to ``lsvg_cmd`` (https://github.com/ansible-collections/community.general/issues/8151). diff --git a/changelogs/fragments/8153-java_cert-add-cert_content-arg.yml b/changelogs/fragments/8153-java_cert-add-cert_content-arg.yml deleted file mode 100644 index 40ae1f84a4..0000000000 --- a/changelogs/fragments/8153-java_cert-add-cert_content-arg.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - java_cert - add ``cert_content`` argument (https://github.com/ansible-collections/community.general/pull/8153). diff --git a/changelogs/fragments/8154-add-ovs-commands-to-nmcli-module.yml b/changelogs/fragments/8154-add-ovs-commands-to-nmcli-module.yml deleted file mode 100644 index d1fb344ba5..0000000000 --- a/changelogs/fragments/8154-add-ovs-commands-to-nmcli-module.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - nmcli - adds OpenvSwitch support with new ``type`` values ``ovs-port``, ``ovs-interface``, and ``ovs-bridge``, and new ``slave_type`` value ``ovs-port`` (https://github.com/ansible-collections/community.general/pull/8154). \ No newline at end of file diff --git a/changelogs/fragments/8158-gitlab-version-check.yml b/changelogs/fragments/8158-gitlab-version-check.yml deleted file mode 100644 index 046bca938f..0000000000 --- a/changelogs/fragments/8158-gitlab-version-check.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "gitlab_issue, gitlab_label, gitlab_milestone - avoid crash during version comparison when the python-gitlab Python module is not installed (https://github.com/ansible-collections/community.general/pull/8158)." 
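The two java_cert additions above (generic file arguments and ``cert_content``) combine roughly as in this sketch; the alias, keystore path, password, and the PEM variable are placeholders.

```yaml
# Sketch only - alias, keystore details, and the PEM variable are placeholders.
- name: Import a certificate from inline PEM content and set keystore permissions
  community.general.java_cert:
    cert_alias: internal-ca
    cert_content: "{{ internal_ca_pem }}"
    keystore_path: /etc/pki/java/cacerts
    keystore_pass: changeit
    state: present
    owner: root
    group: root
    mode: "0640"
```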
diff --git a/changelogs/fragments/8163-redfish-implementing-reset-to-defaults.yml b/changelogs/fragments/8163-redfish-implementing-reset-to-defaults.yml deleted file mode 100644 index 212ecc9fd8..0000000000 --- a/changelogs/fragments/8163-redfish-implementing-reset-to-defaults.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - redfish_command - add command ``ResetToDefaults`` to reset manager to default state (https://github.com/ansible-collections/community.general/issues/8163). diff --git a/changelogs/fragments/8166-password-store-lookup-missing-subkey.yml b/changelogs/fragments/8166-password-store-lookup-missing-subkey.yml deleted file mode 100644 index da5be9c9e0..0000000000 --- a/changelogs/fragments/8166-password-store-lookup-missing-subkey.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - passwordstore lookup - add ``missing_subkey`` parameter defining the behavior of the lookup when a passwordstore subkey is missing (https://github.com/ansible-collections/community.general/pull/8166). diff --git a/changelogs/fragments/8169-lxml.yml b/changelogs/fragments/8169-lxml.yml deleted file mode 100644 index e2c1b8b952..0000000000 --- a/changelogs/fragments/8169-lxml.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "xml - make module work with lxml 5.1.1, which removed some internals that the module was relying on (https://github.com/ansible-collections/community.general/pull/8169)." diff --git a/changelogs/fragments/8173-osx_defaults-check_type.yml b/changelogs/fragments/8173-osx_defaults-check_type.yml deleted file mode 100644 index a35f609bf3..0000000000 --- a/changelogs/fragments/8173-osx_defaults-check_type.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - osx_defaults - add option ``check_types`` to enable changing the type of existing defaults on the fly (https://github.com/ansible-collections/community.general/pull/8173). diff --git a/changelogs/fragments/8175-get_ipa_version_regex.yml b/changelogs/fragments/8175-get_ipa_version_regex.yml deleted file mode 100644 index e2a51d1b91..0000000000 --- a/changelogs/fragments/8175-get_ipa_version_regex.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - ipa - fix get version regex in IPA module_utils (https://github.com/ansible-collections/community.general/pull/8175). diff --git a/changelogs/fragments/8183-from_ini_to_ini.yml b/changelogs/fragments/8183-from_ini_to_ini.yml deleted file mode 100644 index 1ff455f6ee..0000000000 --- a/changelogs/fragments/8183-from_ini_to_ini.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - "to_ini filter plugin - disabling interpolation of ``ConfigParser`` to allow converting values with a ``%`` sign (https://github.com/ansible-collections/community.general/issues/8183, https://github.com/ansible-collections/community.general/pull/8185)." - - "from_ini filter plugin - disabling interpolation of ``ConfigParser`` to allow converting values with a ``%`` sign (https://github.com/ansible-collections/community.general/issues/8183, https://github.com/ansible-collections/community.general/pull/8185)." diff --git a/changelogs/fragments/8188-bitwarden-add-organization_id.yml b/changelogs/fragments/8188-bitwarden-add-organization_id.yml deleted file mode 100644 index c57ba3a479..0000000000 --- a/changelogs/fragments/8188-bitwarden-add-organization_id.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- bitwarden lookup plugin - add support to filter by organization ID (https://github.com/ansible-collections/community.general/pull/8188). 
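The from_ini/to_ini interpolation fix above is easiest to see with a value containing a literal ``%``; the section and key names in this sketch are arbitrary.

```yaml
# Sketch only - the dictionary contents are arbitrary example data.
- name: Convert a dictionary with a literal percent sign to INI text
  ansible.builtin.debug:
    msg: "{{ {'app': {'threshold': '90%'}} | community.general.to_ini }}"
```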
diff --git a/changelogs/fragments/8194-redfish-add-multipart-to-capabilities.yml b/changelogs/fragments/8194-redfish-add-multipart-to-capabilities.yml deleted file mode 100644 index 6b96d98a7f..0000000000 --- a/changelogs/fragments/8194-redfish-add-multipart-to-capabilities.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - redfish_info - add boolean return value ``MultipartHttpPush`` to ``GetFirmwareUpdateCapabilities`` (https://github.com/ansible-collections/community.general/issues/8194, https://github.com/ansible-collections/community.general/pull/8195). diff --git a/changelogs/fragments/8199-added-usb-support-to-proxmox-module.yml b/changelogs/fragments/8199-added-usb-support-to-proxmox-module.yml deleted file mode 100644 index b621fe284c..0000000000 --- a/changelogs/fragments/8199-added-usb-support-to-proxmox-module.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "proxmox_kvm - adds``usb`` parameter for setting USB devices on proxmox KVM VMs (https://github.com/ansible-collections/community.general/pull/8199)." diff --git a/changelogs/fragments/8211-riak-admin-sub-command-support.yml b/changelogs/fragments/8211-riak-admin-sub-command-support.yml deleted file mode 100644 index dc6eb00e45..0000000000 --- a/changelogs/fragments/8211-riak-admin-sub-command-support.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "riak - support ``riak admin`` sub-command in newer Riak KV versions beside the legacy ``riak-admin`` main command (https://github.com/ansible-collections/community.general/pull/8211)." \ No newline at end of file diff --git a/changelogs/fragments/8215-add-docker-v2-protocol.yml b/changelogs/fragments/8215-add-docker-v2-protocol.yml deleted file mode 100644 index 6a9cc60556..0000000000 --- a/changelogs/fragments/8215-add-docker-v2-protocol.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - keycloak_client, keycloak_clientscope, keycloak_clienttemplate - added ``docker-v2`` protocol support, enhancing alignment with Keycloak's protocol options (https://github.com/ansible-collections/community.general/issues/8215, https://github.com/ansible-collections/community.general/pull/8216). diff --git a/changelogs/fragments/8222-datetime.yml b/changelogs/fragments/8222-datetime.yml deleted file mode 100644 index 00bf862186..0000000000 --- a/changelogs/fragments/8222-datetime.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - "Use offset-aware ``datetime.datetime`` objects (with timezone UTC) instead of offset-naive UTC timestamps, - which are deprecated in Python 3.12 (https://github.com/ansible-collections/community.general/pull/8222)." diff --git a/changelogs/fragments/8223-keycloak_client-additional-normalizations.yaml b/changelogs/fragments/8223-keycloak_client-additional-normalizations.yaml deleted file mode 100644 index 47f7e6bd7b..0000000000 --- a/changelogs/fragments/8223-keycloak_client-additional-normalizations.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - keycloak_client - add sorted ``defaultClientScopes`` and ``optionalClientScopes`` to normalizations (https://github.com/ansible-collections/community.general/pull/8223). 
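As a usage sketch for the ``docker-v2`` protocol support above, the task below registers a Docker registry client; the Keycloak URL, credentials, realm, and client ID are placeholders.

```yaml
# Sketch only - URL, credentials, realm, and client ID are placeholders.
- name: Ensure a Keycloak client that speaks the Docker v2 protocol
  community.general.keycloak_client:
    auth_keycloak_url: https://keycloak.example.org/auth
    auth_realm: master
    auth_username: admin
    auth_password: "{{ keycloak_admin_password }}"
    realm: docker
    client_id: docker-registry
    protocol: docker-v2
    state: present
```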
diff --git a/changelogs/fragments/8224-keycloak_realm-add-normalizations.yaml b/changelogs/fragments/8224-keycloak_realm-add-normalizations.yaml deleted file mode 100644 index 0574141f61..0000000000 --- a/changelogs/fragments/8224-keycloak_realm-add-normalizations.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - keycloak_realm - add normalizations for ``enabledEventTypes`` and ``supportedLocales`` (https://github.com/ansible-collections/community.general/pull/8224). diff --git a/changelogs/fragments/8225-unsafe.yml b/changelogs/fragments/8225-unsafe.yml deleted file mode 100644 index 496797ef74..0000000000 --- a/changelogs/fragments/8225-unsafe.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "inventory plugins - add unsafe wrapper to avoid marking strings that do not contain ``{`` or ``}`` as unsafe, to work around a bug in AWX (https://github.com/ansible-collections/community.general/issues/8212, https://github.com/ansible-collections/community.general/pull/8225)." diff --git a/changelogs/fragments/8226-mh-vardict.yml b/changelogs/fragments/8226-mh-vardict.yml deleted file mode 100644 index c7c62c7db0..0000000000 --- a/changelogs/fragments/8226-mh-vardict.yml +++ /dev/null @@ -1,10 +0,0 @@ -deprecated_features: - - ModuleHelper vars module_utils - bump deprecation of ``VarMeta``, ``VarDict`` and ``VarsMixin`` to version 11.0.0 (https://github.com/ansible-collections/community.general/pull/8226). - - ModuleHelper module_utils - deprecate use of ``VarsMixin`` in favor of using the ``VarDict`` module_utils (https://github.com/ansible-collections/community.general/pull/8226). -minor_changes: - - gconftool2 - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226). - - kernel_blacklist - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226). - - opkg - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226). - - pipx - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226). - - xfconf - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226). - - xfconf_info - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226). diff --git a/changelogs/fragments/8236-portage-select-feature.yml b/changelogs/fragments/8236-portage-select-feature.yml deleted file mode 100644 index 742d5cc966..0000000000 --- a/changelogs/fragments/8236-portage-select-feature.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - portage - adds the possibility to explicitly tell portage to write packages to world file (https://github.com/ansible-collections/community.general/issues/6226, https://github.com/ansible-collections/community.general/pull/8236). diff --git a/changelogs/fragments/8238-bitwarden-secrets-manager-rate-limit-retry-with-backoff.yml b/changelogs/fragments/8238-bitwarden-secrets-manager-rate-limit-retry-with-backoff.yml deleted file mode 100644 index b9d80a7cba..0000000000 --- a/changelogs/fragments/8238-bitwarden-secrets-manager-rate-limit-retry-with-backoff.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "bitwarden_secrets_manager lookup plugin - implements retry with exponential backoff to avoid lookup errors when Bitwarden's API rate limiting is encountered (https://github.com/ansible-collections/community.general/issues/8230, https://github.com/ansible-collections/community.general/pull/8238)."
diff --git a/changelogs/fragments/8247-apt_rpm-latest.yml b/changelogs/fragments/8247-apt_rpm-latest.yml deleted file mode 100644 index d62fb40340..0000000000 --- a/changelogs/fragments/8247-apt_rpm-latest.yml +++ /dev/null @@ -1,6 +0,0 @@ -minor_changes: - - "apt_rpm - add new states ``latest`` and ``present_not_latest``. The value ``latest`` is equivalent to the current behavior of - ``present``, which will upgrade a package if a newer version exists. ``present_not_latest`` does what most users would expect ``present`` - to do: it does not upgrade if the package is already installed. The current behavior of ``present`` will be deprecated in a later version, - and eventually changed to that of ``present_not_latest`` - (https://github.com/ansible-collections/community.general/issues/8217, https://github.com/ansible-collections/community.general/pull/8247)." diff --git a/changelogs/fragments/8257-ssh-config-hostkey-support-accept-new.yaml b/changelogs/fragments/8257-ssh-config-hostkey-support-accept-new.yaml deleted file mode 100644 index ca1d61aefd..0000000000 --- a/changelogs/fragments/8257-ssh-config-hostkey-support-accept-new.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ssh_config - allow ``accept-new`` as valid value for ``strict_host_key_checking`` (https://github.com/ansible-collections/community.general/pull/8257). diff --git a/changelogs/fragments/8263-apt_rpm-install-check.yml b/changelogs/fragments/8263-apt_rpm-install-check.yml deleted file mode 100644 index ae44616e79..0000000000 --- a/changelogs/fragments/8263-apt_rpm-install-check.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "apt_rpm - when checking whether packages were installed after running ``apt-get -y install ``, only the last package name was checked (https://github.com/ansible-collections/community.general/pull/8263)." diff --git a/changelogs/fragments/8264-run_command.yml b/changelogs/fragments/8264-run_command.yml deleted file mode 100644 index dd66cd6123..0000000000 --- a/changelogs/fragments/8264-run_command.yml +++ /dev/null @@ -1,14 +0,0 @@ -minor_changes: - - "aix_lvol - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." - - "apt_rpm - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." - - "btrfs_subvolume - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." - - "installp - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." - - "lvg - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." - - "lvol - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." - - "macports - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." 
- - "parted - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." - - "pkgin - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." - - "portinstall - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." - - "slackpkg - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." - - "svr4pkg - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." - - "swdepot - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264)." diff --git a/changelogs/fragments/8274-homebrew-force-formula.yml b/changelogs/fragments/8274-homebrew-force-formula.yml deleted file mode 100644 index 4a9e471f4c..0000000000 --- a/changelogs/fragments/8274-homebrew-force-formula.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "homebrew - adds ``force_formula`` parameter to disambiguate a formula from a cask of the same name (https://github.com/ansible-collections/community.general/issues/8274)." \ No newline at end of file diff --git a/changelogs/fragments/8280-mh-deprecations.yml b/changelogs/fragments/8280-mh-deprecations.yml deleted file mode 100644 index ae70f96b1e..0000000000 --- a/changelogs/fragments/8280-mh-deprecations.yml +++ /dev/null @@ -1,8 +0,0 @@ -deprecated_features: - - MH DependencyCtxMgr module_utils - deprecate ``module_utils.mh.mixin.deps.DependencyCtxMgr`` in favour of ``module_utils.deps`` (https://github.com/ansible-collections/community.general/pull/8280). - - ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.AnsibleModule`` (https://github.com/ansible-collections/community.general/pull/8280). - - ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.StateMixin`` (https://github.com/ansible-collections/community.general/pull/8280). - - ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.DependencyCtxMgr`` (https://github.com/ansible-collections/community.general/pull/8280). - - ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.VarMeta`` (https://github.com/ansible-collections/community.general/pull/8280). - - ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.VarDict,`` (https://github.com/ansible-collections/community.general/pull/8280). - - ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.VarsMixin`` (https://github.com/ansible-collections/community.general/pull/8280). diff --git a/changelogs/fragments/8281-puppet-waitforlock.yaml b/changelogs/fragments/8281-puppet-waitforlock.yaml deleted file mode 100644 index bd8a820170..0000000000 --- a/changelogs/fragments/8281-puppet-waitforlock.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - puppet - new feature to set ``--waitforlock`` option (https://github.com/ansible-collections/community.general/pull/8282). 
diff --git a/changelogs/fragments/8285-apt_rpm-state-deprecate.yml b/changelogs/fragments/8285-apt_rpm-state-deprecate.yml deleted file mode 100644 index 19f3415841..0000000000 --- a/changelogs/fragments/8285-apt_rpm-state-deprecate.yml +++ /dev/null @@ -1,7 +0,0 @@ -deprecated_features: - - "apt_rpm - the behavior of ``state=present`` and ``state=installed`` is deprecated and will change in community.general 11.0.0. - Right now the module will upgrade a package to the latest version if one of these two states is used. You should explicitly - use ``state=latest`` if you want this behavior, and switch to ``state=present_not_latest`` if you do not want to upgrade the - package if it is already installed. In community.general 11.0.0 the behavior of ``state=present`` and ``state=installed`` will - change to that of ``state=present_not_latest`` (https://github.com/ansible-collections/community.general/issues/8217, - https://github.com/ansible-collections/community.general/pull/8285)." diff --git a/changelogs/fragments/8288-cmdrunner-fmt-list-len-limits.yml b/changelogs/fragments/8288-cmdrunner-fmt-list-len-limits.yml deleted file mode 100644 index 94de04740b..0000000000 --- a/changelogs/fragments/8288-cmdrunner-fmt-list-len-limits.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - cmd_runner module_utils - add validation for minimum and maximum length in the value passed to ``cmd_runner_fmt.as_list()`` (https://github.com/ansible-collections/community.general/pull/8288). diff --git a/changelogs/fragments/8289-python-runner.yml b/changelogs/fragments/8289-python-runner.yml deleted file mode 100644 index 97a45fd8f3..0000000000 --- a/changelogs/fragments/8289-python-runner.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - PythonRunner module utils - specialisation of ``CmdRunner`` to execute Python scripts (https://github.com/ansible-collections/community.general/pull/8289). diff --git a/changelogs/fragments/8290-gandi-livedns-personal-access-token.yml b/changelogs/fragments/8290-gandi-livedns-personal-access-token.yml deleted file mode 100644 index 3168bf20fd..0000000000 --- a/changelogs/fragments/8290-gandi-livedns-personal-access-token.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - gandi_livedns - adds support for personal access tokens (https://github.com/ansible-collections/community.general/issues/7639, https://github.com/ansible-collections/community.general/pull/8337). diff --git a/changelogs/fragments/8303-fix-rendering-foreign-variables.yaml b/changelogs/fragments/8303-fix-rendering-foreign-variables.yaml deleted file mode 100644 index c2162771f2..0000000000 --- a/changelogs/fragments/8303-fix-rendering-foreign-variables.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "merge_variables lookup plugin - fixing cross host merge: providing access to foreign hosts variables to the perspective of the host that is performing the merge (https://github.com/ansible-collections/community.general/pull/8303)." diff --git a/changelogs/fragments/8320-keycloak_user_federation-fix-diff-krbPrincipalAttribute.yaml b/changelogs/fragments/8320-keycloak_user_federation-fix-diff-krbPrincipalAttribute.yaml deleted file mode 100644 index df4a892733..0000000000 --- a/changelogs/fragments/8320-keycloak_user_federation-fix-diff-krbPrincipalAttribute.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - keycloak_user_federation - fix diff of empty ``krbPrincipalAttribute`` (https://github.com/ansible-collections/community.general/pull/8320). 
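Given the apt_rpm ``state`` deprecation above, playbooks can opt in to the intended behaviour explicitly today; the package name in this sketch is a placeholder.

```yaml
# Sketch only - the package name is a placeholder.
- name: Install a package without upgrading it when it is already present
  community.general.apt_rpm:
    package: tmux
    state: present_not_latest

- name: Explicitly keep a package at the latest available version
  community.general.apt_rpm:
    package: tmux
    state: latest
```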
diff --git a/changelogs/fragments/8321-fix-opentelemetry-callback.yml b/changelogs/fragments/8321-fix-opentelemetry-callback.yml deleted file mode 100644 index a02f12c6b9..0000000000 --- a/changelogs/fragments/8321-fix-opentelemetry-callback.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - opentelemetry - add support for HTTP trace_exporter and configures the behavior via ``OTEL_EXPORTER_OTLP_TRACES_PROTOCOL`` (https://github.com/ansible-collections/community.general/issues/7888, https://github.com/ansible-collections/community.general/pull/8321). diff --git a/changelogs/fragments/8323-refactor-homebrew-logic-module-utils.yml b/changelogs/fragments/8323-refactor-homebrew-logic-module-utils.yml deleted file mode 100644 index d29aed5ae4..0000000000 --- a/changelogs/fragments/8323-refactor-homebrew-logic-module-utils.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "homebrew, homebrew_cask - refactor common argument validation logic into a dedicated ``homebrew`` module utils (https://github.com/ansible-collections/community.general/issues/8323, https://github.com/ansible-collections/community.general/pull/8324)." \ No newline at end of file diff --git a/changelogs/fragments/8334-proxmox-action-group.yml b/changelogs/fragments/8334-proxmox-action-group.yml deleted file mode 100644 index 0e5aeeccde..0000000000 --- a/changelogs/fragments/8334-proxmox-action-group.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "proxmox* modules - there is now a ``community.general.proxmox`` module defaults group that can be used to set default options for all Proxmox modules (https://github.com/ansible-collections/community.general/pull/8334)." diff --git a/changelogs/fragments/8355-keycloak-idp-sanitize.yaml b/changelogs/fragments/8355-keycloak-idp-sanitize.yaml deleted file mode 100644 index 3a7942bb88..0000000000 --- a/changelogs/fragments/8355-keycloak-idp-sanitize.yaml +++ /dev/null @@ -1,2 +0,0 @@ -security_fixes: - - keycloak_identity_provider - the client secret was not correctly sanitized by the module. The return values ``proposed``, ``existing``, and ``end_state``, as well as the diff, did contain the client secret unmasked (https://github.com/ansible-collections/community.general/pull/8355). \ No newline at end of file diff --git a/changelogs/fragments/8363-opentelemetry-export-to-a-file.yml b/changelogs/fragments/8363-opentelemetry-export-to-a-file.yml deleted file mode 100644 index b62521ec9f..0000000000 --- a/changelogs/fragments/8363-opentelemetry-export-to-a-file.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - opentelemetry - add support for exporting spans in a file via ``ANSIBLE_OPENTELEMETRY_STORE_SPANS_IN_FILE`` (https://github.com/ansible-collections/community.general/issues/7888, https://github.com/ansible-collections/community.general/pull/8363). diff --git a/changelogs/fragments/8367-fix-close-span-if-no-logs.yaml b/changelogs/fragments/8367-fix-close-span-if-no-logs.yaml deleted file mode 100644 index e0a90be311..0000000000 --- a/changelogs/fragments/8367-fix-close-span-if-no-logs.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "opentelemetry callback plugin - close spans always (https://github.com/ansible-collections/community.general/pull/8367)." 
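The new ``community.general.proxmox`` module defaults group mentioned above can be used to avoid repeating connection options across Proxmox tasks; the play below is a sketch, with the endpoint, credentials, and VM ID as placeholders.

```yaml
# Sketch only - endpoint, credentials, and VM ID are placeholders.
- hosts: localhost
  module_defaults:
    group/community.general.proxmox:
      api_host: pve.example.org
      api_user: root@pam
      api_password: "{{ proxmox_api_password }}"
  tasks:
    - name: Query a VM without repeating the connection options
      community.general.proxmox_vm_info:
        vmid: 123
```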
diff --git a/changelogs/fragments/8373-honour-disable-logs.yaml b/changelogs/fragments/8373-honour-disable-logs.yaml deleted file mode 100644 index 112b10a9f4..0000000000 --- a/changelogs/fragments/8373-honour-disable-logs.yaml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - opentelemetry callback plugin - honour the ``disable_logs`` option to avoid storing task results since they are not used regardless (https://github.com/ansible-collections/community.general/pull/8373). - diff --git a/changelogs/fragments/8379-verbose-mode-pkg5.yml b/changelogs/fragments/8379-verbose-mode-pkg5.yml deleted file mode 100644 index abc1c61dce..0000000000 --- a/changelogs/fragments/8379-verbose-mode-pkg5.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - pkg5 - add support for non-silent execution (https://github.com/ansible-collections/community.general/issues/8379, https://github.com/ansible-collections/community.general/pull/8382). diff --git a/changelogs/fragments/8383-deprecate-gitlab-basic-auth.yml b/changelogs/fragments/8383-deprecate-gitlab-basic-auth.yml deleted file mode 100644 index b9c35cd0e4..0000000000 --- a/changelogs/fragments/8383-deprecate-gitlab-basic-auth.yml +++ /dev/null @@ -1,2 +0,0 @@ -deprecated_features: - - "gitlab modules - the basic auth method on GitLab API have been deprecated and will be removed in community.general 10.0.0 (https://github.com/ansible-collections/community.general/pull/8383)." diff --git a/changelogs/fragments/9.0.0.yml b/changelogs/fragments/9.0.0.yml deleted file mode 100644 index 8de366f74c..0000000000 --- a/changelogs/fragments/9.0.0.yml +++ /dev/null @@ -1 +0,0 @@ -release_summary: This is release 9.0.0 of `community.general`, released on 2024-05-20. diff --git a/changelogs/fragments/add-ipa-sudorule-deny-cmd.yml b/changelogs/fragments/add-ipa-sudorule-deny-cmd.yml deleted file mode 100644 index 2d5dc6205c..0000000000 --- a/changelogs/fragments/add-ipa-sudorule-deny-cmd.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ipa_sudorule - adds options to include denied commands or command groups (https://github.com/ansible-collections/community.general/pull/7415). diff --git a/changelogs/fragments/aix_filesystem-crfs-issue.yml b/changelogs/fragments/aix_filesystem-crfs-issue.yml deleted file mode 100644 index 6b3ddfb0d6..0000000000 --- a/changelogs/fragments/aix_filesystem-crfs-issue.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - aix_filesystem - fix issue with empty list items in crfs logic and option order (https://github.com/ansible-collections/community.general/pull/8052). diff --git a/changelogs/fragments/bitwarden-lookup-performance.yaml b/changelogs/fragments/bitwarden-lookup-performance.yaml deleted file mode 100644 index cb0405b1cb..0000000000 --- a/changelogs/fragments/bitwarden-lookup-performance.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "bitwarden lookup plugin - when looking for items using an item ID, the item is now accessed directly with ``bw get item`` instead of searching through all items. This doubles the lookup speed (https://github.com/ansible-collections/community.general/pull/7468)." diff --git a/changelogs/fragments/hipchat.yml b/changelogs/fragments/hipchat.yml deleted file mode 100644 index 0260c09c84..0000000000 --- a/changelogs/fragments/hipchat.yml +++ /dev/null @@ -1,4 +0,0 @@ -deprecated_features: - - "hipchat callback plugin - the hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020. 
- The callback plugin is therefore deprecated and will be removed from community.general 10.0.0 if nobody provides compelling reasons to still keep it - (https://github.com/ansible-collections/community.general/issues/8184, https://github.com/ansible-collections/community.general/pull/8189)." diff --git a/changelogs/fragments/internal-redirects.yml b/changelogs/fragments/internal-redirects.yml deleted file mode 100644 index 23ce456d4e..0000000000 --- a/changelogs/fragments/internal-redirects.yml +++ /dev/null @@ -1,5 +0,0 @@ -removed_features: - - "The deprecated redirects for internal module names have been removed. - These internal redirects were extra-long FQCNs like ``community.general.packaging.os.apt_rpm`` that redirect to the short FQCN ``community.general.apt_rpm``. - They were originally needed to implement flatmapping; as various tooling started to recommend users to use the long names flatmapping was removed from the collection - and redirects were added for users who already followed these incorrect recommendations (https://github.com/ansible-collections/community.general/pull/7835)." diff --git a/changelogs/fragments/inventory-rce.yml b/changelogs/fragments/inventory-rce.yml deleted file mode 100644 index 9eee6dff52..0000000000 --- a/changelogs/fragments/inventory-rce.yml +++ /dev/null @@ -1,6 +0,0 @@ -security_fixes: - - "cobbler, gitlab_runners, icinga2, linode, lxd, nmap, online, opennebula, proxmox, scaleway, stackpath_compute, virtualbox, - and xen_orchestra inventory plugin - make sure all data received from the remote servers is marked as unsafe, so remote - code execution by obtaining texts that can be evaluated as templates is not possible - (https://www.die-welt.net/2024/03/remote-code-execution-in-ansible-dynamic-inventory-plugins/, - https://github.com/ansible-collections/community.general/pull/8098)." diff --git a/changelogs/fragments/lxd-instance-not-found-avoid-false-positives.yml b/changelogs/fragments/lxd-instance-not-found-avoid-false-positives.yml deleted file mode 100644 index 03ac8ee01b..0000000000 --- a/changelogs/fragments/lxd-instance-not-found-avoid-false-positives.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "lxd connection plugin - tighten the detection logic for lxd ``Instance not found`` errors, to avoid false detection on unrelated errors such as ``/usr/bin/python3: not found`` (https://github.com/ansible-collections/community.general/pull/7521)." diff --git a/changelogs/fragments/lxd-instances-api-endpoint-added.yml b/changelogs/fragments/lxd-instances-api-endpoint-added.yml deleted file mode 100644 index 3e7aa3b50e..0000000000 --- a/changelogs/fragments/lxd-instances-api-endpoint-added.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "lxd_container - uses ``/1.0/instances`` API endpoint, if available. Falls back to ``/1.0/containers`` or ``/1.0/virtual-machines``. Fixes issue when using Incus or LXD 5.19 due to migrating to ``/1.0/instances`` endpoint (https://github.com/ansible-collections/community.general/pull/7980)." diff --git a/changelogs/fragments/pacemaker-cluster.yml b/changelogs/fragments/pacemaker-cluster.yml deleted file mode 100644 index 07e1ff3e04..0000000000 --- a/changelogs/fragments/pacemaker-cluster.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - "pacemaker_cluster - actually implement check mode, which the module claims to support. This means that until now the module - also did changes in check mode (https://github.com/ansible-collections/community.general/pull/8081)." 
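The `internal-redirects.yml` fragment above removes the extra-long internal FQCN redirects. A hedged before/after sketch built on the one redirect the fragment itself names (`foo` is a placeholder package, and `name` is again assumed to be an accepted package option):

```yaml
# The removed extra-long redirect no longer resolves:
# - community.general.packaging.os.apt_rpm:
#     name: foo
#     state: present

# Use the short FQCN instead; the module behavior is unchanged:
- name: Install a package using the short FQCN
  community.general.apt_rpm:
    name: foo
    state: present
```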
diff --git a/changelogs/fragments/pkgin.yml b/changelogs/fragments/pkgin.yml deleted file mode 100644 index 60eff0bfe5..0000000000 --- a/changelogs/fragments/pkgin.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - pkgin - pkgin (pkgsrc package manager used by SmartOS) raises erratic exceptions and spurious ``changed=true`` (https://github.com/ansible-collections/community.general/pull/7971). diff --git a/changelogs/fragments/puppet_lang_force.yml b/changelogs/fragments/puppet_lang_force.yml deleted file mode 100644 index b826c8dba4..0000000000 --- a/changelogs/fragments/puppet_lang_force.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - puppet - add option ``environment_lang`` to set the environment language encoding. Defaults to lang ``C``. It is recommended - to set it to ``C.UTF-8`` or ``en_US.UTF-8`` depending on what is available on your system. (https://github.com/ansible-collections/community.general/issues/8000) diff --git a/changelogs/fragments/remove_deprecated.yml b/changelogs/fragments/remove_deprecated.yml deleted file mode 100644 index e777bf14e2..0000000000 --- a/changelogs/fragments/remove_deprecated.yml +++ /dev/null @@ -1,18 +0,0 @@ -removed_features: - - "rax* modules, rax module utils, rax docs fragment - the Rackspace modules relied on the deprecated package ``pyrax`` and were thus removed (https://github.com/ansible-collections/community.general/pull/8198)." - - "stackdriver - this module relied on HTTPS APIs that do not exist anymore and was thus removed (https://github.com/ansible-collections/community.general/pull/8198)." - - "webfaction_* modules - these modules relied on HTTPS APIs that do not exist anymore and were thus removed (https://github.com/ansible-collections/community.general/pull/8198)." - - "flowdock - this module relied on HTTPS APIs that do not exist anymore and was thus removed (https://github.com/ansible-collections/community.general/pull/8198)." - - "redhat_subscription - the alias ``autosubscribe`` of the ``auto_attach`` option was removed (https://github.com/ansible-collections/community.general/pull/8198)." - - "redhat module utils - the classes ``Rhsm``, ``RhsmPool``, and ``RhsmPools`` have been removed (https://github.com/ansible-collections/community.general/pull/8198)." - - "mh.mixins.deps module utils - the ``DependencyMixin`` has been removed. Use the ``deps`` module utils instead (https://github.com/ansible-collections/community.general/pull/8198)." - - "proxmox - the ``proxmox_default_behavior`` option has been removed (https://github.com/ansible-collections/community.general/pull/8198)." - - "ansible_galaxy_install - the ``ack_ansible29`` and ``ack_min_ansiblecore211`` options have been removed. They no longer had any effect (https://github.com/ansible-collections/community.general/pull/8198)." - - "django_manage - support for the ``command`` values ``cleanup``, ``syncdb``, and ``validate`` were removed. Use ``clearsessions``, ``migrate``, and ``check`` instead, respectively (https://github.com/ansible-collections/community.general/pull/8198)." -deprecated_features: - - "django_manage - the ``ack_venv_creation_deprecation`` option has no more effect and will be removed from community.general 11.0.0 (https://github.com/ansible-collections/community.general/pull/8198)." -breaking_changes: - - "redfish_command, redfish_config, redfish_info - change the default for ``timeout`` from 10 to 60 (https://github.com/ansible-collections/community.general/pull/8198)." 
- - "cpanm - the default of the ``mode`` option changed from ``compatibility`` to ``new`` (https://github.com/ansible-collections/community.general/pull/8198)." - - "django_manage - the module will now fail if ``virtualenv`` is specified but no virtual environment exists at that location (https://github.com/ansible-collections/community.general/pull/8198)." - - "django_manage - the module now requires Django >= 4.1 (https://github.com/ansible-collections/community.general/pull/8198)." diff --git a/docs/docsite/extra-docs.yml b/docs/docsite/extra-docs.yml index 529573606c..f73d0fe012 100644 --- a/docs/docsite/extra-docs.yml +++ b/docs/docsite/extra-docs.yml @@ -14,3 +14,9 @@ sections: - guide_online - guide_packet - guide_scaleway + - title: Developer Guides + toctree: + - guide_deps + - guide_vardict + - guide_cmdrunner + - guide_modulehelper diff --git a/docs/docsite/helper/keep_keys/README.md b/docs/docsite/helper/keep_keys/README.md new file mode 100644 index 0000000000..69a4076ef9 --- /dev/null +++ b/docs/docsite/helper/keep_keys/README.md @@ -0,0 +1,61 @@ + + +# Docs helper. Create RST file. + +The playbook `playbook.yml` writes a RST file that can be used in +docs/docsite/rst. The usage of this helper is recommended but not +mandatory. You can stop reading here and update the RST file manually +if you don't want to use this helper. + +## Run the playbook + +If you want to generate the RST file by this helper fit the variables +in the playbook and the template to your needs. Then, run the play + +```sh +shell> ansible-playbook playbook.yml +``` + +## Copy RST to docs/docsite/rst + +Copy the RST file to `docs/docsite/rst` and remove it from this +directory. + +## Update the checksums + +Substitute the variables and run the below commands + +```sh +shell> sha1sum {{ target_vars }} > {{ target_sha1 }} +shell> sha1sum {{ file_rst }} > {{ file_sha1 }} +``` + +## Playbook explained + +The playbook includes the variable *tests* from the integration tests +and creates the RST file from the template. The playbook will +terminate if: + +* The file with the variable *tests* was changed +* The RST file was changed + +This means that this helper is probably not up to date. + +### The file with the variable *tests* was changed + +This means that somebody updated the integration tests. Review the +changes and update the template if needed. Update the checksum to pass +the integrity test. The playbook message provides you with the +command. + +### The RST file was changed + +This means that somebody updated the RST file manually. Review the +changes and update the template. Update the checksum to pass the +integrity test. The playbook message provides you with the +command. Make sure that the updated template will create identical RST +file. Only then apply your changes. diff --git a/docs/docsite/helper/keep_keys/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst.j2 b/docs/docsite/helper/keep_keys/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst.j2 new file mode 100644 index 0000000000..77281549ba --- /dev/null +++ b/docs/docsite/helper/keep_keys/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst.j2 @@ -0,0 +1,80 @@ +.. 
+ Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +keep_keys +""""""""" + +Use the filter :ansplugin:`community.general.keep_keys#filter` if you have a list of dictionaries and want to keep certain keys only. + +.. note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See :ansplugin:`the documentation for the community.general.yaml callback plugin `. + + +Let us use the below list in the following examples: + +.. code-block:: yaml + + input: + {{ tests.0.input | to_yaml(indent=2) | indent(5) }} + +{% for i in tests[0:1]|subelements('group') %} +* {{ i.1.d }} + +.. code-block:: yaml+jinja + :emphasize-lines: 1 + + target: {{ i.1.tt }} + result: "{{ lookup('file', target ~ '/templates/' ~ i.0.template) }}" + +{% endfor %} + +gives + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + {{ tests.0.result | to_yaml(indent=2) | indent(5) }} + +.. versionadded:: 9.1.0 + +* The results of the below examples 1-5 are all the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + {{ tests.1.result | to_yaml(indent=2) | indent(5) }} + +{% for i in tests[1:2]|subelements('group') %} +{{ loop.index }}. {{ i.1.d }} + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: {{ i.1.mp }} + target: {{ i.1.tt }} + result: "{{ lookup('file', target ~ '/templates/' ~ i.0.template) }}" + +{% endfor %} + +* The results of the below examples 6-9 are all the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + {{ tests.2.result | to_yaml(indent=2) | indent(5) }} + +{% for i in tests[2:3]|subelements('group') %} +{{ loop.index + 5 }}. {{ i.1.d }} + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: {{ i.1.mp }} + target: {{ i.1.tt }} + result: "{{ lookup('file', target ~ '/templates/' ~ i.0.template) }}" + +{% endfor %} diff --git a/docs/docsite/helper/keep_keys/keep_keys.rst.sha1 b/docs/docsite/helper/keep_keys/keep_keys.rst.sha1 new file mode 100644 index 0000000000..532c6a192c --- /dev/null +++ b/docs/docsite/helper/keep_keys/keep_keys.rst.sha1 @@ -0,0 +1 @@ +8690afce792abc95693c2f61f743ee27388b1592 ../../rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst diff --git a/docs/docsite/helper/keep_keys/keep_keys.rst.sha1.license b/docs/docsite/helper/keep_keys/keep_keys.rst.sha1.license new file mode 100644 index 0000000000..a1390a69ed --- /dev/null +++ b/docs/docsite/helper/keep_keys/keep_keys.rst.sha1.license @@ -0,0 +1,3 @@ +Copyright (c) Ansible Project +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later diff --git a/docs/docsite/helper/keep_keys/playbook.yml b/docs/docsite/helper/keep_keys/playbook.yml new file mode 100644 index 0000000000..75ef90385b --- /dev/null +++ b/docs/docsite/helper/keep_keys/playbook.yml @@ -0,0 +1,79 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +# Create docs REST files +# shell> ansible-playbook playbook.yml +# +# Proofread and copy created *.rst file into the directory +# docs/docsite/rst. 
Do not add *.rst in this directory to the version +# control. +# +# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +# community.general/docs/docsite/helper/keep_keys/playbook.yml + +- name: Create RST file for docs/docsite/rst + hosts: localhost + gather_facts: false + + vars: + + plugin: keep_keys + plugin_type: filter + docs_path: + - filter_guide + - abstract_informations + - lists_of_dictionaries + + file_base: "{{ (docs_path + [plugin]) | join('-') }}" + file_rst: ../../rst/{{ file_base }}.rst + file_sha1: "{{ plugin }}.rst.sha1" + + target: "../../../../tests/integration/targets/{{ plugin_type }}_{{ plugin }}" + target_vars: "{{ target }}/vars/main/tests.yml" + target_sha1: tests.yml.sha1 + + tasks: + + - name: Test integrity tests.yml + when: + - integrity | d(true) | bool + - lookup('file', target_sha1) != lookup('pipe', 'sha1sum ' ~ target_vars) + block: + + - name: Changed tests.yml + ansible.builtin.debug: + msg: | + Changed {{ target_vars }} + Review the changes and update {{ target_sha1 }} + shell> sha1sum {{ target_vars }} > {{ target_sha1 }} + + - name: Changed tests.yml end host + ansible.builtin.meta: end_play + + - name: Test integrity RST file + when: + - integrity | d(true) | bool + - lookup('file', file_sha1) != lookup('pipe', 'sha1sum ' ~ file_rst) + block: + + - name: Changed RST file + ansible.builtin.debug: + msg: | + Changed {{ file_rst }} + Review the changes and update {{ file_sha1 }} + shell> sha1sum {{ file_rst }} > {{ file_sha1 }} + + - name: Changed RST file end host + ansible.builtin.meta: end_play + + - name: Include target vars + include_vars: + file: "{{ target_vars }}" + + - name: Create RST file + ansible.builtin.template: + src: "{{ file_base }}.rst.j2" + dest: "{{ file_base }}.rst" diff --git a/docs/docsite/helper/keep_keys/tests.yml.sha1 b/docs/docsite/helper/keep_keys/tests.yml.sha1 new file mode 100644 index 0000000000..fcf41a4347 --- /dev/null +++ b/docs/docsite/helper/keep_keys/tests.yml.sha1 @@ -0,0 +1 @@ +c6fc4ee2017d9222675bcd13cc4f88ba8d14f38d ../../../../tests/integration/targets/filter_keep_keys/vars/main/tests.yml diff --git a/docs/docsite/helper/keep_keys/tests.yml.sha1.license b/docs/docsite/helper/keep_keys/tests.yml.sha1.license new file mode 100644 index 0000000000..a1390a69ed --- /dev/null +++ b/docs/docsite/helper/keep_keys/tests.yml.sha1.license @@ -0,0 +1,3 @@ +Copyright (c) Ansible Project +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later diff --git a/docs/docsite/helper/lists_mergeby/default-common.yml b/docs/docsite/helper/lists_mergeby/default-common.yml index fd874e5c91..4431fe27dc 100644 --- a/docs/docsite/helper/lists_mergeby/default-common.yml +++ b/docs/docsite/helper/lists_mergeby/default-common.yml @@ -2,17 +2,11 @@ # Copyright (c) Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - list1: - - name: foo - extra: true - - name: bar - extra: false - - name: meh - extra: true + - {name: foo, extra: true} + - {name: bar, extra: false} + - {name: meh, extra: true} list2: - - name: foo - path: /foo - - name: baz - path: /baz + - {name: foo, path: /foo} + - {name: baz, path: /baz} diff --git a/docs/docsite/helper/lists_mergeby/default-recursive-true.yml b/docs/docsite/helper/lists_mergeby/default-recursive-true.yml index 133c8f2aec..eb83ea82e1 100644 --- 
a/docs/docsite/helper/lists_mergeby/default-recursive-true.yml +++ b/docs/docsite/helper/lists_mergeby/default-recursive-true.yml @@ -2,14 +2,12 @@ # Copyright (c) Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - list1: - name: myname01 param01: x: default_value y: default_value - list: - - default_value + list: [default_value] - name: myname02 param01: [1, 1, 2, 3] @@ -18,7 +16,6 @@ list2: param01: y: patch_value z: patch_value - list: - - patch_value + list: [patch_value] - name: myname02 - param01: [3, 4, 4, {key: value}] + param01: [3, 4, 4] diff --git a/docs/docsite/helper/lists_mergeby/example-001.yml b/docs/docsite/helper/lists_mergeby/example-001.yml index 0cf6a9b8a7..c27b019e52 100644 --- a/docs/docsite/helper/lists_mergeby/example-001.yml +++ b/docs/docsite/helper/lists_mergeby/example-001.yml @@ -8,7 +8,7 @@ dir: example-001_vars - debug: var: list3 - when: debug|d(false)|bool + when: debug | d(false) | bool - template: src: list3.out.j2 dest: example-001.out diff --git a/docs/docsite/helper/lists_mergeby/example-001_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-001_vars/list3.yml index 0604feccbd..8bd8bc8f24 100644 --- a/docs/docsite/helper/lists_mergeby/example-001_vars/list3.yml +++ b/docs/docsite/helper/lists_mergeby/example-001_vars/list3.yml @@ -2,6 +2,5 @@ # Copyright (c) Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - -list3: "{{ list1| +list3: "{{ list1 | community.general.lists_mergeby(list2, 'name') }}" diff --git a/docs/docsite/helper/lists_mergeby/example-002.yml b/docs/docsite/helper/lists_mergeby/example-002.yml index 5e6e0315df..e164db1251 100644 --- a/docs/docsite/helper/lists_mergeby/example-002.yml +++ b/docs/docsite/helper/lists_mergeby/example-002.yml @@ -8,7 +8,7 @@ dir: example-002_vars - debug: var: list3 - when: debug|d(false)|bool + when: debug | d(false) | bool - template: src: list3.out.j2 dest: example-002.out diff --git a/docs/docsite/helper/lists_mergeby/example-002_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-002_vars/list3.yml index 8ad7524072..be6cfcbf31 100644 --- a/docs/docsite/helper/lists_mergeby/example-002_vars/list3.yml +++ b/docs/docsite/helper/lists_mergeby/example-002_vars/list3.yml @@ -2,6 +2,5 @@ # Copyright (c) Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - -list3: "{{ [list1, list2]| +list3: "{{ [list1, list2] | community.general.lists_mergeby('name') }}" diff --git a/docs/docsite/helper/lists_mergeby/example-003.yml b/docs/docsite/helper/lists_mergeby/example-003.yml index 2f93ab8a27..cbc5e43a50 100644 --- a/docs/docsite/helper/lists_mergeby/example-003.yml +++ b/docs/docsite/helper/lists_mergeby/example-003.yml @@ -8,7 +8,7 @@ dir: example-003_vars - debug: var: list3 - when: debug|d(false)|bool + when: debug | d(false) | bool - template: src: list3.out.j2 dest: example-003.out diff --git a/docs/docsite/helper/lists_mergeby/example-003_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-003_vars/list3.yml index d5374eece5..2eff5df41a 100644 --- a/docs/docsite/helper/lists_mergeby/example-003_vars/list3.yml +++ b/docs/docsite/helper/lists_mergeby/example-003_vars/list3.yml @@ -2,7 +2,6 @@ # Copyright (c) Ansible 
Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - -list3: "{{ [list1, list2]| +list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true) }}" diff --git a/docs/docsite/helper/lists_mergeby/example-004.yml b/docs/docsite/helper/lists_mergeby/example-004.yml index 3ef067faf3..68e77dea81 100644 --- a/docs/docsite/helper/lists_mergeby/example-004.yml +++ b/docs/docsite/helper/lists_mergeby/example-004.yml @@ -8,7 +8,7 @@ dir: example-004_vars - debug: var: list3 - when: debug|d(false)|bool + when: debug | d(false) | bool - template: src: list3.out.j2 dest: example-004.out diff --git a/docs/docsite/helper/lists_mergeby/example-004_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-004_vars/list3.yml index a054ea1e73..94c8ceed38 100644 --- a/docs/docsite/helper/lists_mergeby/example-004_vars/list3.yml +++ b/docs/docsite/helper/lists_mergeby/example-004_vars/list3.yml @@ -2,8 +2,7 @@ # Copyright (c) Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - -list3: "{{ [list1, list2]| +list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true, list_merge='keep') }}" diff --git a/docs/docsite/helper/lists_mergeby/example-005.yml b/docs/docsite/helper/lists_mergeby/example-005.yml index 57e7a779d9..b7b81de294 100644 --- a/docs/docsite/helper/lists_mergeby/example-005.yml +++ b/docs/docsite/helper/lists_mergeby/example-005.yml @@ -8,7 +8,7 @@ dir: example-005_vars - debug: var: list3 - when: debug|d(false)|bool + when: debug | d(false) | bool - template: src: list3.out.j2 dest: example-005.out diff --git a/docs/docsite/helper/lists_mergeby/example-005_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-005_vars/list3.yml index 3480bf6581..f0d7751f22 100644 --- a/docs/docsite/helper/lists_mergeby/example-005_vars/list3.yml +++ b/docs/docsite/helper/lists_mergeby/example-005_vars/list3.yml @@ -2,8 +2,7 @@ # Copyright (c) Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - -list3: "{{ [list1, list2]| +list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true, list_merge='append') }}" diff --git a/docs/docsite/helper/lists_mergeby/example-006.yml b/docs/docsite/helper/lists_mergeby/example-006.yml index 41fc88e496..1be3becbc0 100644 --- a/docs/docsite/helper/lists_mergeby/example-006.yml +++ b/docs/docsite/helper/lists_mergeby/example-006.yml @@ -8,7 +8,7 @@ dir: example-006_vars - debug: var: list3 - when: debug|d(false)|bool + when: debug | d(false) | bool - template: src: list3.out.j2 dest: example-006.out diff --git a/docs/docsite/helper/lists_mergeby/example-006_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-006_vars/list3.yml index 97513b5593..f555c8dcb2 100644 --- a/docs/docsite/helper/lists_mergeby/example-006_vars/list3.yml +++ b/docs/docsite/helper/lists_mergeby/example-006_vars/list3.yml @@ -2,8 +2,7 @@ # Copyright (c) Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - -list3: "{{ [list1, list2]| +list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true, list_merge='prepend') }}" diff 
--git a/docs/docsite/helper/lists_mergeby/example-007.yml b/docs/docsite/helper/lists_mergeby/example-007.yml index 3de7158447..8a596ea68e 100644 --- a/docs/docsite/helper/lists_mergeby/example-007.yml +++ b/docs/docsite/helper/lists_mergeby/example-007.yml @@ -8,7 +8,7 @@ dir: example-007_vars - debug: var: list3 - when: debug|d(false)|bool + when: debug|d(false) | bool - template: src: list3.out.j2 dest: example-007.out diff --git a/docs/docsite/helper/lists_mergeby/example-007_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-007_vars/list3.yml index cb51653b49..d8ad16cf4d 100644 --- a/docs/docsite/helper/lists_mergeby/example-007_vars/list3.yml +++ b/docs/docsite/helper/lists_mergeby/example-007_vars/list3.yml @@ -2,8 +2,7 @@ # Copyright (c) Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - -list3: "{{ [list1, list2]| +list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true, list_merge='append_rp') }}" diff --git a/docs/docsite/helper/lists_mergeby/example-008.yml b/docs/docsite/helper/lists_mergeby/example-008.yml index e33828bf9a..6d5c03bc6d 100644 --- a/docs/docsite/helper/lists_mergeby/example-008.yml +++ b/docs/docsite/helper/lists_mergeby/example-008.yml @@ -8,7 +8,7 @@ dir: example-008_vars - debug: var: list3 - when: debug|d(false)|bool + when: debug | d(false) | bool - template: src: list3.out.j2 dest: example-008.out diff --git a/docs/docsite/helper/lists_mergeby/example-008_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-008_vars/list3.yml index af7001fc4a..b2051376ea 100644 --- a/docs/docsite/helper/lists_mergeby/example-008_vars/list3.yml +++ b/docs/docsite/helper/lists_mergeby/example-008_vars/list3.yml @@ -2,8 +2,7 @@ # Copyright (c) Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - -list3: "{{ [list1, list2]| +list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true, list_merge='prepend_rp') }}" diff --git a/docs/docsite/helper/lists_mergeby/example-009.yml b/docs/docsite/helper/lists_mergeby/example-009.yml new file mode 100644 index 0000000000..beef5d356c --- /dev/null +++ b/docs/docsite/helper/lists_mergeby/example-009.yml @@ -0,0 +1,14 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: 9. 
Merge single list by common attribute 'name' + include_vars: + dir: example-009_vars +- debug: + var: list3 + when: debug | d(false) | bool +- template: + src: list3.out.j2 + dest: example-009.out diff --git a/docs/docsite/helper/lists_mergeby/example-009_vars/default-common.yml b/docs/docsite/helper/lists_mergeby/example-009_vars/default-common.yml new file mode 120000 index 0000000000..7ea8984a8d --- /dev/null +++ b/docs/docsite/helper/lists_mergeby/example-009_vars/default-common.yml @@ -0,0 +1 @@ +../default-common.yml \ No newline at end of file diff --git a/docs/docsite/helper/lists_mergeby/example-009_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-009_vars/list3.yml new file mode 100644 index 0000000000..1708e3bafa --- /dev/null +++ b/docs/docsite/helper/lists_mergeby/example-009_vars/list3.yml @@ -0,0 +1,6 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +list3: "{{ [list1 + list2, []] | + community.general.lists_mergeby('name') }}" diff --git a/docs/docsite/helper/lists_mergeby/examples.yml b/docs/docsite/helper/lists_mergeby/examples.yml index 83b985084e..34ad2d1558 100644 --- a/docs/docsite/helper/lists_mergeby/examples.yml +++ b/docs/docsite/helper/lists_mergeby/examples.yml @@ -4,51 +4,75 @@ # SPDX-License-Identifier: GPL-3.0-or-later examples: - - label: 'In the example below the lists are merged by the attribute ``name``:' + - title: Two lists + description: 'In the example below the lists are merged by the attribute ``name``:' file: example-001_vars/list3.yml lang: 'yaml+jinja' - - label: 'This produces:' + - title: + description: 'This produces:' file: example-001.out lang: 'yaml' - - label: 'It is possible to use a list of lists as an input of the filter:' + - title: List of two lists + description: 'It is possible to use a list of lists as an input of the filter:' file: example-002_vars/list3.yml lang: 'yaml+jinja' - - label: 'This produces the same result as in the previous example:' + - title: + description: 'This produces the same result as in the previous example:' file: example-002.out lang: 'yaml' - - label: 'Example ``list_merge=replace`` (default):' + - title: Single list + description: 'It is possible to merge single list:' + file: example-009_vars/list3.yml + lang: 'yaml+jinja' + - title: + description: 'This produces the same result as in the previous example:' + file: example-009.out + lang: 'yaml' + - title: list_merge=replace (default) + description: 'Example :ansopt:`community.general.lists_mergeby#filter:list_merge=replace` (default):' file: example-003_vars/list3.yml lang: 'yaml+jinja' - - label: 'This produces:' + - title: + description: 'This produces:' file: example-003.out lang: 'yaml' - - label: 'Example ``list_merge=keep``:' + - title: list_merge=keep + description: 'Example :ansopt:`community.general.lists_mergeby#filter:list_merge=keep`:' file: example-004_vars/list3.yml lang: 'yaml+jinja' - - label: 'This produces:' + - title: + description: 'This produces:' file: example-004.out lang: 'yaml' - - label: 'Example ``list_merge=append``:' + - title: list_merge=append + description: 'Example :ansopt:`community.general.lists_mergeby#filter:list_merge=append`:' file: example-005_vars/list3.yml lang: 'yaml+jinja' - - label: 'This produces:' + - title: + description: 'This produces:' file: example-005.out lang: 'yaml' - - label: 'Example ``list_merge=prepend``:' + - title: list_merge=prepend 
+ description: 'Example :ansopt:`community.general.lists_mergeby#filter:list_merge=prepend`:' file: example-006_vars/list3.yml lang: 'yaml+jinja' - - label: 'This produces:' + - title: + description: 'This produces:' file: example-006.out lang: 'yaml' - - label: 'Example ``list_merge=append_rp``:' + - title: list_merge=append_rp + description: 'Example :ansopt:`community.general.lists_mergeby#filter:list_merge=append_rp`:' file: example-007_vars/list3.yml lang: 'yaml+jinja' - - label: 'This produces:' + - title: + description: 'This produces:' file: example-007.out lang: 'yaml' - - label: 'Example ``list_merge=prepend_rp``:' + - title: list_merge=prepend_rp + description: 'Example :ansopt:`community.general.lists_mergeby#filter:list_merge=prepend_rp`:' file: example-008_vars/list3.yml lang: 'yaml+jinja' - - label: 'This produces:' + - title: + description: 'This produces:' file: example-008.out lang: 'yaml' diff --git a/docs/docsite/helper/lists_mergeby/examples_all.rst.j2 b/docs/docsite/helper/lists_mergeby/examples_all.rst.j2 index 95a0fafddc..88098683b9 100644 --- a/docs/docsite/helper/lists_mergeby/examples_all.rst.j2 +++ b/docs/docsite/helper/lists_mergeby/examples_all.rst.j2 @@ -4,10 +4,10 @@ SPDX-License-Identifier: GPL-3.0-or-later {% for i in examples %} -{{ i.label }} +{{ i.description }} .. code-block:: {{ i.lang }} - {{ lookup('file', i.file)|indent(2) }} + {{ lookup('file', i.file) | split('\n') | reject('match', '^(#|---)') | join ('\n') | indent(2) }} {% endfor %} diff --git a/docs/docsite/helper/lists_mergeby/extra-vars.yml b/docs/docsite/helper/lists_mergeby/extra-vars.yml new file mode 100644 index 0000000000..0482c7ff29 --- /dev/null +++ b/docs/docsite/helper/lists_mergeby/extra-vars.yml @@ -0,0 +1,7 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +examples_one: true +examples_all: true +merging_lists_of_dictionaries: true diff --git a/docs/docsite/helper/lists_mergeby/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst.j2 b/docs/docsite/helper/lists_mergeby/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst.j2 index 71d0d5da6c..ad74161dcd 100644 --- a/docs/docsite/helper/lists_mergeby/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst.j2 +++ b/docs/docsite/helper/lists_mergeby/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst.j2 @@ -6,57 +6,69 @@ Merging lists of dictionaries ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -If you have two or more lists of dictionaries and want to combine them into a list of merged dictionaries, where the dictionaries are merged by an attribute, you can use the ``lists_mergeby`` filter. +If you have two or more lists of dictionaries and want to combine them into a list of merged dictionaries, where the dictionaries are merged by an attribute, you can use the :ansplugin:`community.general.lists_mergeby ` filter. -.. note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See :ref:`the documentation for the community.general.yaml callback plugin `. +.. note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See the documentation for the :ansplugin:`community.general.yaml callback plugin `. 
Let us use the lists below in the following examples: .. code-block:: yaml - {{ lookup('file', 'default-common.yml')|indent(2) }} + {{ lookup('file', 'default-common.yml') | split('\n') | reject('match', '^(#|---)') | join ('\n') | indent(2) }} {% for i in examples[0:2] %} -{{ i.label }} +{% if i.title | d('', true) | length > 0 %} +{{ i.title }} +{{ "%s" % ('"' * i.title|length) }} +{% endif %} +{{ i.description }} .. code-block:: {{ i.lang }} - {{ lookup('file', i.file)|indent(2) }} + {{ lookup('file', i.file) | split('\n') | reject('match', '^(#|---)') | join ('\n') | indent(2) }} {% endfor %} .. versionadded:: 2.0.0 -{% for i in examples[2:4] %} -{{ i.label }} +{% for i in examples[2:6] %} +{% if i.title | d('', true) | length > 0 %} +{{ i.title }} +{{ "%s" % ('"' * i.title|length) }} +{% endif %} +{{ i.description }} .. code-block:: {{ i.lang }} - {{ lookup('file', i.file)|indent(2) }} + {{ lookup('file', i.file) | split('\n') | reject('match', '^(#|---)') | join ('\n') | indent(2) }} {% endfor %} -The filter also accepts two optional parameters: ``recursive`` and ``list_merge``. These parameters are only supported when used with ansible-base 2.10 or ansible-core, but not with Ansible 2.9. This is available since community.general 4.4.0. +The filter also accepts two optional parameters: :ansopt:`community.general.lists_mergeby#filter:recursive` and :ansopt:`community.general.lists_mergeby#filter:list_merge`. This is available since community.general 4.4.0. **recursive** - Is a boolean, default to ``False``. Should the ``community.general.lists_mergeby`` recursively merge nested hashes. Note: It does not depend on the value of the ``hash_behaviour`` setting in ``ansible.cfg``. + Is a boolean, default to ``false``. Should the :ansplugin:`community.general.lists_mergeby#filter` filter recursively merge nested hashes. Note: It does not depend on the value of the ``hash_behaviour`` setting in ``ansible.cfg``. **list_merge** - Is a string, its possible values are ``replace`` (default), ``keep``, ``append``, ``prepend``, ``append_rp`` or ``prepend_rp``. It modifies the behaviour of ``community.general.lists_mergeby`` when the hashes to merge contain arrays/lists. + Is a string, its possible values are :ansval:`replace` (default), :ansval:`keep`, :ansval:`append`, :ansval:`prepend`, :ansval:`append_rp` or :ansval:`prepend_rp`. It modifies the behaviour of :ansplugin:`community.general.lists_mergeby#filter` when the hashes to merge contain arrays/lists. -The examples below set ``recursive=true`` and display the differences among all six options of ``list_merge``. Functionality of the parameters is exactly the same as in the filter ``combine``. See :ref:`Combining hashes/dictionaries ` to learn details about these options. +The examples below set :ansopt:`community.general.lists_mergeby#filter:recursive=true` and display the differences among all six options of :ansopt:`community.general.lists_mergeby#filter:list_merge`. Functionality of the parameters is exactly the same as in the filter :ansplugin:`ansible.builtin.combine#filter`. See :ref:`Combining hashes/dictionaries ` to learn details about these options. Let us use the lists below in the following examples .. 
code-block:: yaml - {{ lookup('file', 'default-recursive-true.yml')|indent(2) }} + {{ lookup('file', 'default-recursive-true.yml') | split('\n') | reject('match', '^(#|---)') | join ('\n') |indent(2) }} -{% for i in examples[4:16] %} -{{ i.label }} +{% for i in examples[6:] %} +{% if i.title | d('', true) | length > 0 %} +{{ i.title }} +{{ "%s" % ('"' * i.title|length) }} +{% endif %} +{{ i.description }} .. code-block:: {{ i.lang }} - {{ lookup('file', i.file)|indent(2) }} + {{ lookup('file', i.file) | split('\n') | reject('match', '^(#|---)') | join ('\n') |indent(2) }} {% endfor %} diff --git a/docs/docsite/helper/lists_mergeby/list3.out.j2 b/docs/docsite/helper/lists_mergeby/list3.out.j2 index b51f6b8681..a30a5c4ab0 100644 --- a/docs/docsite/helper/lists_mergeby/list3.out.j2 +++ b/docs/docsite/helper/lists_mergeby/list3.out.j2 @@ -4,4 +4,4 @@ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://w SPDX-License-Identifier: GPL-3.0-or-later #} list3: -{{ list3|to_nice_yaml(indent=0) }} + {{ list3 | to_yaml(indent=2, sort_keys=false) | indent(2) }} diff --git a/docs/docsite/helper/lists_mergeby/playbook.yml b/docs/docsite/helper/lists_mergeby/playbook.yml index 793d233485..ab389fa129 100644 --- a/docs/docsite/helper/lists_mergeby/playbook.yml +++ b/docs/docsite/helper/lists_mergeby/playbook.yml @@ -5,7 +5,7 @@ # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # 1) Run all examples and create example-XXX.out -# shell> ansible-playbook playbook.yml -e examples=true +# shell> ansible-playbook playbook.yml -e examples_one=true # # 2) Optionally, for testing, create examples_all.rst # shell> ansible-playbook playbook.yml -e examples_all=true @@ -45,18 +45,20 @@ tags: t007 - import_tasks: example-008.yml tags: t008 - when: examples|d(false)|bool + - import_tasks: example-009.yml + tags: t009 + when: examples_one | d(false) | bool - block: - include_vars: examples.yml - template: src: examples_all.rst.j2 dest: examples_all.rst - when: examples_all|d(false)|bool + when: examples_all | d(false) | bool - block: - include_vars: examples.yml - template: src: filter_guide_abstract_informations_merging_lists_of_dictionaries.rst.j2 dest: filter_guide_abstract_informations_merging_lists_of_dictionaries.rst - when: merging_lists_of_dictionaries|d(false)|bool + when: merging_lists_of_dictionaries | d(false) | bool diff --git a/docs/docsite/helper/remove_keys/README.md b/docs/docsite/helper/remove_keys/README.md new file mode 100644 index 0000000000..69a4076ef9 --- /dev/null +++ b/docs/docsite/helper/remove_keys/README.md @@ -0,0 +1,61 @@ + + +# Docs helper. Create RST file. + +The playbook `playbook.yml` writes a RST file that can be used in +docs/docsite/rst. The usage of this helper is recommended but not +mandatory. You can stop reading here and update the RST file manually +if you don't want to use this helper. + +## Run the playbook + +If you want to generate the RST file by this helper fit the variables +in the playbook and the template to your needs. Then, run the play + +```sh +shell> ansible-playbook playbook.yml +``` + +## Copy RST to docs/docsite/rst + +Copy the RST file to `docs/docsite/rst` and remove it from this +directory. 
+ +## Update the checksums + +Substitute the variables and run the below commands + +```sh +shell> sha1sum {{ target_vars }} > {{ target_sha1 }} +shell> sha1sum {{ file_rst }} > {{ file_sha1 }} +``` + +## Playbook explained + +The playbook includes the variable *tests* from the integration tests +and creates the RST file from the template. The playbook will +terminate if: + +* The file with the variable *tests* was changed +* The RST file was changed + +This means that this helper is probably not up to date. + +### The file with the variable *tests* was changed + +This means that somebody updated the integration tests. Review the +changes and update the template if needed. Update the checksum to pass +the integrity test. The playbook message provides you with the +command. + +### The RST file was changed + +This means that somebody updated the RST file manually. Review the +changes and update the template. Update the checksum to pass the +integrity test. The playbook message provides you with the +command. Make sure that the updated template will create identical RST +file. Only then apply your changes. diff --git a/docs/docsite/helper/remove_keys/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst.j2 b/docs/docsite/helper/remove_keys/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst.j2 new file mode 100644 index 0000000000..62b25c344c --- /dev/null +++ b/docs/docsite/helper/remove_keys/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst.j2 @@ -0,0 +1,80 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +remove_keys +""""""""""" + +Use the filter :ansplugin:`community.general.remove_keys#filter` if you have a list of dictionaries and want to remove certain keys. + +.. note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See :ansplugin:`the documentation for the community.general.yaml callback plugin `. + + +Let us use the below list in the following examples: + +.. code-block:: yaml + + input: + {{ tests.0.input | to_yaml(indent=2) | indent(5) }} + +{% for i in tests[0:1]|subelements('group') %} +* {{ i.1.d }} + +.. code-block:: yaml+jinja + :emphasize-lines: 1 + + target: {{ i.1.tt }} + result: "{{ lookup('file', target ~ '/templates/' ~ i.0.template) }}" + +{% endfor %} + +gives + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + {{ tests.0.result | to_yaml(indent=2) | indent(5) }} + +.. versionadded:: 9.1.0 + +* The results of the below examples 1-5 are all the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + {{ tests.1.result | to_yaml(indent=2) | indent(5) }} + +{% for i in tests[1:2]|subelements('group') %} +{{ loop.index }}. {{ i.1.d }} + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: {{ i.1.mp }} + target: {{ i.1.tt }} + result: "{{ lookup('file', target ~ '/templates/' ~ i.0.template) }}" + +{% endfor %} + +* The results of the below examples 6-9 are all the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + {{ tests.2.result | to_yaml(indent=2) | indent(5) }} + +{% for i in tests[2:3]|subelements('group') %} +{{ loop.index + 5 }}. {{ i.1.d }} + +..
code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: {{ i.1.mp }} + target: {{ i.1.tt }} + result: "{{ lookup('file', target ~ '/templates/' ~ i.0.template) }}" + +{% endfor %} diff --git a/docs/docsite/helper/remove_keys/playbook.yml b/docs/docsite/helper/remove_keys/playbook.yml new file mode 100644 index 0000000000..a2243d992e --- /dev/null +++ b/docs/docsite/helper/remove_keys/playbook.yml @@ -0,0 +1,79 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +# Create docs REST files +# shell> ansible-playbook playbook.yml +# +# Proofread and copy created *.rst file into the directory +# docs/docsite/rst. Do not add *.rst in this directory to the version +# control. +# +# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +# community.general/docs/docsite/helper/remove_keys/playbook.yml + +- name: Create RST file for docs/docsite/rst + hosts: localhost + gather_facts: false + + vars: + + plugin: remove_keys + plugin_type: filter + docs_path: + - filter_guide + - abstract_informations + - lists_of_dictionaries + + file_base: "{{ (docs_path + [plugin]) | join('-') }}" + file_rst: ../../rst/{{ file_base }}.rst + file_sha1: "{{ plugin }}.rst.sha1" + + target: "../../../../tests/integration/targets/{{ plugin_type }}_{{ plugin }}" + target_vars: "{{ target }}/vars/main/tests.yml" + target_sha1: tests.yml.sha1 + + tasks: + + - name: Test integrity tests.yml + when: + - integrity | d(true) | bool + - lookup('file', target_sha1) != lookup('pipe', 'sha1sum ' ~ target_vars) + block: + + - name: Changed tests.yml + ansible.builtin.debug: + msg: | + Changed {{ target_vars }} + Review the changes and update {{ target_sha1 }} + shell> sha1sum {{ target_vars }} > {{ target_sha1 }} + + - name: Changed tests.yml end host + ansible.builtin.meta: end_play + + - name: Test integrity RST file + when: + - integrity | d(true) | bool + - lookup('file', file_sha1) != lookup('pipe', 'sha1sum ' ~ file_rst) + block: + + - name: Changed RST file + ansible.builtin.debug: + msg: | + Changed {{ file_rst }} + Review the changes and update {{ file_sha1 }} + shell> sha1sum {{ file_rst }} > {{ file_sha1 }} + + - name: Changed RST file end host + ansible.builtin.meta: end_play + + - name: Include target vars + include_vars: + file: "{{ target_vars }}" + + - name: Create RST file + ansible.builtin.template: + src: "{{ file_base }}.rst.j2" + dest: "{{ file_base }}.rst" diff --git a/docs/docsite/helper/remove_keys/remove_keys.rst.sha1 b/docs/docsite/helper/remove_keys/remove_keys.rst.sha1 new file mode 100644 index 0000000000..a1c9e18210 --- /dev/null +++ b/docs/docsite/helper/remove_keys/remove_keys.rst.sha1 @@ -0,0 +1 @@ +3cc606b42e3d450cf6323f25930f7c5a591fa086 ../../rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst diff --git a/docs/docsite/helper/remove_keys/remove_keys.rst.sha1.license b/docs/docsite/helper/remove_keys/remove_keys.rst.sha1.license new file mode 100644 index 0000000000..a1390a69ed --- /dev/null +++ b/docs/docsite/helper/remove_keys/remove_keys.rst.sha1.license @@ -0,0 +1,3 @@ +Copyright (c) Ansible Project +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later diff --git a/docs/docsite/helper/remove_keys/tests.yml.sha1 
b/docs/docsite/helper/remove_keys/tests.yml.sha1 new file mode 100644 index 0000000000..107a64d73c --- /dev/null +++ b/docs/docsite/helper/remove_keys/tests.yml.sha1 @@ -0,0 +1 @@ +0554335045f02d8c37b824355b0cf86864cee9a5 ../../../../tests/integration/targets/filter_remove_keys/vars/main/tests.yml diff --git a/docs/docsite/helper/remove_keys/tests.yml.sha1.license b/docs/docsite/helper/remove_keys/tests.yml.sha1.license new file mode 100644 index 0000000000..a1390a69ed --- /dev/null +++ b/docs/docsite/helper/remove_keys/tests.yml.sha1.license @@ -0,0 +1,3 @@ +Copyright (c) Ansible Project +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later diff --git a/docs/docsite/helper/replace_keys/README.md b/docs/docsite/helper/replace_keys/README.md new file mode 100644 index 0000000000..69a4076ef9 --- /dev/null +++ b/docs/docsite/helper/replace_keys/README.md @@ -0,0 +1,61 @@ + + +# Docs helper. Create RST file. + +The playbook `playbook.yml` writes a RST file that can be used in +docs/docsite/rst. The usage of this helper is recommended but not +mandatory. You can stop reading here and update the RST file manually +if you don't want to use this helper. + +## Run the playbook + +If you want to generate the RST file by this helper fit the variables +in the playbook and the template to your needs. Then, run the play + +```sh +shell> ansible-playbook playbook.yml +``` + +## Copy RST to docs/docsite/rst + +Copy the RST file to `docs/docsite/rst` and remove it from this +directory. + +## Update the checksums + +Substitute the variables and run the below commands + +```sh +shell> sha1sum {{ target_vars }} > {{ target_sha1 }} +shell> sha1sum {{ file_rst }} > {{ file_sha1 }} +``` + +## Playbook explained + +The playbook includes the variable *tests* from the integration tests +and creates the RST file from the template. The playbook will +terminate if: + +* The file with the variable *tests* was changed +* The RST file was changed + +This means that this helper is probably not up to date. + +### The file with the variable *tests* was changed + +This means that somebody updated the integration tests. Review the +changes and update the template if needed. Update the checksum to pass +the integrity test. The playbook message provides you with the +command. + +### The RST file was changed + +This means that somebody updated the RST file manually. Review the +changes and update the template. Update the checksum to pass the +integrity test. The playbook message provides you with the +command. Make sure that the updated template will create identical RST +file. Only then apply your changes. diff --git a/docs/docsite/helper/replace_keys/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst.j2 b/docs/docsite/helper/replace_keys/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst.j2 new file mode 100644 index 0000000000..fb0af32f2f --- /dev/null +++ b/docs/docsite/helper/replace_keys/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst.j2 @@ -0,0 +1,110 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +replace_keys +"""""""""""" + +Use the filter :ansplugin:`community.general.replace_keys#filter` if you have a list of dictionaries and want to replace certain keys. + +.. 
note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See :ansplugin:`the documentation for the community.general.yaml callback plugin `. + + +Let us use the below list in the following examples: + +.. code-block:: yaml + + input: + {{ tests.0.input | to_yaml(indent=2) | indent(5) }} + +{% for i in tests[0:1]|subelements('group') %} +* {{ i.1.d }} + +.. code-block:: yaml+jinja + :emphasize-lines: 1-3 + + target: + {{ i.1.tt | to_yaml(indent=2) | indent(5) }} + result: "{{ lookup('file', target ~ '/templates/' ~ i.0.template) }}" + +{% endfor %} + +gives + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + {{ tests.0.result | to_yaml(indent=2) | indent(5) }} + +.. versionadded:: 9.1.0 + +* The results of the below examples 1-3 are all the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + {{ tests.1.result | to_yaml(indent=2) | indent(5) }} + +{% for i in tests[1:2]|subelements('group') %} +{{ loop.index }}. {{ i.1.d }} + +.. code-block:: yaml+jinja + :emphasize-lines: 1-4 + + mp: {{ i.1.mp }} + target: + {{ i.1.tt | to_yaml(indent=2) | indent(5) }} + result: "{{ lookup('file', target ~ '/templates/' ~ i.0.template) }}" + +{% endfor %} + +* The results of the below examples 4-5 are the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + {{ tests.2.result | to_yaml(indent=2) | indent(5) }} + +{% for i in tests[2:3]|subelements('group') %} +{{ loop.index + 3 }}. {{ i.1.d }} + +.. code-block:: yaml+jinja + :emphasize-lines: 1-3 + + mp: {{ i.1.mp }} + target: + {{ i.1.tt | to_yaml(indent=2) | indent(5) }} + result: "{{ lookup('file', target ~ '/templates/' ~ i.0.template) }}" + +{% endfor %} + +{% for i in tests[3:4]|subelements('group') %} +{{ loop.index + 5 }}. {{ i.1.d }} + +.. code-block:: yaml + :emphasize-lines: 1- + + input: + {{ i.0.input | to_yaml(indent=2) | indent(5) }} + +.. code-block:: yaml+jinja + :emphasize-lines: 1-4 + + mp: {{ i.1.mp }} + target: + {{ i.1.tt | to_yaml(indent=2) | indent(5) }} + result: "{{ lookup('file', target ~ '/templates/' ~ i.0.template) }}" + +gives + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + {{ i.0.result | to_yaml(indent=2) | indent(5) }} + +{% endfor %} diff --git a/docs/docsite/helper/replace_keys/playbook.yml b/docs/docsite/helper/replace_keys/playbook.yml new file mode 100644 index 0000000000..3619000144 --- /dev/null +++ b/docs/docsite/helper/replace_keys/playbook.yml @@ -0,0 +1,79 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +# Create docs REST files +# shell> ansible-playbook playbook.yml +# +# Proofread and copy created *.rst file into the directory +# docs/docsite/rst. Do not add *.rst in this directory to the version +# control. 
+# +# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +# community.general/docs/docsite/helper/replace_keys/playbook.yml + +- name: Create RST file for docs/docsite/rst + hosts: localhost + gather_facts: false + + vars: + + plugin: replace_keys + plugin_type: filter + docs_path: + - filter_guide + - abstract_informations + - lists_of_dictionaries + + file_base: "{{ (docs_path + [plugin]) | join('-') }}" + file_rst: ../../rst/{{ file_base }}.rst + file_sha1: "{{ plugin }}.rst.sha1" + + target: "../../../../tests/integration/targets/{{ plugin_type }}_{{ plugin }}" + target_vars: "{{ target }}/vars/main/tests.yml" + target_sha1: tests.yml.sha1 + + tasks: + + - name: Test integrity tests.yml + when: + - integrity | d(true) | bool + - lookup('file', target_sha1) != lookup('pipe', 'sha1sum ' ~ target_vars) + block: + + - name: Changed tests.yml + ansible.builtin.debug: + msg: | + Changed {{ target_vars }} + Review the changes and update {{ target_sha1 }} + shell> sha1sum {{ target_vars }} > {{ target_sha1 }} + + - name: Changed tests.yml end host + ansible.builtin.meta: end_play + + - name: Test integrity RST file + when: + - integrity | d(true) | bool + - lookup('file', file_sha1) != lookup('pipe', 'sha1sum ' ~ file_rst) + block: + + - name: Changed RST file + ansible.builtin.debug: + msg: | + Changed {{ file_rst }} + Review the changes and update {{ file_sha1 }} + shell> sha1sum {{ file_rst }} > {{ file_sha1 }} + + - name: Changed RST file end host + ansible.builtin.meta: end_play + + - name: Include target vars + include_vars: + file: "{{ target_vars }}" + + - name: Create RST file + ansible.builtin.template: + src: "{{ file_base }}.rst.j2" + dest: "{{ file_base }}.rst" diff --git a/docs/docsite/helper/replace_keys/replace_keys.rst.sha1 b/docs/docsite/helper/replace_keys/replace_keys.rst.sha1 new file mode 100644 index 0000000000..2ae692f3cc --- /dev/null +++ b/docs/docsite/helper/replace_keys/replace_keys.rst.sha1 @@ -0,0 +1 @@ +403f23c02ac02b1c3b611cb14f9b3ba59dc3f587 ../../rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst diff --git a/docs/docsite/helper/replace_keys/replace_keys.rst.sha1.license b/docs/docsite/helper/replace_keys/replace_keys.rst.sha1.license new file mode 100644 index 0000000000..a1390a69ed --- /dev/null +++ b/docs/docsite/helper/replace_keys/replace_keys.rst.sha1.license @@ -0,0 +1,3 @@ +Copyright (c) Ansible Project +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later diff --git a/docs/docsite/helper/replace_keys/tests.yml.sha1 b/docs/docsite/helper/replace_keys/tests.yml.sha1 new file mode 100644 index 0000000000..53944ddf74 --- /dev/null +++ b/docs/docsite/helper/replace_keys/tests.yml.sha1 @@ -0,0 +1 @@ +2e54f3528c95cca746d5748f1ed7ada56ad0890e ../../../../tests/integration/targets/filter_replace_keys/vars/main/tests.yml diff --git a/docs/docsite/helper/replace_keys/tests.yml.sha1.license b/docs/docsite/helper/replace_keys/tests.yml.sha1.license new file mode 100644 index 0000000000..a1390a69ed --- /dev/null +++ b/docs/docsite/helper/replace_keys/tests.yml.sha1.license @@ -0,0 +1,3 @@ +Copyright (c) Ansible Project +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later diff --git a/docs/docsite/links.yml b/docs/docsite/links.yml index bd954c4096..fe41d1d2fd 100644 --- a/docs/docsite/links.yml +++ 
b/docs/docsite/links.yml @@ -9,6 +9,8 @@ edit_on_github: path_prefix: '' extra_links: + - description: Ask for help + url: https://forum.ansible.com/c/help/6/none - description: Submit a bug report url: https://github.com/ansible-collections/community.general/issues/new?assignees=&labels=&template=bug_report.yml - description: Request a feature @@ -22,6 +24,10 @@ communication: - topic: General usage and support questions network: Libera channel: '#ansible' - mailing_lists: - - topic: Ansible Project List - url: https://groups.google.com/g/ansible-project + forums: + - topic: "Ansible Forum: General usage and support questions" + # The following URL directly points to the "Get Help" section + url: https://forum.ansible.com/c/help/6/none + - topic: "Ansible Forum: Discussions about the collection itself, not for specific modules or plugins" + # The following URL directly points to the "community-general" tag + url: https://forum.ansible.com/tag/community-general diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst new file mode 100644 index 0000000000..488cb2ce7d --- /dev/null +++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst @@ -0,0 +1,151 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +keep_keys +""""""""" + +Use the filter :ansplugin:`community.general.keep_keys#filter` if you have a list of dictionaries and want to keep certain keys only. + +.. note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See :ansplugin:`the documentation for the community.general.yaml callback plugin `. + + +Let us use the below list in the following examples: + +.. code-block:: yaml + + input: + - k0_x0: A0 + k1_x1: B0 + k2_x2: [C0] + k3_x3: foo + - k0_x0: A1 + k1_x1: B1 + k2_x2: [C1] + k3_x3: bar + + +* By default, match keys that equal any of the items in the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1 + + target: ['k0_x0', 'k1_x1'] + result: "{{ input | community.general.keep_keys(target=target) }}" + + +gives + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - {k0_x0: A0, k1_x1: B0} + - {k0_x0: A1, k1_x1: B1} + + +.. versionadded:: 9.1.0 + +* The results of the below examples 1-5 are all the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - {k0_x0: A0, k1_x1: B0} + - {k0_x0: A1, k1_x1: B1} + + +1. Match keys that equal any of the items in the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: equal + target: ['k0_x0', 'k1_x1'] + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + +2. Match keys that start with any of the items in the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: starts_with + target: ['k0', 'k1'] + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + +3. Match keys that end with any of the items in target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: ends_with + target: ['x0', 'x1'] + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + +4. Match keys by the regex. + +.. 
code-block:: yaml+jinja
+   :emphasize-lines: 1,2
+
+   mp: regex
+   target: ['^.*[01]_x.*$']
+   result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}"
+
+5. Match keys by the regex.
+
+.. code-block:: yaml+jinja
+   :emphasize-lines: 1,2
+
+   mp: regex
+   target: ^.*[01]_x.*$
+   result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}"
+
+
+* The results of the below examples 6-9 are all the same:
+
+.. code-block:: yaml
+   :emphasize-lines: 1-
+
+   result:
+     - {k0_x0: A0}
+     - {k0_x0: A1}
+
+
+6. Match keys that equal the target.
+
+.. code-block:: yaml+jinja
+   :emphasize-lines: 1,2
+
+   mp: equal
+   target: k0_x0
+   result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}"
+
+7. Match keys that start with the target.
+
+.. code-block:: yaml+jinja
+   :emphasize-lines: 1,2
+
+   mp: starts_with
+   target: k0
+   result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}"
+
+8. Match keys that end with the target.
+
+.. code-block:: yaml+jinja
+   :emphasize-lines: 1,2
+
+   mp: ends_with
+   target: x0
+   result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}"
+
+9. Match keys by the regex.
+
+.. code-block:: yaml+jinja
+   :emphasize-lines: 1,2
+
+   mp: regex
+   target: ^.*0_x.*$
+   result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}"
+
diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst
new file mode 100644 index 0000000000..03d4710f3a
--- /dev/null
+++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst
@@ -0,0 +1,159 @@
+..
+  Copyright (c) Ansible Project
+  GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+  SPDX-License-Identifier: GPL-3.0-or-later
+
+remove_keys
+"""""""""""
+
+Use the filter :ansplugin:`community.general.remove_keys#filter` if you have a list of dictionaries and want to remove certain keys.
+
+.. note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See :ansplugin:`the documentation for the community.general.yaml callback plugin `.
+
+
+Let us use the below list in the following examples:
+
+.. code-block:: yaml
+
+   input:
+     - k0_x0: A0
+       k1_x1: B0
+       k2_x2: [C0]
+       k3_x3: foo
+     - k0_x0: A1
+       k1_x1: B1
+       k2_x2: [C1]
+       k3_x3: bar
+
+
+* By default, match keys that equal any of the items in the target.
+
+.. code-block:: yaml+jinja
+   :emphasize-lines: 1
+
+   target: ['k0_x0', 'k1_x1']
+   result: "{{ input | community.general.remove_keys(target=target) }}"
+
+
+gives
+
+.. code-block:: yaml
+   :emphasize-lines: 1-
+
+   result:
+     - k2_x2: [C0]
+       k3_x3: foo
+     - k2_x2: [C1]
+       k3_x3: bar
+
+
+.. versionadded:: 9.1.0
+
+* The results of the below examples 1-5 are all the same:
+
+.. code-block:: yaml
+   :emphasize-lines: 1-
+
+   result:
+     - k2_x2: [C0]
+       k3_x3: foo
+     - k2_x2: [C1]
+       k3_x3: bar
+
+
+1. Match keys that equal any of the items in the target.
+
+.. code-block:: yaml+jinja
+   :emphasize-lines: 1,2
+
+   mp: equal
+   target: ['k0_x0', 'k1_x1']
+   result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}"
+
+2. Match keys that start with any of the items in the target.
+
+..
code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: starts_with + target: ['k0', 'k1'] + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +3. Match keys that end with any of the items in target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: ends_with + target: ['x0', 'x1'] + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +4. Match keys by the regex. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: regex + target: ['^.*[01]_x.*$'] + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +5. Match keys by the regex. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: regex + target: ^.*[01]_x.*$ + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + + +* The results of the below examples 6-9 are all the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - k1_x1: B0 + k2_x2: [C0] + k3_x3: foo + - k1_x1: B1 + k2_x2: [C1] + k3_x3: bar + + +6. Match keys that equal the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: equal + target: k0_x0 + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +7. Match keys that start with the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: starts_with + target: k0 + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +8. Match keys that end with the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: ends_with + target: x0 + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +9. Match keys by the regex. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: regex + target: ^.*0_x.*$ + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst new file mode 100644 index 0000000000..ba1bcad502 --- /dev/null +++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst @@ -0,0 +1,175 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +replace_keys +"""""""""""" + +Use the filter :ansplugin:`community.general.replace_keys#filter` if you have a list of dictionaries and want to replace certain keys. + +.. note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See :ansplugin:`the documentation for the community.general.yaml callback plugin `. + + +Let us use the below list in the following examples: + +.. code-block:: yaml + + input: + - k0_x0: A0 + k1_x1: B0 + k2_x2: [C0] + k3_x3: foo + - k0_x0: A1 + k1_x1: B1 + k2_x2: [C1] + k3_x3: bar + + +* By default, match keys that equal any of the attributes before. + +.. code-block:: yaml+jinja + :emphasize-lines: 1-3 + + target: + - {after: a0, before: k0_x0} + - {after: a1, before: k1_x1} + + result: "{{ input | community.general.replace_keys(target=target) }}" + + +gives + +.. 
code-block:: yaml + :emphasize-lines: 1- + + result: + - a0: A0 + a1: B0 + k2_x2: [C0] + k3_x3: foo + - a0: A1 + a1: B1 + k2_x2: [C1] + k3_x3: bar + + +.. versionadded:: 9.1.0 + +* The results of the below examples 1-3 are all the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - a0: A0 + a1: B0 + k2_x2: [C0] + k3_x3: foo + - a0: A1 + a1: B1 + k2_x2: [C1] + k3_x3: bar + + +1. Replace keys that starts with any of the attributes before. + +.. code-block:: yaml+jinja + :emphasize-lines: 1-4 + + mp: starts_with + target: + - {after: a0, before: k0} + - {after: a1, before: k1} + + result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}" + +2. Replace keys that ends with any of the attributes before. + +.. code-block:: yaml+jinja + :emphasize-lines: 1-4 + + mp: ends_with + target: + - {after: a0, before: x0} + - {after: a1, before: x1} + + result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}" + +3. Replace keys that match any regex of the attributes before. + +.. code-block:: yaml+jinja + :emphasize-lines: 1-4 + + mp: regex + target: + - {after: a0, before: ^.*0_x.*$} + - {after: a1, before: ^.*1_x.*$} + + result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}" + + +* The results of the below examples 4-5 are the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - {X: foo} + - {X: bar} + + +4. If more keys match the same attribute before the last one will be used. + +.. code-block:: yaml+jinja + :emphasize-lines: 1-3 + + mp: regex + target: + - {after: X, before: ^.*_x.*$} + + result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}" + +5. If there are items with equal attribute before the first one will be used. + +.. code-block:: yaml+jinja + :emphasize-lines: 1-3 + + mp: regex + target: + - {after: X, before: ^.*_x.*$} + - {after: Y, before: ^.*_x.*$} + + result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}" + + +6. If there are more matches for a key the first one will be used. + +.. code-block:: yaml + :emphasize-lines: 1- + + input: + - {aaa1: A, bbb1: B, ccc1: C} + - {aaa2: D, bbb2: E, ccc2: F} + + +.. code-block:: yaml+jinja + :emphasize-lines: 1-4 + + mp: starts_with + target: + - {after: X, before: a} + - {after: Y, before: aa} + + result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}" + +gives + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - {X: A, bbb1: B, ccc1: C} + - {X: D, bbb2: E, ccc2: F} + + diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst new file mode 100644 index 0000000000..42737c44b7 --- /dev/null +++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst @@ -0,0 +1,18 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.filter_guide.filter_guide_abstract_informations.lists_of_dicts: + +Lists of dictionaries +^^^^^^^^^^^^^^^^^^^^^ + +Filters to manage keys in a list of dictionaries: + +.. 
toctree:: + :maxdepth: 1 + + filter_guide-abstract_informations-lists_of_dictionaries-keep_keys + filter_guide-abstract_informations-lists_of_dictionaries-remove_keys + filter_guide-abstract_informations-lists_of_dictionaries-replace_keys diff --git a/docs/docsite/rst/filter_guide_abstract_informations.rst b/docs/docsite/rst/filter_guide_abstract_informations.rst index cac85089a0..818c09f02c 100644 --- a/docs/docsite/rst/filter_guide_abstract_informations.rst +++ b/docs/docsite/rst/filter_guide_abstract_informations.rst @@ -11,6 +11,7 @@ Abstract transformations filter_guide_abstract_informations_dictionaries filter_guide_abstract_informations_grouping + filter_guide-abstract_informations-lists_of_dictionaries filter_guide_abstract_informations_merging_lists_of_dictionaries filter_guide_abstract_informations_lists_helper filter_guide_abstract_informations_counting_elements_in_sequence diff --git a/docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst b/docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst index 06fa79d16a..cafe04e5c4 100644 --- a/docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst +++ b/docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst @@ -6,33 +6,30 @@ Merging lists of dictionaries ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -If you have two or more lists of dictionaries and want to combine them into a list of merged dictionaries, where the dictionaries are merged by an attribute, you can use the :ansplugin:`community.general.lists_mergeby filter `. +If you have two or more lists of dictionaries and want to combine them into a list of merged dictionaries, where the dictionaries are merged by an attribute, you can use the :ansplugin:`community.general.lists_mergeby ` filter. -.. note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See :ref:`the documentation for the community.general.yaml callback plugin `. +.. note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See the documentation for the :ansplugin:`community.general.yaml callback plugin `. Let us use the lists below in the following examples: .. code-block:: yaml list1: - - name: foo - extra: true - - name: bar - extra: false - - name: meh - extra: true + - {name: foo, extra: true} + - {name: bar, extra: false} + - {name: meh, extra: true} list2: - - name: foo - path: /foo - - name: baz - path: /baz + - {name: foo, path: /foo} + - {name: baz, path: /baz} +Two lists +""""""""" In the example below the lists are merged by the attribute ``name``: .. code-block:: yaml+jinja - list3: "{{ list1| + list3: "{{ list1 | community.general.lists_mergeby(list2, 'name') }}" This produces: @@ -40,24 +37,21 @@ This produces: .. code-block:: yaml list3: - - extra: false - name: bar - - name: baz - path: /baz - - extra: true - name: foo - path: /foo - - extra: true - name: meh + - {name: bar, extra: false} + - {name: baz, path: /baz} + - {name: foo, extra: true, path: /foo} + - {name: meh, extra: true} .. versionadded:: 2.0.0 +List of two lists +""""""""""""""""" It is possible to use a list of lists as an input of the filter: .. 
code-block:: yaml+jinja - list3: "{{ [list1, list2]| + list3: "{{ [list1, list2] | community.general.lists_mergeby('name') }}" This produces the same result as in the previous example: @@ -65,15 +59,29 @@ This produces the same result as in the previous example: .. code-block:: yaml list3: - - extra: false - name: bar - - name: baz - path: /baz - - extra: true - name: foo - path: /foo - - extra: true - name: meh + - {name: bar, extra: false} + - {name: baz, path: /baz} + - {name: foo, extra: true, path: /foo} + - {name: meh, extra: true} + +Single list +""""""""""" +It is possible to merge single list: + +.. code-block:: yaml+jinja + + list3: "{{ [list1 + list2, []] | + community.general.lists_mergeby('name') }}" + +This produces the same result as in the previous example: + +.. code-block:: yaml + + list3: + - {name: bar, extra: false} + - {name: baz, path: /baz} + - {name: foo, extra: true, path: /foo} + - {name: meh, extra: true} The filter also accepts two optional parameters: :ansopt:`community.general.lists_mergeby#filter:recursive` and :ansopt:`community.general.lists_mergeby#filter:list_merge`. This is available since community.general 4.4.0. @@ -95,8 +103,7 @@ Let us use the lists below in the following examples param01: x: default_value y: default_value - list: - - default_value + list: [default_value] - name: myname02 param01: [1, 1, 2, 3] @@ -105,16 +112,17 @@ Let us use the lists below in the following examples param01: y: patch_value z: patch_value - list: - - patch_value + list: [patch_value] - name: myname02 - param01: [3, 4, 4, {key: value}] + param01: [3, 4, 4] +list_merge=replace (default) +"""""""""""""""""""""""""""" Example :ansopt:`community.general.lists_mergeby#filter:list_merge=replace` (default): .. code-block:: yaml+jinja - list3: "{{ [list1, list2]| + list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true) }}" @@ -123,25 +131,22 @@ This produces: .. code-block:: yaml list3: - - name: myname01 - param01: - list: - - patch_value - x: default_value - y: patch_value - z: patch_value - - name: myname02 - param01: - - 3 - - 4 - - 4 - - key: value + - name: myname01 + param01: + x: default_value + y: patch_value + list: [patch_value] + z: patch_value + - name: myname02 + param01: [3, 4, 4] +list_merge=keep +""""""""""""""" Example :ansopt:`community.general.lists_mergeby#filter:list_merge=keep`: .. code-block:: yaml+jinja - list3: "{{ [list1, list2]| + list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true, list_merge='keep') }}" @@ -151,25 +156,22 @@ This produces: .. code-block:: yaml list3: - - name: myname01 - param01: - list: - - default_value - x: default_value - y: patch_value - z: patch_value - - name: myname02 - param01: - - 1 - - 1 - - 2 - - 3 + - name: myname01 + param01: + x: default_value + y: patch_value + list: [default_value] + z: patch_value + - name: myname02 + param01: [1, 1, 2, 3] +list_merge=append +""""""""""""""""" Example :ansopt:`community.general.lists_mergeby#filter:list_merge=append`: .. code-block:: yaml+jinja - list3: "{{ [list1, list2]| + list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true, list_merge='append') }}" @@ -179,30 +181,22 @@ This produces: .. 
code-block:: yaml list3: - - name: myname01 - param01: - list: - - default_value - - patch_value - x: default_value - y: patch_value - z: patch_value - - name: myname02 - param01: - - 1 - - 1 - - 2 - - 3 - - 3 - - 4 - - 4 - - key: value + - name: myname01 + param01: + x: default_value + y: patch_value + list: [default_value, patch_value] + z: patch_value + - name: myname02 + param01: [1, 1, 2, 3, 3, 4, 4] +list_merge=prepend +"""""""""""""""""" Example :ansopt:`community.general.lists_mergeby#filter:list_merge=prepend`: .. code-block:: yaml+jinja - list3: "{{ [list1, list2]| + list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true, list_merge='prepend') }}" @@ -212,30 +206,22 @@ This produces: .. code-block:: yaml list3: - - name: myname01 - param01: - list: - - patch_value - - default_value - x: default_value - y: patch_value - z: patch_value - - name: myname02 - param01: - - 3 - - 4 - - 4 - - key: value - - 1 - - 1 - - 2 - - 3 + - name: myname01 + param01: + x: default_value + y: patch_value + list: [patch_value, default_value] + z: patch_value + - name: myname02 + param01: [3, 4, 4, 1, 1, 2, 3] +list_merge=append_rp +"""""""""""""""""""" Example :ansopt:`community.general.lists_mergeby#filter:list_merge=append_rp`: .. code-block:: yaml+jinja - list3: "{{ [list1, list2]| + list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true, list_merge='append_rp') }}" @@ -245,29 +231,22 @@ This produces: .. code-block:: yaml list3: - - name: myname01 - param01: - list: - - default_value - - patch_value - x: default_value - y: patch_value - z: patch_value - - name: myname02 - param01: - - 1 - - 1 - - 2 - - 3 - - 4 - - 4 - - key: value + - name: myname01 + param01: + x: default_value + y: patch_value + list: [default_value, patch_value] + z: patch_value + - name: myname02 + param01: [1, 1, 2, 3, 4, 4] +list_merge=prepend_rp +""""""""""""""""""""" Example :ansopt:`community.general.lists_mergeby#filter:list_merge=prepend_rp`: .. code-block:: yaml+jinja - list3: "{{ [list1, list2]| + list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true, list_merge='prepend_rp') }}" @@ -277,21 +256,12 @@ This produces: .. code-block:: yaml list3: - - name: myname01 - param01: - list: - - patch_value - - default_value - x: default_value - y: patch_value - z: patch_value - - name: myname02 - param01: - - 3 - - 4 - - 4 - - key: value - - 1 - - 1 - - 2 + - name: myname01 + param01: + x: default_value + y: patch_value + list: [patch_value, default_value] + z: patch_value + - name: myname02 + param01: [3, 4, 4, 1, 1, 2] diff --git a/docs/docsite/rst/guide_cmdrunner.rst b/docs/docsite/rst/guide_cmdrunner.rst new file mode 100644 index 0000000000..f7b70a86e1 --- /dev/null +++ b/docs/docsite/rst/guide_cmdrunner.rst @@ -0,0 +1,499 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_cmdrunner: + + +Command Runner guide +==================== + + +Introduction +^^^^^^^^^^^^ + +The ``ansible_collections.community.general.plugins.module_utils.cmd_runner`` module util provides the +``CmdRunner`` class to help execute external commands. The class is a wrapper around +the standard ``AnsibleModule.run_command()`` method, handling command arguments, localization setting, +output processing output, check mode, and other features. 
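+
+For orientation only, the hedged sketch below shows the kind of hand-written
+``run_command()`` boilerplate that ``CmdRunner`` is meant to replace; the command
+name and options are made up for illustration and do not refer to a real module:
+
+.. code-block:: python
+
+    # without CmdRunner: every module builds the argument list by hand
+    cmd = [module.get_bin_path("sometool", required=True)]
+    if module.params["force"]:
+        cmd.append("--force")
+    if module.params["dest"]:
+        cmd.extend(["-p", module.params["dest"]])
+    rc, stdout, stderr = module.run_command(cmd, environ_update={"LANGUAGE": "C", "LC_ALL": "C"})
+
+With ``CmdRunner``, such formatting rules are declared once as ``arg_formats`` and reused.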
+ +It is even more useful when one command is used in multiple modules, so that you can define all options +in a module util file, and each module uses the same runner with different arguments. + +For the sake of clarity, throughout this guide, unless otherwise specified, we use the term *option* when referring to +Ansible module options, and the term *argument* when referring to the command line arguments for the external command. + + +Quickstart +"""""""""" + +``CmdRunner`` defines a command and a set of coded instructions on how to format +the command-line arguments, in which specific order, for a particular execution. +It relies on ``ansible.module_utils.basic.AnsibleModule.run_command()`` to actually execute the command. +There are other features, see more details throughout this document. + +To use ``CmdRunner`` you must start by creating an object. The example below is a simplified +version of the actual code in :ansplugin:`community.general.ansible_galaxy_install#module`: + +.. code-block:: python + + from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + runner = CmdRunner( + module, + command="ansible-galaxy", + arg_formats=dict( + type=cmd_runner_fmt.as_func(lambda v: [] if v == 'both' else [v]), + galaxy_cmd=cmd_runner_fmt.as_list(), + upgrade=cmd_runner_fmt.as_bool("--upgrade"), + requirements_file=cmd_runner_fmt.as_opt_val('-r'), + dest=cmd_runner_fmt.as_opt_val('-p'), + force=cmd_runner_fmt.as_bool("--force"), + no_deps=cmd_runner_fmt.as_bool("--no-deps"), + version=cmd_runner_fmt.as_fixed("--version"), + name=cmd_runner_fmt.as_list(), + ) + ) + +This is meant to be done once, then every time you need to execute the command you create a context and pass values as needed: + +.. code-block:: python + + # Run the command with these arguments, when values exist for them + with runner("type galaxy_cmd upgrade force no_deps dest requirements_file name", output_process=process) as ctx: + ctx.run(galaxy_cmd="install", upgrade=upgrade) + + # version is fixed, requires no value + with runner("version") as ctx: + dummy, stdout, dummy = ctx.run() + + # passes arg 'data' to AnsibleModule.run_command() + with runner("type name", data=stdin_data) as ctx: + dummy, stdout, dummy = ctx.run() + + # Another way of expressing it + dummy, stdout, dummy = runner("version").run() + +Note that you can pass values for the arguments when calling ``run()``, otherwise ``CmdRunner`` +uses the module options with the exact same names to provide values for the runner arguments. +If no value is passed and no module option is found for the name specified, then an exception is raised, unless +the argument is using ``cmd_runner_fmt.as_fixed`` as format function like the ``version`` in the example above. +See more about it below. + +In the first example, values of ``type``, ``force``, ``no_deps`` and others +are taken straight from the module, whilst ``galaxy_cmd`` and ``upgrade`` are +passed explicitly. + +.. note:: + + It is not possible to automatically retrieve values of suboptions. + +That generates a resulting command line similar to (example taken from the +output of an integration test): + +.. code-block:: python + + [ + "/bin/ansible-galaxy", + "collection", + "install", + "--upgrade", + "-p", + "", + "netbox.netbox", + ] + + +Argument formats +^^^^^^^^^^^^^^^^ + +As seen in the example, ``CmdRunner`` expects a parameter named ``arg_formats`` +defining how to format each CLI named argument. 
+An "argument format" is nothing but a function to transform the value of a variable +into something formatted for the command line. + + +Argument format function +"""""""""""""""""""""""" + +An ``arg_format`` function is defined in the form similar to: + +.. code-block:: python + + def func(value): + return ["--some-param-name", value] + +The parameter ``value`` can be of any type - although there are convenience +mechanisms to help handling sequence and mapping objects. + +The result is expected to be of the type ``Sequence[str]`` type (most commonly +``list[str]`` or ``tuple[str]``), otherwise it is considered to be a ``str``, +and it is coerced into ``list[str]``. +This resulting sequence of strings is added to the command line when that +argument is actually used. + +For example, if ``func`` returns: + +- ``["nee", 2, "shruberries"]``, the command line adds arguments ``"nee" "2" "shruberries"``. +- ``2 == 2``, the command line adds argument ``True``. +- ``None``, the command line adds argument ``None``. +- ``[]``, the command line adds no command line argument for that particular argument. + + +Convenience format methods +"""""""""""""""""""""""""" + +In the same module as ``CmdRunner`` there is a class ``cmd_runner_fmt`` which +provides a set of convenience methods that return format functions for common cases. +In the first block of code in the `Quickstart`_ section you can see the importing of +that class: + +.. code-block:: python + + from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + +The same example shows how to make use of some of them in the instantiation of the ``CmdRunner`` object. +A description of each one of the convenience methods available and examples of how to use them is found below. +In these descriptions ``value`` refers to the single parameter passed to the formatting function. + +- ``cmd_runner_fmt.as_list()`` + This method does not receive any parameter, function returns ``value`` as-is. + + - Creation: + ``cmd_runner_fmt.as_list()`` + - Examples: + +----------------------+---------------------+ + | Value | Outcome | + +======================+=====================+ + | ``["foo", "bar"]`` | ``["foo", "bar"]`` | + +----------------------+---------------------+ + | ``"foobar"`` | ``["foobar"]`` | + +----------------------+---------------------+ + +- ``cmd_runner_fmt.as_bool()`` + This method receives two different parameters: ``args_true`` and ``args_false``, latter being optional. + If the boolean evaluation of ``value`` is ``True``, the format function returns ``args_true``. + If the boolean evaluation is ``False``, then the function returns ``args_false`` if it was provided, or ``[]`` otherwise. 
+ + - Creation (one arg): + ``cmd_runner_fmt.as_bool("--force")`` + - Examples: + +------------+--------------------+ + | Value | Outcome | + +============+====================+ + | ``True`` | ``["--force"]`` | + +------------+--------------------+ + | ``False`` | ``[]`` | + +------------+--------------------+ + - Creation (two args, ``None`` treated as ``False``): + ``cmd_runner_fmt.as_bool("--relax", "--dont-do-it")`` + - Examples: + +------------+----------------------+ + | Value | Outcome | + +============+======================+ + | ``True`` | ``["--relax"]`` | + +------------+----------------------+ + | ``False`` | ``["--dont-do-it"]`` | + +------------+----------------------+ + | | ``["--dont-do-it"]`` | + +------------+----------------------+ + - Creation (two args, ``None`` is ignored): + ``cmd_runner_fmt.as_bool("--relax", "--dont-do-it", ignore_none=True)`` + - Examples: + +------------+----------------------+ + | Value | Outcome | + +============+======================+ + | ``True`` | ``["--relax"]`` | + +------------+----------------------+ + | ``False`` | ``["--dont-do-it"]`` | + +------------+----------------------+ + | | ``[]`` | + +------------+----------------------+ + +- ``cmd_runner_fmt.as_bool_not()`` + This method receives one parameter, which is returned by the function when the boolean evaluation + of ``value`` is ``False``. + + - Creation: + ``cmd_runner_fmt.as_bool_not("--no-deps")`` + - Examples: + +-------------+---------------------+ + | Value | Outcome | + +=============+=====================+ + | ``True`` | ``[]`` | + +-------------+---------------------+ + | ``False`` | ``["--no-deps"]`` | + +-------------+---------------------+ + +- ``cmd_runner_fmt.as_optval()`` + This method receives one parameter ``arg``, the function returns the string concatenation + of ``arg`` and ``value``. + + - Creation: + ``cmd_runner_fmt.as_optval("-i")`` + - Examples: + +---------------+---------------------+ + | Value | Outcome | + +===============+=====================+ + | ``3`` | ``["-i3"]`` | + +---------------+---------------------+ + | ``foobar`` | ``["-ifoobar"]`` | + +---------------+---------------------+ + +- ``cmd_runner_fmt.as_opt_val()`` + This method receives one parameter ``arg``, the function returns ``[arg, value]``. + + - Creation: + ``cmd_runner_fmt.as_opt_val("--name")`` + - Examples: + +--------------+--------------------------+ + | Value | Outcome | + +==============+==========================+ + | ``abc`` | ``["--name", "abc"]`` | + +--------------+--------------------------+ + +- ``cmd_runner_fmt.as_opt_eq_val()`` + This method receives one parameter ``arg``, the function returns the string of the form + ``{arg}={value}``. + + - Creation: + ``cmd_runner_fmt.as_opt_eq_val("--num-cpus")`` + - Examples: + +------------+-------------------------+ + | Value | Outcome | + +============+=========================+ + | ``10`` | ``["--num-cpus=10"]`` | + +------------+-------------------------+ + +- ``cmd_runner_fmt.as_fixed()`` + This method receives one parameter ``arg``, the function expects no ``value`` - if one + is provided then it is ignored. + The function returns ``arg`` as-is. 
+
+  - Creation:
+    ``cmd_runner_fmt.as_fixed("--version")``
+  - Examples:
+    +---------+-----------------------+
+    | Value   | Outcome               |
+    +=========+=======================+
+    |         | ``["--version"]``     |
+    +---------+-----------------------+
+    | 57      | ``["--version"]``     |
+    +---------+-----------------------+
+
+  - Note:
+    This is the only special case in which a value can be missing for the formatting function.
+    The example also comes from the code in `Quickstart`_.
+    In that case, the module has code to determine the command's version so that it can assert compatibility.
+    There is no *value* to be passed for that CLI argument.
+
+- ``cmd_runner_fmt.as_map()``
+  This method receives one parameter ``arg`` which must be a dictionary, and an optional parameter ``default``.
+  The function returns the evaluation of ``arg[value]``.
+  If ``value not in arg``, then it returns ``default`` if defined, otherwise ``[]``.
+
+  - Creation:
+    ``cmd_runner_fmt.as_map(dict(a=1, b=2, c=3), default=42)``
+  - Examples:
+    +---------------------+---------------+
+    | Value               | Outcome       |
+    +=====================+===============+
+    | ``"b"``             | ``["2"]``     |
+    +---------------------+---------------+
+    | ``"yabadabadoo"``   | ``["42"]``    |
+    +---------------------+---------------+
+
+  - Note:
+    If ``default`` is not specified, invalid values return an empty list, meaning they are silently ignored.
+
+- ``cmd_runner_fmt.as_func()``
+  This method receives one parameter ``arg``, which is itself a format function and must abide by the rules described above.
+
+  - Creation:
+    ``cmd_runner_fmt.as_func(lambda v: [] if v == 'stable' else ['--channel', '{0}'.format(v)])``
+  - Note:
+    The outcome for that depends entirely on the function provided by the developer.
+
+
+Other features for argument formatting
+""""""""""""""""""""""""""""""""""""""
+
+Some additional features are available as decorators:
+
+- ``cmd_runner_fmt.unpack_args()``
+  This decorator unpacks the incoming ``value`` as a list of elements.
+
+  For example, in ``ansible_collections.community.general.plugins.module_utils.puppet``, it is used as:
+
+  .. code-block:: python
+
+      @cmd_runner_fmt.unpack_args
+      def execute_func(execute, manifest):
+          if execute:
+              return ["--execute", execute]
+          else:
+              return [manifest]
+
+      runner = CmdRunner(
+          module,
+          command=_prepare_base_cmd(),
+          path_prefix=_PUPPET_PATH_PREFIX,
+          arg_formats=dict(
+              # ...
+              _execute=cmd_runner_fmt.as_func(execute_func),
+              # ...
+          ),
+      )
+
+  Then, in :ansplugin:`community.general.puppet#module` it is put to use with:
+
+  .. code-block:: python
+
+      with runner(args_order) as ctx:
+          rc, stdout, stderr = ctx.run(_execute=[p['execute'], p['manifest']])
+
+- ``cmd_runner_fmt.unpack_kwargs()``
+  Conversely, this decorator unpacks the incoming ``value`` as a ``dict``-like object.
+
+- ``cmd_runner_fmt.stack()``
+  This decorator assumes ``value`` is a sequence and concatenates the output
+  of the wrapped function applied to each element of the sequence.
+
+  For example, in :ansplugin:`community.general.django_check#module`, the argument format for ``database``
+  is defined as:
+
+  .. code-block:: python
+
+      arg_formats = dict(
+          # ...
+          database=cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_val)("--database"),
+          # ...
+      )
+
+  When receiving a list ``["abc", "def"]``, the output is:
+
+  .. code-block:: python
+
+      ["--database", "abc", "--database", "def"]
+
+
+Command Runner
+^^^^^^^^^^^^^^
+
+Settings that can be passed to the ``CmdRunner`` constructor are:
+
+- ``module: AnsibleModule``
+  Module instance.
Mandatory parameter. +- ``command: str | list[str]`` + Command to be executed. It can be a single string, the executable name, or a list + of strings containing the executable name as the first element and, optionally, fixed parameters. + Those parameters are used in all executions of the runner. + The *executable* pointed by this parameter (whether itself when ``str`` or its first element when ``list``) is + processed using ``AnsibleModule.get_bin_path()`` *unless* it is an absolute path or contains the character ``/``. +- ``arg_formats: dict`` + Mapping of argument names to formatting functions. +- ``default_args_order: str`` + As the name suggests, a default ordering for the arguments. When + this is passed, the context can be created without specifying ``args_order``. Defaults to ``()``. +- ``check_rc: bool`` + When ``True``, if the return code from the command is not zero, the module exits + with an error. Defaults to ``False``. +- ``path_prefix: list[str]`` + If the command being executed is installed in a non-standard directory path, + additional paths might be provided to search for the executable. Defaults to ``None``. +- ``environ_update: dict`` + Pass additional environment variables to be set during the command execution. + Defaults to ``None``. +- ``force_lang: str`` + It is usually important to force the locale to one specific value, so that responses are consistent and, therefore, parseable. + Please note that using this option (which is enabled by default) overwrites the environment variables ``LANGUAGE`` and ``LC_ALL``. + To disable this mechanism, set this parameter to ``None``. + In community.general 9.1.0 a special value ``auto`` was introduced for this parameter, with the effect + that ``CmdRunner`` then tries to determine the best parseable locale for the runtime. + It should become the default value in the future, but for the time being the default value is ``C``. + +When creating a context, the additional settings that can be passed to the call are: + +- ``args_order: str`` + Establishes the order in which the arguments are rendered in the command line. + This parameter is mandatory unless ``default_args_order`` was provided to the runner instance. +- ``output_process: func`` + Function to transform the output of the executable into different values or formats. + See examples in section below. +- ``check_mode_skip: bool`` + Whether to skip the actual execution of the command when the module is in check mode. + Defaults to ``False``. +- ``check_mode_return: any`` + If ``check_mode_skip=True``, then return this value instead. +- valid named arguments to ``AnsibleModule.run_command()`` + Other than ``args``, any valid argument to ``run_command()`` can be passed when setting up the run context. + For example, ``data`` can be used to send information to the command's standard input. + Or ``cwd`` can be used to run the command inside a specific working directory. + +Additionally, any other valid parameters for ``AnsibleModule.run_command()`` may be passed, but unexpected behavior +might occur if redefining options already present in the runner or its context creation. Use with caution. + + +Processing results +^^^^^^^^^^^^^^^^^^ + +As mentioned, ``CmdRunner`` uses ``AnsibleModule.run_command()`` to execute the external command, +and it passes the return value from that method back to caller. That means that, +by default, the result is going to be a tuple ``(rc, stdout, stderr)``. 
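+
+As a hedged sketch (reusing the ``runner`` object from the `Quickstart`_ section, not a new API), a context created without ``output_process`` simply hands that tuple back:
+
+.. code-block:: python
+
+    with runner("version") as ctx:
+        rc, stdout, stderr = ctx.run()
+        # rc, stdout and stderr come straight from AnsibleModule.run_command()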
+
+If you need to transform or process that output, you can pass a function to the context,
+as the ``output_process`` parameter. It must be a function like:
+
+.. code-block:: python
+
+    def process(rc, stdout, stderr):
+        # do some magic
+        return processed_value    # whatever that is
+
+In that case, the return of ``run()`` is the ``processed_value`` returned by the function.
+
+
+PythonRunner
+^^^^^^^^^^^^
+
+The ``PythonRunner`` class is a specialized version of ``CmdRunner``, geared towards the execution of
+Python scripts. It features two extra and mutually exclusive parameters ``python`` and ``venv`` in its constructor:
+
+.. code-block:: python
+
+    from ansible_collections.community.general.plugins.module_utils.python_runner import PythonRunner
+    from ansible_collections.community.general.plugins.module_utils.cmd_runner import cmd_runner_fmt
+
+    runner = PythonRunner(
+        module,
+        command=["-m", "django"],
+        arg_formats=dict(...),
+        python="python",
+        venv="/path/to/some/venv",
+    )
+
+The default value for ``python`` is the string ``python``, and for ``venv`` it is ``None``.
+
+The command line produced by such a runner with ``python="python3.12"`` is something like:
+
+.. code-block:: shell
+
+    /usr/bin/python3.12 -m django ...
+
+And the command line for ``venv="/work/venv"`` is like:
+
+.. code-block:: shell
+
+    /work/venv/bin/python -m django ...
+
+You may provide the value of the ``command`` argument as a string (in that case the string is used as a script name)
+or as a list, in which case the elements of the list must be valid arguments for the Python interpreter, as in the example above.
+See `Command line and environment `_ for more details.
+
+If the parameter ``python`` is an absolute path, or contains directory separators, such as ``/``, then it is used
+as-is, otherwise the runtime ``PATH`` is searched for that command name.
+
+Everything else works as in ``CmdRunner``.
+
+.. versionadded:: 4.8.0
diff --git a/docs/docsite/rst/guide_deps.rst b/docs/docsite/rst/guide_deps.rst
new file mode 100644 index 0000000000..4c0c4687a4
--- /dev/null
+++ b/docs/docsite/rst/guide_deps.rst
@@ -0,0 +1,74 @@
+..
+  Copyright (c) Ansible Project
+  GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+  SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_deps:
+
+``deps`` Guide
+==============
+
+
+Using ``deps``
+^^^^^^^^^^^^^^
+
+The ``ansible_collections.community.general.plugins.module_utils.deps`` module util simplifies
+the importing of code as described in :ref:`Importing and using shared code `.
+Please note that ``deps`` is meant to be used specifically with Ansible modules, and not other types of plugins.
+
+The same example from the Developer Guide would become:
+
+.. code-block:: python
+
+    from ansible_collections.community.general.plugins.module_utils import deps
+
+    with deps.declare("foo"):
+        import foo
+
+Then in ``main()``, just after the argspec (or anywhere in the code, for that matter), do
+
+.. code-block:: python
+
+    deps.validate(module)  # assuming module is a valid AnsibleModule instance
+
+By default, ``deps`` will rely on ``ansible.module_utils.basic.missing_required_lib`` to generate
+a message about a failing import. That function accepts parameters ``reason`` and ``url``,
+and so does ``deps``:
+
+..
code-block:: python + + with deps.declare("foo", reason="foo is needed to properly bar", url="https://foo.bar.io"): + import foo + +If you would rather write a custom message instead of using ``missing_required_lib`` then do: + +.. code-block:: python + + with deps.declare("foo", msg="Custom msg explaining why foo is needed"): + import foo + +``deps`` allows for multiple dependencies to be declared: + +.. code-block:: python + + with deps.declare("foo"): + import foo + + with deps.declare("bar"): + import bar + + with deps.declare("doe"): + import doe + +By default, ``deps.validate()`` will check on all the declared dependencies, but if so desired, +they can be validated selectively by doing: + +.. code-block:: python + + deps.validate(module, "foo") # only validates the "foo" dependency + + deps.validate(module, "doe:bar") # only validates the "doe" and "bar" dependencies + + deps.validate(module, "-doe:bar") # validates all dependencies except "doe" and "bar" + +.. versionadded:: 6.1.0 diff --git a/docs/docsite/rst/guide_modulehelper.rst b/docs/docsite/rst/guide_modulehelper.rst new file mode 100644 index 0000000000..68b46e6c94 --- /dev/null +++ b/docs/docsite/rst/guide_modulehelper.rst @@ -0,0 +1,540 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_modulehelper: + +Module Helper guide +=================== + + +Introduction +^^^^^^^^^^^^ + +Writing a module for Ansible is largely described in existing documentation. +However, a good part of that is boilerplate code that needs to be repeated every single time. +That is where ``ModuleHelper`` comes to assistance: a lot of that boilerplate code is done. + +.. _ansible_collections.community.general.docsite.guide_modulehelper.quickstart: + +Quickstart +"""""""""" + +See the `example from Ansible documentation `_ +written with ``ModuleHelper``. +But bear in mind that it does not showcase all of MH's features: + +.. code-block:: python + + from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper + + + class MyTest(ModuleHelper): + module = dict( + argument_spec=dict( + name=dict(type='str', required=True), + new=dict(type='bool', required=False, default=False), + ), + supports_check_mode=True, + ) + use_old_vardict = False + + def __run__(self): + self.vars.original_message = '' + self.vars.message = '' + if self.check_mode: + return + self.vars.original_message = self.vars.name + self.vars.message = 'goodbye' + self.changed = self.vars['new'] + if self.vars.name == "fail me": + self.do_raise("You requested this to fail") + + + def main(): + MyTest.execute() + + + if __name__ == '__main__': + main() + + +Module Helper +^^^^^^^^^^^^^ + +Introduction +"""""""""""" + +``ModuleHelper`` is a wrapper around the standard ``AnsibleModule``, providing extra features and conveniences. +The basic structure of a module using ``ModuleHelper`` is as shown in the +:ref:`ansible_collections.community.general.docsite.guide_modulehelper.quickstart` +section above, but there are more elements that will take part in it. + +.. 
code-block:: python
+
+    from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper
+
+    class MyTest(ModuleHelper):
+        output_params = ()
+        change_params = ()
+        diff_params = ()
+        facts_name = None
+        facts_params = ()
+        use_old_vardict = True
+        mute_vardict_deprecation = False
+        module = dict(
+            argument_spec=dict(...),
+            # ...
+        )
+
+After importing the ``ModuleHelper`` class, you need to declare your own class extending it.
+
+.. seealso::
+
+    There is a variation called ``StateModuleHelper``, which builds on top of the features provided by MH.
+    See :ref:`ansible_collections.community.general.docsite.guide_modulehelper.statemh` below for more details.
+
+The easiest way of specifying the module is to create the class variable ``module`` with a dictionary
+containing the exact arguments that would be passed as parameters to ``AnsibleModule``.
+If you prefer to create the ``AnsibleModule`` object yourself, just assign it to the ``module`` class variable.
+MH also accepts a parameter ``module`` in its constructor; if that parameter is used,
+it overrides the class variable. The parameter can be either a ``dict`` or an ``AnsibleModule`` instance.
+
+Beyond the definition of the module, there are other variables that can be used to control aspects
+of MH's behavior. These variables should be set at the very beginning of the class, and their semantics are
+explained throughout this document.
+
+The main logic of MH happens in the ``ModuleHelper.run()`` method, which looks like:
+
+.. code-block:: python
+
+    @module_fails_on_exception
+    def run(self):
+        self.__init_module__()
+        self.__run__()
+        self.__quit_module__()
+        output = self.output
+        if 'failed' not in output:
+            output['failed'] = False
+        self.module.exit_json(changed=self.has_changed(), **output)
+
+The method ``ModuleHelper.__run__()`` must be implemented by the module, and most
+modules will be able to perform their actions by implementing only that MH method.
+However, in some cases, you might want to execute actions before or after the main tasks, in which case
+you should implement ``ModuleHelper.__init_module__()`` and ``ModuleHelper.__quit_module__()`` respectively.
+
+Note that the output comes from ``self.output``, which is a ``@property`` method.
+By default, that property will collect all the variables that are marked for output and return them in a dictionary with their values.
+Moreover, the default ``self.output`` will also handle Ansible ``facts`` and *diff mode*.
+Also note that the changed status comes from ``self.has_changed()``, which is usually calculated from variables that are marked
+to track changes in their content.
+
+.. seealso::
+
+    More details in sections
+    :ref:`ansible_collections.community.general.docsite.guide_modulehelper.paramvaroutput` and
+    :ref:`ansible_collections.community.general.docsite.guide_modulehelper.changes` below.
+
+.. seealso::
+
+    See more about the decorator
+    :ref:`ansible_collections.community.general.docsite.guide_modulehelper.modulefailsdeco` below.
+
+
+Another way to write the example from the
+:ref:`ansible_collections.community.general.docsite.guide_modulehelper.quickstart`
+would be:
+
+..
code-block:: python
+
+    def __init_module__(self):
+        self.vars.original_message = ''
+        self.vars.message = ''
+
+    def __run__(self):
+        if self.check_mode:
+            return
+        self.vars.original_message = self.vars.name
+        self.vars.message = 'goodbye'
+        self.changed = self.vars['new']
+
+    def __quit_module__(self):
+        if self.vars.name == "fail me":
+            self.do_raise("You requested this to fail")
+
+Notice that there are no calls to ``module.exit_json()`` or ``module.fail_json()``: if the module fails, raise an exception.
+You can use the convenience method ``self.do_raise()`` or raise the exception as usual in Python to do that.
+If no exception is raised, then the module succeeds.
+
+.. seealso::
+
+    See more about exceptions in section
+    :ref:`ansible_collections.community.general.docsite.guide_modulehelper.exceptions` below.
+
+Ansible modules must have a ``main()`` function and the usual test for ``'__main__'``. When using MH that should look like:
+
+.. code-block:: python
+
+    def main():
+        MyTest.execute()
+
+
+    if __name__ == '__main__':
+        main()
+
+The class method ``execute()`` is nothing more than a convenience shortcut for:
+
+.. code-block:: python
+
+    m = MyTest()
+    m.run()
+
+Optionally, an ``AnsibleModule`` may be passed as a parameter to ``execute()``.
+
+.. _ansible_collections.community.general.docsite.guide_modulehelper.paramvaroutput:
+
+Parameters, variables, and output
+"""""""""""""""""""""""""""""""""
+
+All the parameters automatically become variables in the ``self.vars`` attribute, which is of the ``VarDict`` type.
+By using ``self.vars``, you get a central mechanism to access the parameters but also to expose variables as return values of the module.
+As described in :ref:`ansible_collections.community.general.docsite.guide_vardict`, variables in ``VarDict`` have metadata associated with them.
+One of the attributes in that metadata marks the variable for output, and MH makes use of that to generate the module's return values.
+
+.. important::
+
+    The ``VarDict`` feature described was introduced in community.general 7.1.0, but there was a first
+    implementation of it embedded within ``ModuleHelper``.
+    That older implementation is now deprecated and will be removed in community.general 11.0.0.
+    After community.general 7.1.0, MH modules generate a deprecation message about *using the old VarDict*.
+    There are two ways to prevent that from happening:
+
+    #. Set ``mute_vardict_deprecation = True`` and the deprecation will be silenced. If the module still uses the old ``VarDict``,
+       it will not be able to update to community.general 11.0.0 (Spring 2026) upon its release.
+    #. Set ``use_old_vardict = False`` to make the MH module use the new ``VarDict`` immediately.
+       The new ``VarDict`` and its use are documented, and this is the recommended way to handle this.
+
+    .. code-block:: python
+
+        class MyTest(ModuleHelper):
+            use_old_vardict = False
+            mute_vardict_deprecation = True
+            ...
+
+    These two settings are mutually exclusive, but that is not enforced and the behavior when setting both is not specified.
+
+Contrary to new variables created in ``VarDict``, module parameters are not set for output by default.
+If you want to include some module parameters in the output, list them in the ``output_params`` class variable.
+
+.. code-block:: python
+
+    class MyTest(ModuleHelper):
+        output_params = ('state', 'name')
+        ...
+
+Another neat feature MH provides through ``VarDict`` is the automatic tracking of changes when setting the metadata ``change=True``.
+Again, to enable this feature for module parameters, you must list them in the ``change_params`` class variable.
+
+.. code-block:: python
+
+    class MyTest(ModuleHelper):
+        # example from community.general.xfconf
+        change_params = ('value', )
+        ...
+
+.. seealso::
+
+    See more about this in
+    :ref:`ansible_collections.community.general.docsite.guide_modulehelper.changes` below.
+
+Similarly, if you want to use Ansible's diff mode, you can set the metadata ``diff=True`` and ``diff_params`` for module parameters.
+With that, MH will automatically generate the diff output for variables that have changed.
+
+.. code-block:: python
+
+    class MyTest(ModuleHelper):
+        diff_params = ('value', )
+
+        def __run__(self):
+            # example from community.general.gio_mime
+            self.vars.set_meta("handler", initial_value=gio_mime_get(self.runner, self.vars.mime_type), diff=True, change=True)
+
+Moreover, if a module is set to return *facts* instead of return values, then again use the metadata ``fact=True`` and ``fact_params`` for module parameters.
+Additionally, you must specify ``facts_name``, as in:
+
+.. code-block:: python
+
+    class VolumeFacts(ModuleHelper):
+        facts_name = 'volume_facts'
+
+        def __init_module__(self):
+            self.vars.set("volume", 123, fact=True)
+
+That generates an Ansible fact like:
+
+.. code-block:: yaml+jinja
+
+    - name: Obtain volume facts
+      some.collection.volume_facts:
+        # parameters
+
+    - name: Print volume facts
+      debug:
+        msg: Volume fact is {{ ansible_facts.volume_facts.volume }}
+
+.. important::
+
+    If ``facts_name`` is not set, the module does not generate any facts.
+
+
+.. _ansible_collections.community.general.docsite.guide_modulehelper.changes:
+
+Handling changes
+""""""""""""""""
+
+In MH there are many ways to indicate a change in the module execution. Here they are:
+
+Tracking changes in variables
+-----------------------------
+
+As explained above, you can enable change tracking in any number of variables in ``self.vars``.
+By the end of the module execution, if any of those variables has a value different from the first value assigned to it,
+that is picked up by MH and signalled as a change in the module output.
+See the example below to learn how you can enable change tracking in variables:
+
+.. code-block:: python
+
+    # using __init_module__() as example, it works the same in __run__() and __quit_module__()
+    def __init_module__(self):
+        # example from community.general.ansible_galaxy_install
+        self.vars.set("new_roles", {}, change=True)
+
+        # example of a "hidden" variable used only to track change in a value, from community.general.gconftool2
+        self.vars.set('_value', self.vars.previous_value, output=False, change=True)
+
+        # enable change-tracking without assigning a value
+        self.vars.set_meta("new_roles", change=True)
+
+        # if you must forcibly set an initial value to the variable
+        self.vars.set_meta("new_roles", initial_value=[])
+        ...
+
+If the end value of any variable marked ``change`` is different from its initial value, then MH will return ``changed=True``.
+
+Indicating changes with ``changed``
+-----------------------------------
+
+If you want to indicate a change directly in the code, then use the ``self.changed`` property in MH.
+Beware that this is a ``@property`` method in MH, with both a *getter* and a *setter*.
+By default, the underlying field is set to ``False``.
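+
+For example, a method can set ``self.changed`` directly after performing an action.
+The sketch below is illustrative only; ``needs_update()`` and ``do_something()`` are hypothetical helpers:
+
+.. code-block:: python
+
+    def __run__(self):
+        # flag the change explicitly instead of tracking a variable
+        if needs_update():
+            do_something()
+            self.changed = True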
+
+Effective change
+----------------
+
+The effective outcome for the module is determined in the ``self.has_changed()`` method, and it consists of the logical *OR* operation
+between ``self.changed`` and the change calculated from ``self.vars``.
+
+.. _ansible_collections.community.general.docsite.guide_modulehelper.exceptions:
+
+Exceptions
+""""""""""
+
+In MH, instead of calling ``module.fail_json()``, you can just raise an exception.
+The output variables are collected the same way they would be for a successful execution.
+However, you can set output variables specifically for that exception, if you so choose.
+
+.. code-block:: python
+
+    def __init_module__(self):
+        if not complex_validation():
+            self.do_raise("Validation failed!")
+
+        # Or passing output variables
+        awesomeness = calculate_awesomeness()
+        if awesomeness > 1000:
+            self.do_raise("Over awesome, I cannot handle it!", update_output={"awesomeness": awesomeness})
+
+All exceptions derived from ``Exception`` are captured and translated into a ``fail_json()`` call.
+However, if you do want to call ``self.module.fail_json()`` yourself, it will work;
+just keep in mind that there will be no automatic handling of output variables in that case.
+
+.. _ansible_collections.community.general.docsite.guide_modulehelper.statemh:
+
+StateModuleHelper
+^^^^^^^^^^^^^^^^^
+
+Many modules use a parameter ``state`` that effectively controls the exact action performed by the module, such as
+``state=present`` or ``state=absent`` for installing or removing packages.
+By using ``StateModuleHelper`` you can write your code like the excerpt from the ``gconftool2`` module below:
+
+.. code-block:: python
+
+    from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper
+
+    class GConftool(StateModuleHelper):
+        ...
+        module = dict(
+            ...
+        )
+        use_old_vardict = False
+
+        def __init_module__(self):
+            self.runner = gconftool2_runner(self.module, check_rc=True)
+            ...
+
+            self.vars.set('previous_value', self._get(), fact=True)
+            self.vars.set('value_type', self.vars.value_type)
+            self.vars.set('_value', self.vars.previous_value, output=False, change=True)
+            self.vars.set_meta('value', initial_value=self.vars.previous_value)
+            self.vars.set('playbook_value', self.vars.value, fact=True)
+
+        ...
+
+        def state_absent(self):
+            with self.runner("state key", output_process=self._make_process(False)) as ctx:
+                ctx.run()
+                self.vars.set('run_info', ctx.run_info, verbosity=4)
+            self.vars.set('new_value', None, fact=True)
+            self.vars._value = None
+
+        def state_present(self):
+            with self.runner("direct config_source value_type state key value", output_process=self._make_process(True)) as ctx:
+                ctx.run()
+                self.vars.set('run_info', ctx.run_info, verbosity=4)
+            self.vars.set('new_value', self._get(), fact=True)
+            self.vars._value = self.vars.new_value
+
+Note that the method ``__run__()`` is already implemented in ``StateModuleHelper``; all you need to implement are the ``state_*()`` methods.
+In the example above, :ansplugin:`community.general.gconftool2#module` only has two states, ``present`` and ``absent``, thus ``state_present()`` and ``state_absent()``.
+
+If the controlling parameter is not called ``state``, as in the :ansplugin:`community.general.jira#module` module, just let SMH know about it:
+
+.. code-block:: python
+
+    class JIRA(StateModuleHelper):
+        state_param = 'operation'
+
+        def operation_create(self):
+            ...
+
+        def operation_search(self):
+            ...
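+
+Conceptually, the dispatch performed by SMH behaves along these lines (illustrative sketch only,
+based on the behavior described in this section; this is not the actual implementation):
+
+.. code-block:: python
+
+    def __run__(self):
+        # pick the method matching the controlling parameter, for example state_present() or operation_create()
+        state = self.vars[self.state_param]
+        method = getattr(self, "{0}_{1}".format(self.state_param, state), self.__state_fallback__)
+        method()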
+
+Lastly, if the module is called with ``state=somevalue`` and the method ``state_somevalue``
+is not implemented, SMH falls back to calling a method named ``__state_fallback__()``.
+By default, this method raises a ``ValueError`` indicating that the method was not found.
+Naturally, you can override that method to write a default implementation, as in :ansplugin:`community.general.locale_gen#module`:
+
+.. code-block:: python
+
+    def __state_fallback__(self):
+        if self.vars.state_tracking == self.vars.state:
+            return
+        if self.vars.ubuntu_mode:
+            self.apply_change_ubuntu(self.vars.state, self.vars.name)
+        else:
+            self.apply_change(self.vars.state, self.vars.name)
+
+That module has only the states ``present`` and ``absent``, and the code for both lives in the fallback method.
+
+.. note::
+
+    The name of the fallback method **does not change** if you set a different value for ``state_param``.
+
+
+Other Conveniences
+^^^^^^^^^^^^^^^^^^
+
+Delegations to AnsibleModule
+""""""""""""""""""""""""""""
+
+The MH properties and methods below are delegated as-is to the underlying ``AnsibleModule`` instance in ``self.module``:
+
+- ``check_mode``
+- ``get_bin_path()``
+- ``warn()``
+- ``deprecate()``
+
+Additionally, MH will also delegate:
+
+- ``diff_mode`` to ``self.module._diff``
+- ``verbosity`` to ``self.module._verbosity``
+
+Decorators
+""""""""""
+
+The following decorators should only be used within a ``ModuleHelper`` class.
+
+@cause_changes
+--------------
+
+This decorator controls whether the outcome of the method causes the module to signal a change in its output.
+If the method completes without raising an exception, it is considered to have succeeded; otherwise, it is considered to have failed.
+
+The decorator has a parameter ``when`` that accepts three different values: ``success``, ``failure``, and ``always``.
+There are also two legacy parameters, ``on_success`` and ``on_failure``, that will be deprecated, so do not use them.
+The value of ``changed`` in the module output will be set to ``True``:
+
+- ``when="success"`` and the method completes without raising an exception.
+- ``when="failure"`` and the method raises an exception.
+- ``when="always"``, regardless of whether the method raises an exception or not.
+
+.. code-block:: python
+
+    from ansible_collections.community.general.plugins.module_utils.module_helper import cause_changes
+
+    # adapted excerpt from the community.general.jira module
+    class JIRA(StateModuleHelper):
+        @cause_changes(when="success")
+        def operation_create(self):
+            ...
+
+If ``when`` has a different value or no parameters are specified, the decorator has no effect whatsoever.
+
+.. _ansible_collections.community.general.docsite.guide_modulehelper.modulefailsdeco:
+
+@module_fails_on_exception
+--------------------------
+
+In a method using this decorator, if an exception is raised, the text message of that exception is captured
+by the decorator and used to call ``self.module.fail_json()``.
+In most cases there is no need to use this decorator, because ``ModuleHelper.run()`` already uses it.
+
+@check_mode_skip
+----------------
+
+If the module is running in check mode, this decorator prevents the method from executing.
+The return value in that case is ``None``.
+
+.. code-block:: python
+
+    from ansible_collections.community.general.plugins.module_utils.module_helper import check_mode_skip
+
+    # adapted excerpt from the community.general.locale_gen module
+    class LocaleGen(StateModuleHelper):
+        @check_mode_skip
+        def __state_fallback__(self):
+            ...
+
+
+@check_mode_skip_returns
+------------------------
+
+This decorator is similar to the previous one, but the developer can control the return value for the method when running in check mode.
+It is used with one of two parameters. One is ``callable``, and the return value in check mode will be ``callable(self, *args, **kwargs)``,
+where ``self`` is the ``ModuleHelper`` instance and the union of ``args`` and ``kwargs`` contains all the parameters passed to the method.
+
+The other option is to use the parameter ``value``, in which case the method returns ``value`` when in check mode.
+
+
+References
+^^^^^^^^^^
+
+- `Ansible Developer Guide `_
+- `Creating a module `_
+- `Returning ansible facts `_
+- :ref:`ansible_collections.community.general.docsite.guide_vardict`
+
+
+.. versionadded:: 3.1.0
diff --git a/docs/docsite/rst/guide_vardict.rst b/docs/docsite/rst/guide_vardict.rst
new file mode 100644
index 0000000000..f65b09055b
--- /dev/null
+++ b/docs/docsite/rst/guide_vardict.rst
@@ -0,0 +1,176 @@
+..
+  Copyright (c) Ansible Project
+  GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+  SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_vardict:
+
+VarDict Guide
+=============
+
+Introduction
+^^^^^^^^^^^^
+
+The ``ansible_collections.community.general.plugins.module_utils.vardict`` module util provides the
+``VarDict`` class to help manage the module variables. That class is a container for module variables,
+especially the ones for which the module must keep track of state changes, and the ones that should
+be published as return values.
+
+Each variable has extra behaviors controlled by associated metadata, simplifying the generation of
+output values from the module.
+
+Quickstart
+""""""""""
+
+The simplest way of using ``VarDict`` is:
+
+.. code-block:: python
+
+    from ansible_collections.community.general.plugins.module_utils.vardict import VarDict
+
+Then in ``main()``, or any other function called from there:
+
+.. code-block:: python
+
+    vars = VarDict()
+
+    # Next 3 statements are equivalent
+    vars.abc = 123
+    vars["abc"] = 123
+    vars.set("abc", 123)
+
+    vars.xyz = "bananas"
+    vars.ghi = False
+
+And by the time the module is about to exit:
+
+.. code-block:: python
+
+    results = vars.output()
+    module.exit_json(**results)
+
+That makes the return value of the module:
+
+.. code-block:: javascript
+
+    {
+        "abc": 123,
+        "xyz": "bananas",
+        "ghi": false
+    }
+
+Metadata
+""""""""
+
+The metadata values associated with each variable are:
+
+- ``output: bool`` - marks the variable for module output as a module return value.
+- ``fact: bool`` - marks the variable for module output as an Ansible fact.
+- ``verbosity: int`` - sets the minimum verbosity level at which the variable is included in the output.
+- ``change: bool`` - controls the detection of changes in the variable value.
+- ``initial_value: any`` - used when ``change`` is enabled and you need to forcefully set an initial value for the variable.
+- ``diff: bool`` - used along with ``change``, this generates an Ansible-style diff ``dict``.
+
+See the sections below for more details on how to use the metadata.
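+
+As a quick illustration (the variable names and values below are arbitrary), several metadata
+flags can be combined in a single call:
+
+.. code-block:: python
+
+    # returned in the output, tracked for changes, and included in the diff
+    vars.set("mode", "0644", output=True, change=True, diff=True)
+    # internal helper value that is kept out of the module output
+    vars.set("attempts", 0, output=False)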
+
+
+Using VarDict
+^^^^^^^^^^^^^
+
+Basic Usage
+"""""""""""
+
+As shown above, variables can be accessed using the ``[]`` operator, as in a ``dict`` object,
+and also as an object attribute, such as ``vars.abc``. The form using the ``set()``
+method is special in the sense that you can use it to set metadata values:
+
+.. code-block:: python
+
+    vars.set("abc", 123, output=False)
+    vars.set("abc", 123, output=True, change=True)
+
+Another way to set metadata after the variables have been created is:
+
+.. code-block:: python
+
+    vars.set_meta("abc", output=False)
+    vars.set_meta("abc", output=True, change=True, diff=True)
+
+You can use either the operator or the attribute form to access the value of the variable. Other ways to
+access its value and its metadata are:
+
+.. code-block:: python
+
+    print("abc value = {0}".format(vars.var("abc")["value"]))        # get the value
+    print("abc output? {0}".format(vars.get_meta("abc")["output"]))  # get the metadata like this
+
+Method names such as ``set``, ``get_meta``, and ``output``, amongst others, are reserved and
+cannot be used as variable names. If you try to use a reserved name, a ``ValueError`` exception
+is raised with the message "Name is reserved".
+
+Generating output
+"""""""""""""""""
+
+By default, every variable created is enabled for output with its minimum verbosity set to zero;
+in other words, variables are always included in the output unless you say otherwise.
+
+You can control that when creating the variable for the first time or later in the code:
+
+.. code-block:: python
+
+    vars.set("internal", x + 4, output=False)
+    vars.set_meta("internal", output=False)
+
+You can also set the verbosity of a variable, for example:
+
+.. code-block:: python
+
+    vars.set("abc", x + 4)
+    vars.set("debug_x", x, verbosity=3)
+
+    results = vars.output(module._verbosity)
+    module.exit_json(**results)
+
+If the module was invoked with verbosity lower than 3, then the output will only contain
+the variable ``abc``. If running at higher verbosity, as in ``ansible-playbook -vvv``,
+then the output will also contain ``debug_x``.
+
+Generating facts is very similar to regular output, but variables are not marked as facts by default.
+
+.. code-block:: python
+
+    vars.set("modulefact", x + 4, fact=True)
+    vars.set("debugfact", x, fact=True, verbosity=3)
+
+    results = vars.output(module._verbosity)
+    results["ansible_facts"] = {"module_name": vars.facts(module._verbosity)}
+    module.exit_json(**results)
+
+Handling change
+"""""""""""""""
+
+You can use ``VarDict`` to determine whether variables have had their values changed.
+
+.. code-block:: python
+
+    vars.set("abc", 42, change=True)
+    vars.abc = 90
+
+    results = vars.output()
+    results["changed"] = vars.has_changed
+    module.exit_json(**results)
+
+When tracking changes in variables, you may want to present the difference between their initial and final
+values. For that, use:
+
+.. code-block:: python
+
+    vars.set("abc", 42, change=True, diff=True)
+    vars.abc = 90
+
+    results = vars.output()
+    results["changed"] = vars.has_changed
+    results["diff"] = vars.diff()
+    module.exit_json(**results)
+
+..
versionadded:: 7.1.0 diff --git a/galaxy.yml b/galaxy.yml index 397e104ca2..daffaf9d17 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -5,7 +5,7 @@ namespace: community name: general -version: 9.0.0 +version: 9.5.4 readme: README.md authors: - Ansible (https://github.com/ansible) diff --git a/meta/runtime.yml b/meta/runtime.yml index edeb53005f..5d4ed8cb89 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -6,6 +6,8 @@ requires_ansible: '>=2.13.0' action_groups: consul: + - consul_agent_check + - consul_agent_service - consul_auth_method - consul_binding_rule - consul_policy @@ -73,6 +75,10 @@ plugin_routing: deprecation: removal_version: 10.0.0 warning_text: Use community.general.consul_token and/or community.general.consul_policy instead. + hipchat: + deprecation: + removal_version: 11.0.0 + warning_text: The hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020. rax_cbs_attachments: tombstone: removal_version: 9.0.0 diff --git a/plugins/action/iptables_state.py b/plugins/action/iptables_state.py index 4a27ef8a01..5ea55af58c 100644 --- a/plugins/action/iptables_state.py +++ b/plugins/action/iptables_state.py @@ -88,6 +88,10 @@ class ActionModule(ActionBase): max_timeout = self._connection._play_context.timeout module_args = self._task.args + async_status_args = {} + starter_cmd = None + confirm_cmd = None + if module_args.get('state', None) == 'restored': if not wrap_async: if not check_mode: diff --git a/plugins/become/doas.py b/plugins/become/doas.py index 69e730aad4..761e5e1e95 100644 --- a/plugins/become/doas.py +++ b/plugins/become/doas.py @@ -13,7 +13,8 @@ DOCUMENTATION = ''' author: Ansible Core Team options: become_user: - description: User you 'become' to execute the task + description: User you 'become' to execute the task. + type: string ini: - section: privilege_escalation key: become_user @@ -26,7 +27,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_USER - name: ANSIBLE_DOAS_USER become_exe: - description: Doas executable + description: Doas executable. + type: string default: doas ini: - section: privilege_escalation @@ -40,7 +42,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_EXE - name: ANSIBLE_DOAS_EXE become_flags: - description: Options to pass to doas + description: Options to pass to doas. + type: string default: '' ini: - section: privilege_escalation @@ -54,7 +57,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_FLAGS - name: ANSIBLE_DOAS_FLAGS become_pass: - description: password for doas prompt + description: Password for doas prompt. + type: string required: false vars: - name: ansible_become_password @@ -68,8 +72,10 @@ DOCUMENTATION = ''' key: password prompt_l10n: description: - - List of localized strings to match for prompt detection - - If empty we'll use the built in one + - List of localized strings to match for prompt detection. + - If empty we will use the built in one. + type: list + elements: string default: [] ini: - section: doas_become_plugin diff --git a/plugins/become/dzdo.py b/plugins/become/dzdo.py index a358e84e39..d94c684d1f 100644 --- a/plugins/become/dzdo.py +++ b/plugins/become/dzdo.py @@ -13,7 +13,8 @@ DOCUMENTATION = ''' author: Ansible Core Team options: become_user: - description: User you 'become' to execute the task + description: User you 'become' to execute the task. 
+ type: string ini: - section: privilege_escalation key: become_user @@ -26,7 +27,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_USER - name: ANSIBLE_DZDO_USER become_exe: - description: Dzdo executable + description: Dzdo executable. + type: string default: dzdo ini: - section: privilege_escalation @@ -40,7 +42,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_EXE - name: ANSIBLE_DZDO_EXE become_flags: - description: Options to pass to dzdo + description: Options to pass to dzdo. + type: string default: -H -S -n ini: - section: privilege_escalation @@ -54,7 +57,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_FLAGS - name: ANSIBLE_DZDO_FLAGS become_pass: - description: Options to pass to dzdo + description: Options to pass to dzdo. + type: string required: false vars: - name: ansible_become_password diff --git a/plugins/become/ksu.py b/plugins/become/ksu.py index fa2f66864a..2be1832dc2 100644 --- a/plugins/become/ksu.py +++ b/plugins/become/ksu.py @@ -13,7 +13,8 @@ DOCUMENTATION = ''' author: Ansible Core Team options: become_user: - description: User you 'become' to execute the task + description: User you 'become' to execute the task. + type: string ini: - section: privilege_escalation key: become_user @@ -27,7 +28,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_KSU_USER required: true become_exe: - description: Su executable + description: Su executable. + type: string default: ksu ini: - section: privilege_escalation @@ -41,7 +43,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_EXE - name: ANSIBLE_KSU_EXE become_flags: - description: Options to pass to ksu + description: Options to pass to ksu. + type: string default: '' ini: - section: privilege_escalation @@ -55,7 +58,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_FLAGS - name: ANSIBLE_KSU_FLAGS become_pass: - description: ksu password + description: Ksu password. + type: string required: false vars: - name: ansible_ksu_pass @@ -69,8 +73,10 @@ DOCUMENTATION = ''' key: password prompt_l10n: description: - - List of localized strings to match for prompt detection - - If empty we'll use the built in one + - List of localized strings to match for prompt detection. + - If empty we will use the built in one. + type: list + elements: string default: [] ini: - section: ksu_become_plugin diff --git a/plugins/become/machinectl.py b/plugins/become/machinectl.py index e2773ed6a5..a0467c2c36 100644 --- a/plugins/become/machinectl.py +++ b/plugins/become/machinectl.py @@ -13,7 +13,8 @@ DOCUMENTATION = ''' author: Ansible Core Team options: become_user: - description: User you 'become' to execute the task + description: User you 'become' to execute the task. + type: string default: '' ini: - section: privilege_escalation @@ -27,7 +28,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_USER - name: ANSIBLE_MACHINECTL_USER become_exe: - description: Machinectl executable + description: Machinectl executable. + type: string default: machinectl ini: - section: privilege_escalation @@ -41,7 +43,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_EXE - name: ANSIBLE_MACHINECTL_EXE become_flags: - description: Options to pass to machinectl + description: Options to pass to machinectl. + type: string default: '' ini: - section: privilege_escalation @@ -55,7 +58,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_FLAGS - name: ANSIBLE_MACHINECTL_FLAGS become_pass: - description: Password for machinectl + description: Password for machinectl. 
+ type: string required: false vars: - name: ansible_become_password diff --git a/plugins/become/pbrun.py b/plugins/become/pbrun.py index 7d1437191e..8a96b75797 100644 --- a/plugins/become/pbrun.py +++ b/plugins/become/pbrun.py @@ -13,7 +13,8 @@ DOCUMENTATION = ''' author: Ansible Core Team options: become_user: - description: User you 'become' to execute the task + description: User you 'become' to execute the task. + type: string default: '' ini: - section: privilege_escalation @@ -27,7 +28,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_USER - name: ANSIBLE_PBRUN_USER become_exe: - description: Sudo executable + description: Sudo executable. + type: string default: pbrun ini: - section: privilege_escalation @@ -41,7 +43,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_EXE - name: ANSIBLE_PBRUN_EXE become_flags: - description: Options to pass to pbrun + description: Options to pass to pbrun. + type: string default: '' ini: - section: privilege_escalation @@ -55,7 +58,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_FLAGS - name: ANSIBLE_PBRUN_FLAGS become_pass: - description: Password for pbrun + description: Password for pbrun. + type: string required: false vars: - name: ansible_become_password @@ -68,7 +72,7 @@ DOCUMENTATION = ''' - section: pbrun_become_plugin key: password wrap_exe: - description: Toggle to wrap the command pbrun calls in 'shell -c' or not + description: Toggle to wrap the command pbrun calls in C(shell -c) or not. default: false type: bool ini: diff --git a/plugins/become/pfexec.py b/plugins/become/pfexec.py index 2468a28a94..d48d622713 100644 --- a/plugins/become/pfexec.py +++ b/plugins/become/pfexec.py @@ -14,9 +14,10 @@ DOCUMENTATION = ''' options: become_user: description: - - User you 'become' to execute the task + - User you 'become' to execute the task. - This plugin ignores this setting as pfexec uses it's own C(exec_attr) to figure this out, but it is supplied here for Ansible to make decisions needed for the task execution, like file permissions. + type: string default: root ini: - section: privilege_escalation @@ -30,7 +31,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_USER - name: ANSIBLE_PFEXEC_USER become_exe: - description: Sudo executable + description: Sudo executable. + type: string default: pfexec ini: - section: privilege_escalation @@ -44,7 +46,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_EXE - name: ANSIBLE_PFEXEC_EXE become_flags: - description: Options to pass to pfexec + description: Options to pass to pfexec. + type: string default: -H -S -n ini: - section: privilege_escalation @@ -58,7 +61,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_FLAGS - name: ANSIBLE_PFEXEC_FLAGS become_pass: - description: pfexec password + description: pfexec password. + type: string required: false vars: - name: ansible_become_password @@ -71,7 +75,7 @@ DOCUMENTATION = ''' - section: pfexec_become_plugin key: password wrap_exe: - description: Toggle to wrap the command pfexec calls in 'shell -c' or not + description: Toggle to wrap the command pfexec calls in C(shell -c) or not. default: false type: bool ini: @@ -82,7 +86,7 @@ DOCUMENTATION = ''' env: - name: ANSIBLE_PFEXEC_WRAP_EXECUTION notes: - - This plugin ignores O(become_user) as pfexec uses it's own C(exec_attr) to figure this out. + - This plugin ignores O(become_user) as pfexec uses its own C(exec_attr) to figure this out. 
''' from ansible.plugins.become import BecomeBase diff --git a/plugins/become/pmrun.py b/plugins/become/pmrun.py index 74b633f09a..908c5e759d 100644 --- a/plugins/become/pmrun.py +++ b/plugins/become/pmrun.py @@ -14,6 +14,7 @@ DOCUMENTATION = ''' options: become_exe: description: Sudo executable + type: string default: pmrun ini: - section: privilege_escalation @@ -27,7 +28,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_EXE - name: ANSIBLE_PMRUN_EXE become_flags: - description: Options to pass to pmrun + description: Options to pass to pmrun. + type: string default: '' ini: - section: privilege_escalation @@ -41,7 +43,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_FLAGS - name: ANSIBLE_PMRUN_FLAGS become_pass: - description: pmrun password + description: pmrun password. + type: string required: false vars: - name: ansible_become_password diff --git a/plugins/become/sesu.py b/plugins/become/sesu.py index 5958c1bfca..4dcb837e70 100644 --- a/plugins/become/sesu.py +++ b/plugins/become/sesu.py @@ -13,7 +13,8 @@ DOCUMENTATION = ''' author: ansible (@nekonyuu) options: become_user: - description: User you 'become' to execute the task + description: User you 'become' to execute the task. + type: string default: '' ini: - section: privilege_escalation @@ -27,7 +28,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_USER - name: ANSIBLE_SESU_USER become_exe: - description: sesu executable + description: sesu executable. + type: string default: sesu ini: - section: privilege_escalation @@ -41,7 +43,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_EXE - name: ANSIBLE_SESU_EXE become_flags: - description: Options to pass to sesu + description: Options to pass to sesu. + type: string default: -H -S -n ini: - section: privilege_escalation @@ -55,7 +58,8 @@ DOCUMENTATION = ''' - name: ANSIBLE_BECOME_FLAGS - name: ANSIBLE_SESU_FLAGS become_pass: - description: Password to pass to sesu + description: Password to pass to sesu. + type: string required: false vars: - name: ansible_become_password diff --git a/plugins/become/sudosu.py b/plugins/become/sudosu.py index 60bb2aa517..5454fd2316 100644 --- a/plugins/become/sudosu.py +++ b/plugins/become/sudosu.py @@ -16,6 +16,7 @@ DOCUMENTATION = """ options: become_user: description: User you 'become' to execute the task. + type: string default: root ini: - section: privilege_escalation @@ -30,6 +31,7 @@ DOCUMENTATION = """ - name: ANSIBLE_SUDO_USER become_flags: description: Options to pass to C(sudo). + type: string default: -H -S -n ini: - section: privilege_escalation @@ -44,6 +46,7 @@ DOCUMENTATION = """ - name: ANSIBLE_SUDO_FLAGS become_pass: description: Password to pass to C(sudo). + type: string required: false vars: - name: ansible_become_password @@ -55,6 +58,21 @@ DOCUMENTATION = """ ini: - section: sudo_become_plugin key: password + alt_method: + description: + - Whether to use an alternative method to call C(su). Instead of running C(su -l user /path/to/shell -c command), + it runs C(su -l user -c command). + - Use this when the default one is not working on your system. 
+ required: false + type: boolean + ini: + - section: community.general.sudosu + key: alternative_method + vars: + - name: ansible_sudosu_alt_method + env: + - name: ANSIBLE_SUDOSU_ALT_METHOD + version_added: 9.2.0 """ @@ -89,4 +107,7 @@ class BecomeModule(BecomeBase): if user: user = '%s' % (user) - return ' '.join([becomecmd, flags, prompt, 'su -l', user, self._build_success_command(cmd, shell)]) + if self.get_option('alt_method'): + return ' '.join([becomecmd, flags, prompt, "su -l", user, "-c", self._build_success_command(cmd, shell, True)]) + else: + return ' '.join([becomecmd, flags, prompt, 'su -l', user, self._build_success_command(cmd, shell)]) diff --git a/plugins/cache/memcached.py b/plugins/cache/memcached.py index 0bc5256b3f..5e066d626c 100644 --- a/plugins/cache/memcached.py +++ b/plugins/cache/memcached.py @@ -7,44 +7,46 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: memcached - short_description: Use memcached DB for cache +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: memcached +short_description: Use memcached DB for cache +description: + - This cache uses JSON formatted, per host records saved in memcached. +requirements: + - memcache (python lib) +options: + _uri: description: - - This cache uses JSON formatted, per host records saved in memcached. - requirements: - - memcache (python lib) - options: - _uri: - description: - - List of connection information for the memcached DBs - default: ['127.0.0.1:11211'] - type: list - elements: string - env: - - name: ANSIBLE_CACHE_PLUGIN_CONNECTION - ini: - - key: fact_caching_connection - section: defaults - _prefix: - description: User defined prefix to use when creating the DB entries - default: ansible_facts - env: - - name: ANSIBLE_CACHE_PLUGIN_PREFIX - ini: - - key: fact_caching_prefix - section: defaults - _timeout: - default: 86400 - description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire - env: - - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT - ini: - - key: fact_caching_timeout - section: defaults - type: integer -''' + - List of connection information for the memcached DBs. + default: ['127.0.0.1:11211'] + type: list + elements: string + env: + - name: ANSIBLE_CACHE_PLUGIN_CONNECTION + ini: + - key: fact_caching_connection + section: defaults + _prefix: + description: User defined prefix to use when creating the DB entries. + type: string + default: ansible_facts + env: + - name: ANSIBLE_CACHE_PLUGIN_PREFIX + ini: + - key: fact_caching_prefix + section: defaults + _timeout: + default: 86400 + type: integer + # TODO: determine whether it is OK to change to: type: float + description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire. + env: + - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT + ini: + - key: fact_caching_timeout + section: defaults +""" import collections import os diff --git a/plugins/cache/pickle.py b/plugins/cache/pickle.py index 06b673921e..60b1ea74e0 100644 --- a/plugins/cache/pickle.py +++ b/plugins/cache/pickle.py @@ -8,38 +8,41 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - name: pickle - short_description: Pickle formatted files. +DOCUMENTATION = r""" +name: pickle +short_description: Pickle formatted files +description: + - This cache uses Python's pickle serialization format, in per host files, saved to the filesystem. 
+author: Brian Coca (@bcoca) +options: + _uri: + required: true description: - - This cache uses Python's pickle serialization format, in per host files, saved to the filesystem. - author: Brian Coca (@bcoca) - options: - _uri: - required: true - description: - - Path in which the cache plugin will save the files - env: - - name: ANSIBLE_CACHE_PLUGIN_CONNECTION - ini: - - key: fact_caching_connection - section: defaults - _prefix: - description: User defined prefix to use when creating the files - env: - - name: ANSIBLE_CACHE_PLUGIN_PREFIX - ini: - - key: fact_caching_prefix - section: defaults - _timeout: - default: 86400 - description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire - env: - - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT - ini: - - key: fact_caching_timeout - section: defaults -''' + - Path in which the cache plugin will save the files. + env: + - name: ANSIBLE_CACHE_PLUGIN_CONNECTION + ini: + - key: fact_caching_connection + section: defaults + type: path + _prefix: + description: User defined prefix to use when creating the files. + env: + - name: ANSIBLE_CACHE_PLUGIN_PREFIX + ini: + - key: fact_caching_prefix + section: defaults + type: string + _timeout: + default: 86400 + description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire. + env: + - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT + ini: + - key: fact_caching_timeout + section: defaults + type: float +""" try: import cPickle as pickle diff --git a/plugins/cache/redis.py b/plugins/cache/redis.py index c43b1dbb5e..6ceda73910 100644 --- a/plugins/cache/redis.py +++ b/plugins/cache/redis.py @@ -6,62 +6,67 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: redis - short_description: Use Redis DB for cache +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: redis +short_description: Use Redis DB for cache +description: + - This cache uses JSON formatted, per host records saved in Redis. +requirements: + - redis>=2.4.5 (python lib) +options: + _uri: description: - - This cache uses JSON formatted, per host records saved in Redis. - requirements: - - redis>=2.4.5 (python lib) - options: - _uri: - description: - - A colon separated string of connection information for Redis. - - The format is V(host:port:db:password), for example V(localhost:6379:0:changeme). - - To use encryption in transit, prefix the connection with V(tls://), as in V(tls://localhost:6379:0:changeme). - - To use redis sentinel, use separator V(;), for example V(localhost:26379;localhost:26379;0:changeme). Requires redis>=2.9.0. - required: true - env: - - name: ANSIBLE_CACHE_PLUGIN_CONNECTION - ini: - - key: fact_caching_connection - section: defaults - _prefix: - description: User defined prefix to use when creating the DB entries - default: ansible_facts - env: - - name: ANSIBLE_CACHE_PLUGIN_PREFIX - ini: - - key: fact_caching_prefix - section: defaults - _keyset_name: - description: User defined name for cache keyset name. - default: ansible_cache_keys - env: - - name: ANSIBLE_CACHE_REDIS_KEYSET_NAME - ini: - - key: fact_caching_redis_keyset_name - section: defaults - version_added: 1.3.0 - _sentinel_service_name: - description: The redis sentinel service name (or referenced as cluster name). 
- env: - - name: ANSIBLE_CACHE_REDIS_SENTINEL - ini: - - key: fact_caching_redis_sentinel - section: defaults - version_added: 1.3.0 - _timeout: - default: 86400 - description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire - env: - - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT - ini: - - key: fact_caching_timeout - section: defaults - type: integer -''' + - A colon separated string of connection information for Redis. + - The format is V(host:port:db:password), for example V(localhost:6379:0:changeme). + - To use encryption in transit, prefix the connection with V(tls://), as in V(tls://localhost:6379:0:changeme). + - To use redis sentinel, use separator V(;), for example V(localhost:26379;localhost:26379;0:changeme). Requires redis>=2.9.0. + type: string + required: true + env: + - name: ANSIBLE_CACHE_PLUGIN_CONNECTION + ini: + - key: fact_caching_connection + section: defaults + _prefix: + description: User defined prefix to use when creating the DB entries. + type: string + default: ansible_facts + env: + - name: ANSIBLE_CACHE_PLUGIN_PREFIX + ini: + - key: fact_caching_prefix + section: defaults + _keyset_name: + description: User defined name for cache keyset name. + type: string + default: ansible_cache_keys + env: + - name: ANSIBLE_CACHE_REDIS_KEYSET_NAME + ini: + - key: fact_caching_redis_keyset_name + section: defaults + version_added: 1.3.0 + _sentinel_service_name: + description: The redis sentinel service name (or referenced as cluster name). + type: string + env: + - name: ANSIBLE_CACHE_REDIS_SENTINEL + ini: + - key: fact_caching_redis_sentinel + section: defaults + version_added: 1.3.0 + _timeout: + default: 86400 + type: integer + # TODO: determine whether it is OK to change to: type: float + description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire. + env: + - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT + ini: + - key: fact_caching_timeout + section: defaults +""" import re import time @@ -222,7 +227,7 @@ class CacheModule(BaseCacheModule): def copy(self): # TODO: there is probably a better way to do this in redis - ret = dict([(k, self.get(k)) for k in self.keys()]) + ret = {k: self.get(k) for k in self.keys()} return ret def __getstate__(self): diff --git a/plugins/cache/yaml.py b/plugins/cache/yaml.py index 3a5ddf3e6f..88cdad2acb 100644 --- a/plugins/cache/yaml.py +++ b/plugins/cache/yaml.py @@ -8,39 +8,42 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - name: yaml - short_description: YAML formatted files. +DOCUMENTATION = r""" +name: yaml +short_description: YAML formatted files +description: + - This cache uses YAML formatted, per host, files saved to the filesystem. +author: Brian Coca (@bcoca) +options: + _uri: + required: true description: - - This cache uses YAML formatted, per host, files saved to the filesystem. - author: Brian Coca (@bcoca) - options: - _uri: - required: true - description: - - Path in which the cache plugin will save the files - env: - - name: ANSIBLE_CACHE_PLUGIN_CONNECTION - ini: - - key: fact_caching_connection - section: defaults - _prefix: - description: User defined prefix to use when creating the files - env: - - name: ANSIBLE_CACHE_PLUGIN_PREFIX - ini: - - key: fact_caching_prefix - section: defaults - _timeout: - default: 86400 - description: Expiration timeout in seconds for the cache plugin data. 
Set to 0 to never expire - env: - - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT - ini: - - key: fact_caching_timeout - section: defaults - type: integer -''' + - Path in which the cache plugin will save the files. + env: + - name: ANSIBLE_CACHE_PLUGIN_CONNECTION + ini: + - key: fact_caching_connection + section: defaults + type: string + _prefix: + description: User defined prefix to use when creating the files. + env: + - name: ANSIBLE_CACHE_PLUGIN_PREFIX + ini: + - key: fact_caching_prefix + section: defaults + type: string + _timeout: + default: 86400 + description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire. + env: + - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT + ini: + - key: fact_caching_timeout + section: defaults + type: integer + # TODO: determine whether it is OK to change to: type: float +""" import codecs diff --git a/plugins/callback/cgroup_memory_recap.py b/plugins/callback/cgroup_memory_recap.py index d3961bf0c8..d33bc091d1 100644 --- a/plugins/callback/cgroup_memory_recap.py +++ b/plugins/callback/cgroup_memory_recap.py @@ -7,38 +7,41 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: cgroup_memory_recap - type: aggregate - requirements: - - whitelist in configuration - - cgroups - short_description: Profiles maximum memory usage of tasks and full execution using cgroups - description: - - This is an ansible callback plugin that profiles maximum memory usage of ansible and individual tasks, and displays a recap at the end using cgroups. - notes: - - Requires ansible to be run from within a cgroup, such as with C(cgexec -g memory:ansible_profile ansible-playbook ...). - - This cgroup should only be used by ansible to get accurate results. - - To create the cgroup, first use a command such as C(sudo cgcreate -a ec2-user:ec2-user -t ec2-user:ec2-user -g memory:ansible_profile). - options: - max_mem_file: - required: true - description: Path to cgroups C(memory.max_usage_in_bytes) file. Example V(/sys/fs/cgroup/memory/ansible_profile/memory.max_usage_in_bytes). - env: - - name: CGROUP_MAX_MEM_FILE - ini: - - section: callback_cgroupmemrecap - key: max_mem_file - cur_mem_file: - required: true - description: Path to C(memory.usage_in_bytes) file. Example V(/sys/fs/cgroup/memory/ansible_profile/memory.usage_in_bytes). - env: - - name: CGROUP_CUR_MEM_FILE - ini: - - section: callback_cgroupmemrecap - key: cur_mem_file -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: cgroup_memory_recap +type: aggregate +requirements: + - whitelist in configuration + - cgroups +short_description: Profiles maximum memory usage of tasks and full execution using cgroups +description: + - This is an Ansible callback plugin that profiles maximum memory usage of Ansible and individual tasks, and displays a + recap at the end using cgroups. +notes: + - Requires ansible to be run from within a C(cgroup), such as with C(cgexec -g memory:ansible_profile ansible-playbook ...). + - This C(cgroup) should only be used by Ansible to get accurate results. + - To create the C(cgroup), first use a command such as C(sudo cgcreate -a ec2-user:ec2-user -t ec2-user:ec2-user -g memory:ansible_profile). +options: + max_mem_file: + required: true + description: Path to cgroups C(memory.max_usage_in_bytes) file. Example V(/sys/fs/cgroup/memory/ansible_profile/memory.max_usage_in_bytes). 
+ type: str + env: + - name: CGROUP_MAX_MEM_FILE + ini: + - section: callback_cgroupmemrecap + key: max_mem_file + cur_mem_file: + required: true + description: Path to C(memory.usage_in_bytes) file. Example V(/sys/fs/cgroup/memory/ansible_profile/memory.usage_in_bytes). + type: str + env: + - name: CGROUP_CUR_MEM_FILE + ini: + - section: callback_cgroupmemrecap + key: cur_mem_file +""" import time import threading diff --git a/plugins/callback/context_demo.py b/plugins/callback/context_demo.py index b9558fc064..335a765340 100644 --- a/plugins/callback/context_demo.py +++ b/plugins/callback/context_demo.py @@ -7,17 +7,17 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: context_demo - type: aggregate - short_description: demo callback that adds play/task context - description: - - Displays some play and task context along with normal output. - - This is mostly for demo purposes. - requirements: - - whitelist in configuration -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: context_demo +type: aggregate +short_description: demo callback that adds play/task context +description: + - Displays some play and task context along with normal output. + - This is mostly for demo purposes. +requirements: + - whitelist in configuration +""" from ansible.plugins.callback import CallbackBase diff --git a/plugins/callback/counter_enabled.py b/plugins/callback/counter_enabled.py index 27adc97a6c..b441ae97f5 100644 --- a/plugins/callback/counter_enabled.py +++ b/plugins/callback/counter_enabled.py @@ -9,20 +9,20 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: counter_enabled - type: stdout - short_description: adds counters to the output items (tasks and hosts/task) - description: - - Use this callback when you need a kind of progress bar on a large environments. - - You will know how many tasks has the playbook to run, and which one is actually running. - - You will know how many hosts may run a task, and which of them is actually running. - extends_documentation_fragment: - - default_callback - requirements: - - set as stdout callback in C(ansible.cfg) (C(stdout_callback = counter_enabled)) -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: counter_enabled +type: stdout +short_description: adds counters to the output items (tasks and hosts/task) +description: + - Use this callback when you need a kind of progress bar on a large environments. + - You will know how many tasks has the playbook to run, and which one is actually running. + - You will know how many hosts may run a task, and which of them is actually running. 
+extends_documentation_fragment: + - default_callback +requirements: + - set as stdout callback in C(ansible.cfg) (C(stdout_callback = counter_enabled)) +""" from ansible import constants as C from ansible.plugins.callback import CallbackBase diff --git a/plugins/callback/default_without_diff.py b/plugins/callback/default_without_diff.py index c138cd4455..8f300d8e4f 100644 --- a/plugins/callback/default_without_diff.py +++ b/plugins/callback/default_without_diff.py @@ -7,23 +7,22 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' - name: default_without_diff - type: stdout - short_description: The default ansible callback without diff output - version_added: 8.4.0 - description: - - This is basically the default ansible callback plugin (P(ansible.builtin.default#callback)) without - showing diff output. This can be useful when using another callback which sends more detailed information - to another service, like the L(ARA, https://ara.recordsansible.org/) callback, and you want diff output - sent to that plugin but not shown on the console output. - author: Felix Fontein (@felixfontein) - extends_documentation_fragment: - - ansible.builtin.default_callback - - ansible.builtin.result_format_callback -''' +DOCUMENTATION = r""" +name: default_without_diff +type: stdout +short_description: The default ansible callback without diff output +version_added: 8.4.0 +description: + - This is basically the default ansible callback plugin (P(ansible.builtin.default#callback)) without showing diff output. + This can be useful when using another callback which sends more detailed information to another service, like the L(ARA, + https://ara.recordsansible.org/) callback, and you want diff output sent to that plugin but not shown on the console output. +author: Felix Fontein (@felixfontein) +extends_documentation_fragment: + - ansible.builtin.default_callback + - ansible.builtin.result_format_callback +""" -EXAMPLES = r''' +EXAMPLES = r""" # Enable callback in ansible.cfg: ansible_config: | [defaults] @@ -32,7 +31,7 @@ ansible_config: | # Enable callback with environment variables: environment_variable: | ANSIBLE_STDOUT_CALLBACK=community.general.default_without_diff -''' +""" from ansible.plugins.callback.default import CallbackModule as Default diff --git a/plugins/callback/dense.py b/plugins/callback/dense.py index 490705fd27..5757d5115c 100644 --- a/plugins/callback/dense.py +++ b/plugins/callback/dense.py @@ -7,19 +7,19 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" name: dense type: stdout short_description: minimal stdout output extends_documentation_fragment: -- default_callback + - default_callback description: -- When in verbose mode it will act the same as the default callback. + - When in verbose mode it will act the same as the default callback. 
author: -- Dag Wieers (@dagwieers) + - Dag Wieers (@dagwieers) requirements: -- set as stdout in configuration -''' + - set as stdout in configuration +""" HAS_OD = False try: diff --git a/plugins/callback/diy.py b/plugins/callback/diy.py index cf9369e4b4..09b6be1e88 100644 --- a/plugins/callback/diy.py +++ b/plugins/callback/diy.py @@ -7,602 +7,597 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' - name: diy - type: stdout - short_description: Customize the output - version_added: 0.2.0 - description: - - Callback plugin that allows you to supply your own custom callback templates to be output. - author: Trevor Highfill (@theque5t) - extends_documentation_fragment: - - default_callback - notes: - - Uses the P(ansible.builtin.default#callback) callback plugin output when a custom callback V(message(msg\)) is not provided. - - Makes the callback event data available via the C(ansible_callback_diy) dictionary, which can be used in the templating context for the options. - The dictionary is only available in the templating context for the options. It is not a variable that is available via the other - various execution contexts, such as playbook, play, task etc. - - Options being set by their respective variable input can only be set using the variable if the variable was set in a context that is available to the - respective callback. - Use the C(ansible_callback_diy) dictionary to see what is available to a callback. Additionally, C(ansible_callback_diy.top_level_var_names) will output - the top level variable names available to the callback. - - Each option value is rendered as a template before being evaluated. This allows for the dynamic usage of an option. For example, - C("{{ 'yellow' if ansible_callback_diy.result.is_changed else 'bright green' }}") - - "**Condition** for all C(msg) options: - if value C(is None or omit), - then the option is not being used. - **Effect**: use of the C(default) callback plugin for output" - - "**Condition** for all C(msg) options: - if value C(is not None and not omit and length is not greater than 0), - then the option is being used without output. - **Effect**: suppress output" - - "**Condition** for all C(msg) options: - if value C(is not None and not omit and length is greater than 0), - then the option is being used with output. - **Effect**: render value as template and output" - - "Valid color values: V(black), V(bright gray), V(blue), V(white), V(green), V(bright blue), V(cyan), V(bright green), V(red), V(bright cyan), - V(purple), V(bright red), V(yellow), V(bright purple), V(dark gray), V(bright yellow), V(magenta), V(bright magenta), V(normal)" - seealso: - - name: default – default Ansible screen output - description: The official documentation on the B(default) callback plugin. - link: https://docs.ansible.com/ansible/latest/plugins/callback/default.html - requirements: - - set as stdout_callback in configuration - options: - on_any_msg: - description: Output to be used for callback on_any. - ini: - - section: callback_diy - key: on_any_msg - env: - - name: ANSIBLE_CALLBACK_DIY_ON_ANY_MSG - vars: - - name: ansible_callback_diy_on_any_msg - type: str +DOCUMENTATION = r""" +name: diy +type: stdout +short_description: Customize the output +version_added: 0.2.0 +description: + - Callback plugin that allows you to supply your own custom callback templates to be output. 
+author: Trevor Highfill (@theque5t) +extends_documentation_fragment: + - default_callback +notes: + - Uses the P(ansible.builtin.default#callback) callback plugin output when a custom callback V(message(msg\)) is not provided. + - Makes the callback event data available using the C(ansible_callback_diy) dictionary, which can be used in the templating + context for the options. The dictionary is only available in the templating context for the options. It is not a variable + that is available using the other various execution contexts, such as playbook, play, task, and so on so forth. + - Options being set by their respective variable input can only be set using the variable if the variable was set in a context + that is available to the respective callback. Use the C(ansible_callback_diy) dictionary to see what is available to a + callback. Additionally, C(ansible_callback_diy.top_level_var_names) will output the top level variable names available + to the callback. + - Each option value is rendered as a template before being evaluated. This allows for the dynamic usage of an option. For + example, C("{{ 'yellow' if ansible_callback_diy.result.is_changed else 'bright green' }}"). + - 'B(Condition) for all C(msg) options: if value C(is None or omit), then the option is not being used. B(Effect): use + of the C(default) callback plugin for output.' + - 'B(Condition) for all C(msg) options: if value C(is not None and not omit and length is not greater than 0), then the + option is being used without output. B(Effect): suppress output.' + - 'B(Condition) for all C(msg) options: if value C(is not None and not omit and length is greater than 0), then the option + is being used with output. B(Effect): render value as template and output.' + - 'Valid color values: V(black), V(bright gray), V(blue), V(white), V(green), V(bright blue), V(cyan), V(bright green), + V(red), V(bright cyan), V(purple), V(bright red), V(yellow), V(bright purple), V(dark gray), V(bright yellow), V(magenta), + V(bright magenta), V(normal).' +seealso: + - name: default – default Ansible screen output + description: The official documentation on the B(default) callback plugin. + link: https://docs.ansible.com/ansible/latest/plugins/callback/default.html +requirements: + - set as stdout_callback in configuration +options: + on_any_msg: + description: Output to be used for callback on_any. + ini: + - section: callback_diy + key: on_any_msg + env: + - name: ANSIBLE_CALLBACK_DIY_ON_ANY_MSG + vars: + - name: ansible_callback_diy_on_any_msg + type: str - on_any_msg_color: - description: - - Output color to be used for O(on_any_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: on_any_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_ON_ANY_MSG_COLOR - vars: - - name: ansible_callback_diy_on_any_msg_color - type: str + on_any_msg_color: + description: + - Output color to be used for O(on_any_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: on_any_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_ON_ANY_MSG_COLOR + vars: + - name: ansible_callback_diy_on_any_msg_color + type: str - runner_on_failed_msg: - description: Output to be used for callback runner_on_failed. 
- ini: - - section: callback_diy - key: runner_on_failed_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_FAILED_MSG - vars: - - name: ansible_callback_diy_runner_on_failed_msg - type: str + runner_on_failed_msg: + description: Output to be used for callback runner_on_failed. + ini: + - section: callback_diy + key: runner_on_failed_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_FAILED_MSG + vars: + - name: ansible_callback_diy_runner_on_failed_msg + type: str - runner_on_failed_msg_color: - description: - - Output color to be used for O(runner_on_failed_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_on_failed_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_FAILED_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_on_failed_msg_color - type: str + runner_on_failed_msg_color: + description: + - Output color to be used for O(runner_on_failed_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: runner_on_failed_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_FAILED_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_on_failed_msg_color + type: str - runner_on_ok_msg: - description: Output to be used for callback runner_on_ok. - ini: - - section: callback_diy - key: runner_on_ok_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_OK_MSG - vars: - - name: ansible_callback_diy_runner_on_ok_msg - type: str + runner_on_ok_msg: + description: Output to be used for callback runner_on_ok. + ini: + - section: callback_diy + key: runner_on_ok_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_OK_MSG + vars: + - name: ansible_callback_diy_runner_on_ok_msg + type: str - runner_on_ok_msg_color: - description: - - Output color to be used for O(runner_on_ok_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_on_ok_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_OK_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_on_ok_msg_color - type: str + runner_on_ok_msg_color: + description: + - Output color to be used for O(runner_on_ok_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: runner_on_ok_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_OK_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_on_ok_msg_color + type: str - runner_on_skipped_msg: - description: Output to be used for callback runner_on_skipped. - ini: - - section: callback_diy - key: runner_on_skipped_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_SKIPPED_MSG - vars: - - name: ansible_callback_diy_runner_on_skipped_msg - type: str + runner_on_skipped_msg: + description: Output to be used for callback runner_on_skipped. + ini: + - section: callback_diy + key: runner_on_skipped_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_SKIPPED_MSG + vars: + - name: ansible_callback_diy_runner_on_skipped_msg + type: str - runner_on_skipped_msg_color: - description: - - Output color to be used for O(runner_on_skipped_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_on_skipped_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_SKIPPED_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_on_skipped_msg_color - type: str + runner_on_skipped_msg_color: + description: + - Output color to be used for O(runner_on_skipped_msg). + - Template should render a L(valid color value,#notes). 
+ ini: + - section: callback_diy + key: runner_on_skipped_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_SKIPPED_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_on_skipped_msg_color + type: str - runner_on_unreachable_msg: - description: Output to be used for callback runner_on_unreachable. - ini: - - section: callback_diy - key: runner_on_unreachable_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_UNREACHABLE_MSG - vars: - - name: ansible_callback_diy_runner_on_unreachable_msg - type: str + runner_on_unreachable_msg: + description: Output to be used for callback runner_on_unreachable. + ini: + - section: callback_diy + key: runner_on_unreachable_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_UNREACHABLE_MSG + vars: + - name: ansible_callback_diy_runner_on_unreachable_msg + type: str - runner_on_unreachable_msg_color: - description: - - Output color to be used for O(runner_on_unreachable_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_on_unreachable_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_UNREACHABLE_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_on_unreachable_msg_color - type: str + runner_on_unreachable_msg_color: + description: + - Output color to be used for O(runner_on_unreachable_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: runner_on_unreachable_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_UNREACHABLE_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_on_unreachable_msg_color + type: str - playbook_on_start_msg: - description: Output to be used for callback playbook_on_start. - ini: - - section: callback_diy - key: playbook_on_start_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_START_MSG - vars: - - name: ansible_callback_diy_playbook_on_start_msg - type: str + playbook_on_start_msg: + description: Output to be used for callback playbook_on_start. + ini: + - section: callback_diy + key: playbook_on_start_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_START_MSG + vars: + - name: ansible_callback_diy_playbook_on_start_msg + type: str - playbook_on_start_msg_color: - description: - - Output color to be used for O(playbook_on_start_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_start_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_START_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_start_msg_color - type: str + playbook_on_start_msg_color: + description: + - Output color to be used for O(playbook_on_start_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_start_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_START_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_start_msg_color + type: str - playbook_on_notify_msg: - description: Output to be used for callback playbook_on_notify. - ini: - - section: callback_diy - key: playbook_on_notify_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NOTIFY_MSG - vars: - - name: ansible_callback_diy_playbook_on_notify_msg - type: str + playbook_on_notify_msg: + description: Output to be used for callback playbook_on_notify. 
+ ini: + - section: callback_diy + key: playbook_on_notify_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NOTIFY_MSG + vars: + - name: ansible_callback_diy_playbook_on_notify_msg + type: str - playbook_on_notify_msg_color: - description: - - Output color to be used for O(playbook_on_notify_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_notify_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NOTIFY_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_notify_msg_color - type: str + playbook_on_notify_msg_color: + description: + - Output color to be used for O(playbook_on_notify_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_notify_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NOTIFY_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_notify_msg_color + type: str - playbook_on_no_hosts_matched_msg: - description: Output to be used for callback playbook_on_no_hosts_matched. - ini: - - section: callback_diy - key: playbook_on_no_hosts_matched_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_MATCHED_MSG - vars: - - name: ansible_callback_diy_playbook_on_no_hosts_matched_msg - type: str + playbook_on_no_hosts_matched_msg: + description: Output to be used for callback playbook_on_no_hosts_matched. + ini: + - section: callback_diy + key: playbook_on_no_hosts_matched_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_MATCHED_MSG + vars: + - name: ansible_callback_diy_playbook_on_no_hosts_matched_msg + type: str - playbook_on_no_hosts_matched_msg_color: - description: - - Output color to be used for O(playbook_on_no_hosts_matched_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_no_hosts_matched_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_MATCHED_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_no_hosts_matched_msg_color - type: str + playbook_on_no_hosts_matched_msg_color: + description: + - Output color to be used for O(playbook_on_no_hosts_matched_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_no_hosts_matched_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_MATCHED_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_no_hosts_matched_msg_color + type: str - playbook_on_no_hosts_remaining_msg: - description: Output to be used for callback playbook_on_no_hosts_remaining. - ini: - - section: callback_diy - key: playbook_on_no_hosts_remaining_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_REMAINING_MSG - vars: - - name: ansible_callback_diy_playbook_on_no_hosts_remaining_msg - type: str + playbook_on_no_hosts_remaining_msg: + description: Output to be used for callback playbook_on_no_hosts_remaining. + ini: + - section: callback_diy + key: playbook_on_no_hosts_remaining_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_REMAINING_MSG + vars: + - name: ansible_callback_diy_playbook_on_no_hosts_remaining_msg + type: str - playbook_on_no_hosts_remaining_msg_color: - description: - - Output color to be used for O(playbook_on_no_hosts_remaining_msg). - - Template should render a L(valid color value,#notes). 
- ini: - - section: callback_diy - key: playbook_on_no_hosts_remaining_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_REMAINING_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_no_hosts_remaining_msg_color - type: str + playbook_on_no_hosts_remaining_msg_color: + description: + - Output color to be used for O(playbook_on_no_hosts_remaining_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_no_hosts_remaining_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_REMAINING_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_no_hosts_remaining_msg_color + type: str - playbook_on_task_start_msg: - description: Output to be used for callback playbook_on_task_start. - ini: - - section: callback_diy - key: playbook_on_task_start_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_TASK_START_MSG - vars: - - name: ansible_callback_diy_playbook_on_task_start_msg - type: str + playbook_on_task_start_msg: + description: Output to be used for callback playbook_on_task_start. + ini: + - section: callback_diy + key: playbook_on_task_start_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_TASK_START_MSG + vars: + - name: ansible_callback_diy_playbook_on_task_start_msg + type: str - playbook_on_task_start_msg_color: - description: - - Output color to be used for O(playbook_on_task_start_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_task_start_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_TASK_START_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_task_start_msg_color - type: str + playbook_on_task_start_msg_color: + description: + - Output color to be used for O(playbook_on_task_start_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_task_start_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_TASK_START_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_task_start_msg_color + type: str - playbook_on_handler_task_start_msg: - description: Output to be used for callback playbook_on_handler_task_start. - ini: - - section: callback_diy - key: playbook_on_handler_task_start_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_HANDLER_TASK_START_MSG - vars: - - name: ansible_callback_diy_playbook_on_handler_task_start_msg - type: str + playbook_on_handler_task_start_msg: + description: Output to be used for callback playbook_on_handler_task_start. + ini: + - section: callback_diy + key: playbook_on_handler_task_start_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_HANDLER_TASK_START_MSG + vars: + - name: ansible_callback_diy_playbook_on_handler_task_start_msg + type: str - playbook_on_handler_task_start_msg_color: - description: - - Output color to be used for O(playbook_on_handler_task_start_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_handler_task_start_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_HANDLER_TASK_START_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_handler_task_start_msg_color - type: str + playbook_on_handler_task_start_msg_color: + description: + - Output color to be used for O(playbook_on_handler_task_start_msg). + - Template should render a L(valid color value,#notes). 
+ ini: + - section: callback_diy + key: playbook_on_handler_task_start_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_HANDLER_TASK_START_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_handler_task_start_msg_color + type: str - playbook_on_vars_prompt_msg: - description: Output to be used for callback playbook_on_vars_prompt. - ini: - - section: callback_diy - key: playbook_on_vars_prompt_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_VARS_PROMPT_MSG - vars: - - name: ansible_callback_diy_playbook_on_vars_prompt_msg - type: str + playbook_on_vars_prompt_msg: + description: Output to be used for callback playbook_on_vars_prompt. + ini: + - section: callback_diy + key: playbook_on_vars_prompt_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_VARS_PROMPT_MSG + vars: + - name: ansible_callback_diy_playbook_on_vars_prompt_msg + type: str - playbook_on_vars_prompt_msg_color: - description: - - Output color to be used for O(playbook_on_vars_prompt_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_vars_prompt_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_VARS_PROMPT_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_vars_prompt_msg_color - type: str + playbook_on_vars_prompt_msg_color: + description: + - Output color to be used for O(playbook_on_vars_prompt_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_vars_prompt_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_VARS_PROMPT_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_vars_prompt_msg_color + type: str - playbook_on_play_start_msg: - description: Output to be used for callback playbook_on_play_start. - ini: - - section: callback_diy - key: playbook_on_play_start_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_PLAY_START_MSG - vars: - - name: ansible_callback_diy_playbook_on_play_start_msg - type: str + playbook_on_play_start_msg: + description: Output to be used for callback playbook_on_play_start. + ini: + - section: callback_diy + key: playbook_on_play_start_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_PLAY_START_MSG + vars: + - name: ansible_callback_diy_playbook_on_play_start_msg + type: str - playbook_on_play_start_msg_color: - description: - - Output color to be used for O(playbook_on_play_start_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_play_start_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_PLAY_START_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_play_start_msg_color - type: str + playbook_on_play_start_msg_color: + description: + - Output color to be used for O(playbook_on_play_start_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_play_start_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_PLAY_START_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_play_start_msg_color + type: str - playbook_on_stats_msg: - description: Output to be used for callback playbook_on_stats. - ini: - - section: callback_diy - key: playbook_on_stats_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_STATS_MSG - vars: - - name: ansible_callback_diy_playbook_on_stats_msg - type: str + playbook_on_stats_msg: + description: Output to be used for callback playbook_on_stats. 
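The same options can also be set outside the playbook through the documented ini section or environment variables. A hypothetical ansible.cfg sketch for the playbook_on_play_start entries above (static values, which are still rendered as templates first):

```ini
# ansible.cfg
[defaults]
stdout_callback = community.general.diy

[callback_diy]
playbook_on_play_start_msg = --- starting play ---
playbook_on_play_start_msg_color = bright blue

# Equivalent environment variable for the message:
#   export ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_PLAY_START_MSG='--- starting play ---'
```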
+ ini: + - section: callback_diy + key: playbook_on_stats_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_STATS_MSG + vars: + - name: ansible_callback_diy_playbook_on_stats_msg + type: str - playbook_on_stats_msg_color: - description: - - Output color to be used for O(playbook_on_stats_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_stats_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_STATS_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_stats_msg_color - type: str + playbook_on_stats_msg_color: + description: + - Output color to be used for O(playbook_on_stats_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_stats_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_STATS_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_stats_msg_color + type: str - on_file_diff_msg: - description: Output to be used for callback on_file_diff. - ini: - - section: callback_diy - key: on_file_diff_msg - env: - - name: ANSIBLE_CALLBACK_DIY_ON_FILE_DIFF_MSG - vars: - - name: ansible_callback_diy_on_file_diff_msg - type: str + on_file_diff_msg: + description: Output to be used for callback on_file_diff. + ini: + - section: callback_diy + key: on_file_diff_msg + env: + - name: ANSIBLE_CALLBACK_DIY_ON_FILE_DIFF_MSG + vars: + - name: ansible_callback_diy_on_file_diff_msg + type: str - on_file_diff_msg_color: - description: - - Output color to be used for O(on_file_diff_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: on_file_diff_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_ON_FILE_DIFF_MSG_COLOR - vars: - - name: ansible_callback_diy_on_file_diff_msg_color - type: str + on_file_diff_msg_color: + description: + - Output color to be used for O(on_file_diff_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: on_file_diff_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_ON_FILE_DIFF_MSG_COLOR + vars: + - name: ansible_callback_diy_on_file_diff_msg_color + type: str - playbook_on_include_msg: - description: Output to be used for callback playbook_on_include. - ini: - - section: callback_diy - key: playbook_on_include_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_INCLUDE_MSG - vars: - - name: ansible_callback_diy_playbook_on_include_msg - type: str + playbook_on_include_msg: + description: Output to be used for callback playbook_on_include. + ini: + - section: callback_diy + key: playbook_on_include_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_INCLUDE_MSG + vars: + - name: ansible_callback_diy_playbook_on_include_msg + type: str - playbook_on_include_msg_color: - description: - - Output color to be used for O(playbook_on_include_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_include_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_INCLUDE_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_include_msg_color - type: str + playbook_on_include_msg_color: + description: + - Output color to be used for O(playbook_on_include_msg). + - Template should render a L(valid color value,#notes). 
+ ini: + - section: callback_diy + key: playbook_on_include_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_INCLUDE_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_include_msg_color + type: str - runner_item_on_ok_msg: - description: Output to be used for callback runner_item_on_ok. - ini: - - section: callback_diy - key: runner_item_on_ok_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_OK_MSG - vars: - - name: ansible_callback_diy_runner_item_on_ok_msg - type: str + runner_item_on_ok_msg: + description: Output to be used for callback runner_item_on_ok. + ini: + - section: callback_diy + key: runner_item_on_ok_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_OK_MSG + vars: + - name: ansible_callback_diy_runner_item_on_ok_msg + type: str - runner_item_on_ok_msg_color: - description: - - Output color to be used for O(runner_item_on_ok_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_item_on_ok_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_OK_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_item_on_ok_msg_color - type: str + runner_item_on_ok_msg_color: + description: + - Output color to be used for O(runner_item_on_ok_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: runner_item_on_ok_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_OK_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_item_on_ok_msg_color + type: str - runner_item_on_failed_msg: - description: Output to be used for callback runner_item_on_failed. - ini: - - section: callback_diy - key: runner_item_on_failed_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_FAILED_MSG - vars: - - name: ansible_callback_diy_runner_item_on_failed_msg - type: str + runner_item_on_failed_msg: + description: Output to be used for callback runner_item_on_failed. + ini: + - section: callback_diy + key: runner_item_on_failed_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_FAILED_MSG + vars: + - name: ansible_callback_diy_runner_item_on_failed_msg + type: str - runner_item_on_failed_msg_color: - description: - - Output color to be used for O(runner_item_on_failed_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_item_on_failed_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_FAILED_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_item_on_failed_msg_color - type: str + runner_item_on_failed_msg_color: + description: + - Output color to be used for O(runner_item_on_failed_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: runner_item_on_failed_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_FAILED_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_item_on_failed_msg_color + type: str - runner_item_on_skipped_msg: - description: Output to be used for callback runner_item_on_skipped. - ini: - - section: callback_diy - key: runner_item_on_skipped_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_SKIPPED_MSG - vars: - - name: ansible_callback_diy_runner_item_on_skipped_msg - type: str + runner_item_on_skipped_msg: + description: Output to be used for callback runner_item_on_skipped. 
+ ini: + - section: callback_diy + key: runner_item_on_skipped_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_SKIPPED_MSG + vars: + - name: ansible_callback_diy_runner_item_on_skipped_msg + type: str - runner_item_on_skipped_msg_color: - description: - - Output color to be used for O(runner_item_on_skipped_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_item_on_skipped_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_SKIPPED_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_item_on_skipped_msg_color - type: str + runner_item_on_skipped_msg_color: + description: + - Output color to be used for O(runner_item_on_skipped_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: runner_item_on_skipped_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_SKIPPED_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_item_on_skipped_msg_color + type: str - runner_retry_msg: - description: Output to be used for callback runner_retry. - ini: - - section: callback_diy - key: runner_retry_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_RETRY_MSG - vars: - - name: ansible_callback_diy_runner_retry_msg - type: str + runner_retry_msg: + description: Output to be used for callback runner_retry. + ini: + - section: callback_diy + key: runner_retry_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_RETRY_MSG + vars: + - name: ansible_callback_diy_runner_retry_msg + type: str - runner_retry_msg_color: - description: - - Output color to be used for O(runner_retry_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_retry_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_RETRY_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_retry_msg_color - type: str + runner_retry_msg_color: + description: + - Output color to be used for O(runner_retry_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: runner_retry_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_RETRY_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_retry_msg_color + type: str - runner_on_start_msg: - description: Output to be used for callback runner_on_start. - ini: - - section: callback_diy - key: runner_on_start_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_START_MSG - vars: - - name: ansible_callback_diy_runner_on_start_msg - type: str + runner_on_start_msg: + description: Output to be used for callback runner_on_start. + ini: + - section: callback_diy + key: runner_on_start_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_START_MSG + vars: + - name: ansible_callback_diy_runner_on_start_msg + type: str - runner_on_start_msg_color: - description: - - Output color to be used for O(runner_on_start_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_on_start_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_START_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_on_start_msg_color - type: str + runner_on_start_msg_color: + description: + - Output color to be used for O(runner_on_start_msg). + - Template should render a L(valid color value,#notes). 
+ ini: + - section: callback_diy + key: runner_on_start_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_START_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_on_start_msg_color + type: str - runner_on_no_hosts_msg: - description: Output to be used for callback runner_on_no_hosts. - ini: - - section: callback_diy - key: runner_on_no_hosts_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_NO_HOSTS_MSG - vars: - - name: ansible_callback_diy_runner_on_no_hosts_msg - type: str + runner_on_no_hosts_msg: + description: Output to be used for callback runner_on_no_hosts. + ini: + - section: callback_diy + key: runner_on_no_hosts_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_NO_HOSTS_MSG + vars: + - name: ansible_callback_diy_runner_on_no_hosts_msg + type: str - runner_on_no_hosts_msg_color: - description: - - Output color to be used for O(runner_on_no_hosts_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_on_no_hosts_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_NO_HOSTS_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_on_no_hosts_msg_color - type: str + runner_on_no_hosts_msg_color: + description: + - Output color to be used for O(runner_on_no_hosts_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: runner_on_no_hosts_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_NO_HOSTS_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_on_no_hosts_msg_color + type: str - playbook_on_setup_msg: - description: Output to be used for callback playbook_on_setup. - ini: - - section: callback_diy - key: playbook_on_setup_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_SETUP_MSG - vars: - - name: ansible_callback_diy_playbook_on_setup_msg - type: str + playbook_on_setup_msg: + description: Output to be used for callback playbook_on_setup. + ini: + - section: callback_diy + key: playbook_on_setup_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_SETUP_MSG + vars: + - name: ansible_callback_diy_playbook_on_setup_msg + type: str - playbook_on_setup_msg_color: - description: - - Output color to be used for O(playbook_on_setup_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_setup_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_SETUP_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_setup_msg_color - type: str -''' + playbook_on_setup_msg_color: + description: + - Output color to be used for O(playbook_on_setup_msg). + - Template should render a L(valid color value,#notes). 
+ ini: + - section: callback_diy + key: playbook_on_setup_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_SETUP_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_setup_msg_color + type: str +""" -EXAMPLES = r''' +EXAMPLES = r""" ansible.cfg: > # Enable plugin [defaults] @@ -623,7 +618,7 @@ ansible.cfg: > # Newline after every callback # on_any_msg='{{ " " | join("\n") }}' -playbook.yml: > +playbook.yml: >- --- - name: "Default plugin output: play example" hosts: localhost @@ -782,7 +777,7 @@ playbook.yml: > {{ white }}{{ ansible_callback_diy[key] }} {% endfor %} -''' +""" import sys from contextlib import contextmanager diff --git a/plugins/callback/elastic.py b/plugins/callback/elastic.py index 0c94d1ba33..b9aa6adf94 100644 --- a/plugins/callback/elastic.py +++ b/plugins/callback/elastic.py @@ -5,69 +5,69 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - author: Victor Martinez (@v1v) - name: elastic - type: notification - short_description: Create distributed traces for each Ansible task in Elastic APM - version_added: 3.8.0 +DOCUMENTATION = r""" +author: Victor Martinez (@v1v) +name: elastic +type: notification +short_description: Create distributed traces for each Ansible task in Elastic APM +version_added: 3.8.0 +description: + - This callback creates distributed traces for each Ansible task in Elastic APM. + - You can configure the plugin with environment variables. + - See U(https://www.elastic.co/guide/en/apm/agent/python/current/configuration.html). +options: + hide_task_arguments: + default: false + type: bool description: - - This callback creates distributed traces for each Ansible task in Elastic APM. - - You can configure the plugin with environment variables. - - See U(https://www.elastic.co/guide/en/apm/agent/python/current/configuration.html). - options: - hide_task_arguments: - default: false - type: bool - description: - - Hide the arguments for a task. - env: - - name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS - apm_service_name: - default: ansible - type: str - description: - - The service name resource attribute. - env: - - name: ELASTIC_APM_SERVICE_NAME - apm_server_url: - type: str - description: - - Use the APM server and its environment variables. - env: - - name: ELASTIC_APM_SERVER_URL - apm_secret_token: - type: str - description: - - Use the APM server token - env: - - name: ELASTIC_APM_SECRET_TOKEN - apm_api_key: - type: str - description: - - Use the APM API key - env: - - name: ELASTIC_APM_API_KEY - apm_verify_server_cert: - default: true - type: bool - description: - - Verifies the SSL certificate if an HTTPS connection. - env: - - name: ELASTIC_APM_VERIFY_SERVER_CERT - traceparent: - type: str - description: - - The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header). - env: - - name: TRACEPARENT - requirements: - - elastic-apm (Python library) -''' + - Hide the arguments for a task. + env: + - name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS + apm_service_name: + default: ansible + type: str + description: + - The service name resource attribute. + env: + - name: ELASTIC_APM_SERVICE_NAME + apm_server_url: + type: str + description: + - Use the APM server and its environment variables. + env: + - name: ELASTIC_APM_SERVER_URL + apm_secret_token: + type: str + description: + - Use the APM server token. + env: + - name: ELASTIC_APM_SECRET_TOKEN + apm_api_key: + type: str + description: + - Use the APM API key. 
+ env: + - name: ELASTIC_APM_API_KEY + apm_verify_server_cert: + default: true + type: bool + description: + - Verifies the SSL certificate if an HTTPS connection. + env: + - name: ELASTIC_APM_VERIFY_SERVER_CERT + traceparent: + type: str + description: + - The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header). + env: + - name: TRACEPARENT +requirements: + - elastic-apm (Python library) +""" -EXAMPLES = ''' -examples: | +EXAMPLES = r""" +examples: |- Enable the plugin in ansible.cfg: [defaults] callbacks_enabled = community.general.elastic @@ -76,7 +76,7 @@ examples: | export ELASTIC_APM_SERVER_URL= export ELASTIC_APM_SERVICE_NAME=your_service_name export ELASTIC_APM_API_KEY=your_APM_API_KEY -''' +""" import getpass import socket diff --git a/plugins/callback/hipchat.py b/plugins/callback/hipchat.py index afd9e20559..bf0d425303 100644 --- a/plugins/callback/hipchat.py +++ b/plugins/callback/hipchat.py @@ -25,6 +25,7 @@ DOCUMENTATION = ''' options: token: description: HipChat API token for v1 or v2 API. + type: str required: true env: - name: HIPCHAT_TOKEN @@ -33,6 +34,10 @@ DOCUMENTATION = ''' key: token api_version: description: HipChat API version, v1 or v2. + type: str + choices: + - v1 + - v2 required: false default: v1 env: @@ -42,6 +47,7 @@ DOCUMENTATION = ''' key: api_version room: description: HipChat room to post in. + type: str default: ansible env: - name: HIPCHAT_ROOM @@ -50,6 +56,7 @@ DOCUMENTATION = ''' key: room from: description: Name to post as + type: str default: ansible env: - name: HIPCHAT_FROM diff --git a/plugins/callback/jabber.py b/plugins/callback/jabber.py index d2d00496d8..6cd94d03b7 100644 --- a/plugins/callback/jabber.py +++ b/plugins/callback/jabber.py @@ -7,38 +7,42 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: jabber - type: notification - short_description: post task events to a jabber server - description: - - The chatty part of ChatOps with a Hipchat server as a target. - - This callback plugin sends status updates to a HipChat channel during playbook execution. - requirements: - - xmpp (Python library U(https://github.com/ArchipelProject/xmpppy)) - options: - server: - description: connection info to jabber server - required: true - env: - - name: JABBER_SERV - user: - description: Jabber user to authenticate as - required: true - env: - - name: JABBER_USER - password: - description: Password for the user to the jabber server - required: true - env: - - name: JABBER_PASS - to: - description: chat identifier that will receive the message - required: true - env: - - name: JABBER_TO -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: jabber +type: notification +short_description: post task events to a Jabber server +description: + - The chatty part of ChatOps with a Hipchat server as a target. + - This callback plugin sends status updates to a HipChat channel during playbook execution. +requirements: + - xmpp (Python library U(https://github.com/ArchipelProject/xmpppy)) +options: + server: + description: Connection info to Jabber server. + type: str + required: true + env: + - name: JABBER_SERV + user: + description: Jabber user to authenticate as. + type: str + required: true + env: + - name: JABBER_USER + password: + description: Password for the user to the Jabber server. + type: str + required: true + env: + - name: JABBER_PASS + to: + description: Chat identifier that will receive the message. 
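Since the jabber callback above takes its connection details exclusively from environment variables, a configuration sketch looks like this (the enabling line is an assumption, mirroring how the other notification callbacks in this file are switched on; values are placeholders):

```ini
# ansible.cfg (enabling line assumed)
[defaults]
callbacks_enabled = community.general.jabber

# Connection settings, from the documented environment variables:
#   export JABBER_SERV=jabber.example.com
#   export JABBER_USER=ansible@jabber.example.com
#   export JABBER_PASS=changeme
#   export JABBER_TO=ops@jabber.example.com
```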
+ type: str + required: true + env: + - name: JABBER_TO +""" import os diff --git a/plugins/callback/log_plays.py b/plugins/callback/log_plays.py index e99054e176..0b658717f6 100644 --- a/plugins/callback/log_plays.py +++ b/plugins/callback/log_plays.py @@ -7,26 +7,27 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: log_plays - type: notification - short_description: write playbook output to log file - description: - - This callback writes playbook output to a file per host in the C(/var/log/ansible/hosts) directory. - requirements: - - Whitelist in configuration - - A writeable C(/var/log/ansible/hosts) directory by the user executing Ansible on the controller - options: - log_folder: - default: /var/log/ansible/hosts - description: The folder where log files will be created. - env: - - name: ANSIBLE_LOG_FOLDER - ini: - - section: callback_log_plays - key: log_folder -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: log_plays +type: notification +short_description: write playbook output to log file +description: + - This callback writes playbook output to a file per host in the C(/var/log/ansible/hosts) directory. +requirements: + - Whitelist in configuration + - A writeable C(/var/log/ansible/hosts) directory by the user executing Ansible on the controller +options: + log_folder: + default: /var/log/ansible/hosts + description: The folder where log files will be created. + type: str + env: + - name: ANSIBLE_LOG_FOLDER + ini: + - section: callback_log_plays + key: log_folder +""" import os import time diff --git a/plugins/callback/loganalytics.py b/plugins/callback/loganalytics.py index ed7e47b2e2..9411dc8c0d 100644 --- a/plugins/callback/loganalytics.py +++ b/plugins/callback/loganalytics.py @@ -6,39 +6,41 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - name: loganalytics - type: notification - short_description: Posts task results to Azure Log Analytics - author: "Cyrus Li (@zhcli) " - description: - - This callback plugin will post task results in JSON formatted to an Azure Log Analytics workspace. - - Credits to authors of splunk callback plugin. - version_added: "2.4.0" - requirements: - - Whitelisting this callback plugin. - - An Azure log analytics work space has been established. - options: - workspace_id: - description: Workspace ID of the Azure log analytics workspace. - required: true - env: - - name: WORKSPACE_ID - ini: - - section: callback_loganalytics - key: workspace_id - shared_key: - description: Shared key to connect to Azure log analytics workspace. - required: true - env: - - name: WORKSPACE_SHARED_KEY - ini: - - section: callback_loganalytics - key: shared_key -''' +DOCUMENTATION = r""" +name: loganalytics +type: notification +short_description: Posts task results to Azure Log Analytics +author: "Cyrus Li (@zhcli) " +description: + - This callback plugin will post task results in JSON formatted to an Azure Log Analytics workspace. + - Credits to authors of splunk callback plugin. +version_added: "2.4.0" +requirements: + - Whitelisting this callback plugin. + - An Azure log analytics work space has been established. +options: + workspace_id: + description: Workspace ID of the Azure log analytics workspace. 
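For the log_plays callback just above, the only option is the log folder. A minimal sketch using the documented ini key (or the ANSIBLE_LOG_FOLDER environment variable), with the whitelist line reflecting the stated requirement:

```ini
# ansible.cfg
[defaults]
callback_whitelist = community.general.log_plays

[callback_log_plays]
log_folder = /var/log/ansible/hosts
```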
+ type: str + required: true + env: + - name: WORKSPACE_ID + ini: + - section: callback_loganalytics + key: workspace_id + shared_key: + description: Shared key to connect to Azure log analytics workspace. + type: str + required: true + env: + - name: WORKSPACE_SHARED_KEY + ini: + - section: callback_loganalytics + key: shared_key +""" -EXAMPLES = ''' -examples: | +EXAMPLES = r""" +examples: |- Whitelist the plugin in ansible.cfg: [defaults] callback_whitelist = community.general.loganalytics @@ -49,7 +51,7 @@ examples: | [callback_loganalytics] workspace_id = 01234567-0123-0123-0123-01234567890a shared_key = dZD0kCbKl3ehZG6LHFMuhtE0yHiFCmetzFMc2u+roXIUQuatqU924SsAAAAPemhjbGlAemhjbGktTUJQAQIDBA== -''' +""" import hashlib import hmac diff --git a/plugins/callback/logdna.py b/plugins/callback/logdna.py index fc9a81ac8a..f5cfb4800c 100644 --- a/plugins/callback/logdna.py +++ b/plugins/callback/logdna.py @@ -6,56 +6,56 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: logdna - type: notification - short_description: Sends playbook logs to LogDNA - description: - - This callback will report logs from playbook actions, tasks, and events to LogDNA (U(https://app.logdna.com)). - requirements: - - LogDNA Python Library (U(https://github.com/logdna/python)) - - whitelisting in configuration - options: - conf_key: - required: true - description: LogDNA Ingestion Key. - type: string - env: - - name: LOGDNA_INGESTION_KEY - ini: - - section: callback_logdna - key: conf_key - plugin_ignore_errors: - required: false - description: Whether to ignore errors on failing or not. - type: boolean - env: - - name: ANSIBLE_IGNORE_ERRORS - ini: - - section: callback_logdna - key: plugin_ignore_errors - default: false - conf_hostname: - required: false - description: Alternative Host Name; the current host name by default. - type: string - env: - - name: LOGDNA_HOSTNAME - ini: - - section: callback_logdna - key: conf_hostname - conf_tags: - required: false - description: Tags. - type: string - env: - - name: LOGDNA_TAGS - ini: - - section: callback_logdna - key: conf_tags - default: ansible -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: logdna +type: notification +short_description: Sends playbook logs to LogDNA +description: + - This callback will report logs from playbook actions, tasks, and events to LogDNA (U(https://app.logdna.com)). +requirements: + - LogDNA Python Library (U(https://github.com/logdna/python)) + - whitelisting in configuration +options: + conf_key: + required: true + description: LogDNA Ingestion Key. + type: string + env: + - name: LOGDNA_INGESTION_KEY + ini: + - section: callback_logdna + key: conf_key + plugin_ignore_errors: + required: false + description: Whether to ignore errors on failing or not. + type: boolean + env: + - name: ANSIBLE_IGNORE_ERRORS + ini: + - section: callback_logdna + key: plugin_ignore_errors + default: false + conf_hostname: + required: false + description: Alternative Host Name; the current host name by default. + type: string + env: + - name: LOGDNA_HOSTNAME + ini: + - section: callback_logdna + key: conf_hostname + conf_tags: + required: false + description: Tags. 
+ type: string + env: + - name: LOGDNA_TAGS + ini: + - section: callback_logdna + key: conf_tags + default: ansible +""" import logging import json diff --git a/plugins/callback/logentries.py b/plugins/callback/logentries.py index d3feceb72e..a7adfbf3aa 100644 --- a/plugins/callback/logentries.py +++ b/plugins/callback/logentries.py @@ -6,75 +6,77 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: logentries - type: notification - short_description: Sends events to Logentries +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: logentries +type: notification +short_description: Sends events to Logentries +description: + - This callback plugin will generate JSON objects and send them to Logentries using TCP for auditing/debugging purposes. +requirements: + - whitelisting in configuration + - certifi (Python library) + - flatdict (Python library), if you want to use the O(flatten) option +options: + api: + description: URI to the Logentries API. + type: str + env: + - name: LOGENTRIES_API + default: data.logentries.com + ini: + - section: callback_logentries + key: api + port: + description: HTTP port to use when connecting to the API. + type: int + env: + - name: LOGENTRIES_PORT + default: 80 + ini: + - section: callback_logentries + key: port + tls_port: + description: Port to use when connecting to the API when TLS is enabled. + type: int + env: + - name: LOGENTRIES_TLS_PORT + default: 443 + ini: + - section: callback_logentries + key: tls_port + token: + description: The logentries C(TCP token). + type: str + env: + - name: LOGENTRIES_ANSIBLE_TOKEN + required: true + ini: + - section: callback_logentries + key: token + use_tls: description: - - This callback plugin will generate JSON objects and send them to Logentries via TCP for auditing/debugging purposes. - - Before 2.4, if you wanted to use an ini configuration, the file must be placed in the same directory as this plugin and named C(logentries.ini). - - In 2.4 and above you can just put it in the main Ansible configuration file. - requirements: - - whitelisting in configuration - - certifi (Python library) - - flatdict (Python library), if you want to use the O(flatten) option - options: - api: - description: URI to the Logentries API. - env: - - name: LOGENTRIES_API - default: data.logentries.com - ini: - - section: callback_logentries - key: api - port: - description: HTTP port to use when connecting to the API. - env: - - name: LOGENTRIES_PORT - default: 80 - ini: - - section: callback_logentries - key: port - tls_port: - description: Port to use when connecting to the API when TLS is enabled. - env: - - name: LOGENTRIES_TLS_PORT - default: 443 - ini: - - section: callback_logentries - key: tls_port - token: - description: The logentries C(TCP token). - env: - - name: LOGENTRIES_ANSIBLE_TOKEN - required: true - ini: - - section: callback_logentries - key: token - use_tls: - description: - - Toggle to decide whether to use TLS to encrypt the communications with the API server. - env: - - name: LOGENTRIES_USE_TLS - default: false - type: boolean - ini: - - section: callback_logentries - key: use_tls - flatten: - description: Flatten complex data structures into a single dictionary with complex keys. - type: boolean - default: false - env: - - name: LOGENTRIES_FLATTEN - ini: - - section: callback_logentries - key: flatten -''' + - Toggle to decide whether to use TLS to encrypt the communications with the API server. 
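A hypothetical ansible.cfg sketch for the logdna callback documented above (key names come from its ini entries; values are placeholders):

```ini
# ansible.cfg
[defaults]
callback_whitelist = community.general.logdna

[callback_logdna]
conf_key = <your LogDNA ingestion key>
conf_hostname = controller01
conf_tags = ansible
plugin_ignore_errors = false
```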
+ env: + - name: LOGENTRIES_USE_TLS + default: false + type: boolean + ini: + - section: callback_logentries + key: use_tls + flatten: + description: Flatten complex data structures into a single dictionary with complex keys. + type: boolean + default: false + env: + - name: LOGENTRIES_FLATTEN + ini: + - section: callback_logentries + key: flatten +""" -EXAMPLES = ''' -examples: > +EXAMPLES = r""" +examples: >- To enable, add this to your ansible.cfg file in the defaults block [defaults] @@ -93,7 +95,7 @@ examples: > use_tls = true token = dd21fc88-f00a-43ff-b977-e3a4233c53af flatten = false -''' +""" import os import socket diff --git a/plugins/callback/logstash.py b/plugins/callback/logstash.py index f3725e465a..088a84bf78 100644 --- a/plugins/callback/logstash.py +++ b/plugins/callback/logstash.py @@ -7,91 +7,94 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' - author: Yevhen Khmelenko (@ujenmr) - name: logstash - type: notification - short_description: Sends events to Logstash - description: - - This callback will report facts and task events to Logstash U(https://www.elastic.co/products/logstash). - requirements: - - whitelisting in configuration - - logstash (Python library) - options: - server: - description: Address of the Logstash server. - env: - - name: LOGSTASH_SERVER - ini: - - section: callback_logstash - key: server - version_added: 1.0.0 - default: localhost - port: - description: Port on which logstash is listening. - env: - - name: LOGSTASH_PORT - ini: - - section: callback_logstash - key: port - version_added: 1.0.0 - default: 5000 - type: - description: Message type. - env: - - name: LOGSTASH_TYPE - ini: - - section: callback_logstash - key: type - version_added: 1.0.0 - default: ansible - pre_command: - description: Executes command before run and its result is added to the C(ansible_pre_command_output) logstash field. - version_added: 2.0.0 - ini: - - section: callback_logstash - key: pre_command - env: - - name: LOGSTASH_PRE_COMMAND - format_version: - description: Logging format. - type: str - version_added: 2.0.0 - ini: - - section: callback_logstash - key: format_version - env: - - name: LOGSTASH_FORMAT_VERSION - default: v1 - choices: - - v1 - - v2 +DOCUMENTATION = r""" +author: Yevhen Khmelenko (@ujenmr) +name: logstash +type: notification +short_description: Sends events to Logstash +description: + - This callback will report facts and task events to Logstash U(https://www.elastic.co/products/logstash). +requirements: + - whitelisting in configuration + - logstash (Python library) +options: + server: + description: Address of the Logstash server. + type: str + env: + - name: LOGSTASH_SERVER + ini: + - section: callback_logstash + key: server + version_added: 1.0.0 + default: localhost + port: + description: Port on which logstash is listening. + type: int + env: + - name: LOGSTASH_PORT + ini: + - section: callback_logstash + key: port + version_added: 1.0.0 + default: 5000 + type: + description: Message type. + type: str + env: + - name: LOGSTASH_TYPE + ini: + - section: callback_logstash + key: type + version_added: 1.0.0 + default: ansible + pre_command: + description: Executes command before run and its result is added to the C(ansible_pre_command_output) logstash field. + type: str + version_added: 2.0.0 + ini: + - section: callback_logstash + key: pre_command + env: + - name: LOGSTASH_PRE_COMMAND + format_version: + description: Logging format. 
+ type: str + version_added: 2.0.0 + ini: + - section: callback_logstash + key: format_version + env: + - name: LOGSTASH_FORMAT_VERSION + default: v1 + choices: + - v1 + - v2 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" ansible.cfg: | - # Enable Callback plugin - [defaults] - callback_whitelist = community.general.logstash + # Enable Callback plugin + [defaults] + callback_whitelist = community.general.logstash - [callback_logstash] - server = logstash.example.com - port = 5000 - pre_command = git rev-parse HEAD - type = ansible + [callback_logstash] + server = logstash.example.com + port = 5000 + pre_command = git rev-parse HEAD + type = ansible -11-input-tcp.conf: | - # Enable Logstash TCP Input - input { - tcp { - port => 5000 - codec => json - add_field => { "[@metadata][beat]" => "notify" } - add_field => { "[@metadata][type]" => "ansible" } - } - } -''' +11-input-tcp.conf: |- + # Enable Logstash TCP Input + input { + tcp { + port => 5000 + codec => json + add_field => { "[@metadata][beat]" => "notify" } + add_field => { "[@metadata][type]" => "ansible" } + } + } +""" import os import json diff --git a/plugins/callback/mail.py b/plugins/callback/mail.py index 1b847ea34c..a6b6f4c1ef 100644 --- a/plugins/callback/mail.py +++ b/plugins/callback/mail.py @@ -7,81 +7,80 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" name: mail type: notification -short_description: Sends failure events via email +short_description: Sends failure events through email description: -- This callback will report failures via email. + - This callback will report failures through email. author: -- Dag Wieers (@dagwieers) + - Dag Wieers (@dagwieers) requirements: -- whitelisting in configuration + - whitelisting in configuration options: mta: description: - - Mail Transfer Agent, server that accepts SMTP. + - Mail Transfer Agent, server that accepts SMTP. type: str env: - - name: SMTPHOST + - name: SMTPHOST ini: - - section: callback_mail - key: smtphost + - section: callback_mail + key: smtphost default: localhost mtaport: description: - - Mail Transfer Agent Port. - - Port at which server SMTP. + - Mail Transfer Agent Port. + - Port at which server SMTP. type: int ini: - - section: callback_mail - key: smtpport + - section: callback_mail + key: smtpport default: 25 to: description: - - Mail recipient. + - Mail recipient. type: list elements: str ini: - - section: callback_mail - key: to + - section: callback_mail + key: to default: [root] sender: description: - - Mail sender. - - This is required since community.general 6.0.0. + - Mail sender. + - This is required since community.general 6.0.0. type: str required: true ini: - - section: callback_mail - key: sender + - section: callback_mail + key: sender cc: description: - - CC'd recipients. + - CC'd recipients. type: list elements: str ini: - - section: callback_mail - key: cc + - section: callback_mail + key: cc bcc: description: - - BCC'd recipients. + - BCC'd recipients. type: list elements: str ini: - - section: callback_mail - key: bcc + - section: callback_mail + key: bcc message_id_domain: description: - - The domain name to use for the L(Message-ID header, https://en.wikipedia.org/wiki/Message-ID). - - The default is the hostname of the control node. + - The domain name to use for the L(Message-ID header, https://en.wikipedia.org/wiki/Message-ID). + - The default is the hostname of the control node. 
type: str ini: - - section: callback_mail - key: message_id_domain + - section: callback_mail + key: message_id_domain version_added: 8.2.0 - -''' +""" import json import os diff --git a/plugins/callback/nrdp.py b/plugins/callback/nrdp.py index 62f4a89ec8..83b5fbf9dc 100644 --- a/plugins/callback/nrdp.py +++ b/plugins/callback/nrdp.py @@ -7,65 +7,65 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - name: nrdp - type: notification - author: "Remi VERCHERE (@rverchere)" - short_description: Post task results to a Nagios server through nrdp - description: - - This callback send playbook result to Nagios. - - Nagios shall use NRDP to receive passive events. - - The passive check is sent to a dedicated host/service for Ansible. - options: - url: - description: URL of the nrdp server. - required: true - env: - - name : NRDP_URL - ini: - - section: callback_nrdp - key: url - type: string - validate_certs: - description: Validate the SSL certificate of the nrdp server. (Used for HTTPS URLs.) - env: - - name: NRDP_VALIDATE_CERTS - ini: - - section: callback_nrdp - key: validate_nrdp_certs - - section: callback_nrdp - key: validate_certs - type: boolean - default: false - aliases: [ validate_nrdp_certs ] - token: - description: Token to be allowed to push nrdp events. - required: true - env: - - name: NRDP_TOKEN - ini: - - section: callback_nrdp - key: token - type: string - hostname: - description: Hostname where the passive check is linked to. - required: true - env: - - name : NRDP_HOSTNAME - ini: - - section: callback_nrdp - key: hostname - type: string - servicename: - description: Service where the passive check is linked to. - required: true - env: - - name : NRDP_SERVICENAME - ini: - - section: callback_nrdp - key: servicename - type: string -''' +DOCUMENTATION = r""" +name: nrdp +type: notification +author: "Remi VERCHERE (@rverchere)" +short_description: Post task results to a Nagios server through nrdp +description: + - This callback send playbook result to Nagios. + - Nagios shall use NRDP to receive passive events. + - The passive check is sent to a dedicated host/service for Ansible. +options: + url: + description: URL of the nrdp server. + required: true + env: + - name: NRDP_URL + ini: + - section: callback_nrdp + key: url + type: string + validate_certs: + description: Validate the SSL certificate of the nrdp server. (Used for HTTPS URLs). + env: + - name: NRDP_VALIDATE_CERTS + ini: + - section: callback_nrdp + key: validate_nrdp_certs + - section: callback_nrdp + key: validate_certs + type: boolean + default: false + aliases: [validate_nrdp_certs] + token: + description: Token to be allowed to push nrdp events. + required: true + env: + - name: NRDP_TOKEN + ini: + - section: callback_nrdp + key: token + type: string + hostname: + description: Hostname where the passive check is linked to. + required: true + env: + - name: NRDP_HOSTNAME + ini: + - section: callback_nrdp + key: hostname + type: string + servicename: + description: Service where the passive check is linked to. 
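To make the mail callback options above concrete, a hypothetical ansible.cfg sketch (values are placeholders; sender is required since community.general 6.0.0, and the callback only reports failures):

```ini
# ansible.cfg
[defaults]
callback_whitelist = community.general.mail

[callback_mail]
smtphost = smtp.example.com
smtpport = 25
sender = ansible@example.com
to = root@example.com, ops@example.com
cc = audit@example.com
```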
+ required: true + env: + - name: NRDP_SERVICENAME + ini: + - section: callback_nrdp + key: servicename + type: string +""" from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils.common.text.converters import to_bytes diff --git a/plugins/callback/null.py b/plugins/callback/null.py index 6aeeba313a..0cc722f63b 100644 --- a/plugins/callback/null.py +++ b/plugins/callback/null.py @@ -7,16 +7,16 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: 'null' - type: stdout - requirements: - - set as main display callback - short_description: Don't display stuff to screen - description: - - This callback prevents outputting events to screen. -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: 'null' +type: stdout +requirements: + - set as main display callback +short_description: do not display stuff to screen +description: + - This callback prevents outputting events to screen. +""" from ansible.plugins.callback import CallbackBase diff --git a/plugins/callback/opentelemetry.py b/plugins/callback/opentelemetry.py index 58cfa057b7..8cb77f3cf8 100644 --- a/plugins/callback/opentelemetry.py +++ b/plugins/callback/opentelemetry.py @@ -6,120 +6,120 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - author: Victor Martinez (@v1v) - name: opentelemetry - type: notification - short_description: Create distributed traces with OpenTelemetry - version_added: 3.7.0 +DOCUMENTATION = r""" +author: Victor Martinez (@v1v) +name: opentelemetry +type: notification +short_description: Create distributed traces with OpenTelemetry +version_added: 3.7.0 +description: + - This callback creates distributed traces for each Ansible task with OpenTelemetry. + - You can configure the OpenTelemetry exporter and SDK with environment variables. + - See U(https://opentelemetry-python.readthedocs.io/en/latest/exporter/otlp/otlp.html). + - See + U(https://opentelemetry-python.readthedocs.io/en/latest/sdk/environment_variables.html#opentelemetry-sdk-environment-variables). +options: + hide_task_arguments: + default: false + type: bool description: - - This callback creates distributed traces for each Ansible task with OpenTelemetry. - - You can configure the OpenTelemetry exporter and SDK with environment variables. - - See U(https://opentelemetry-python.readthedocs.io/en/latest/exporter/otlp/otlp.html). - - See U(https://opentelemetry-python.readthedocs.io/en/latest/sdk/environment_variables.html#opentelemetry-sdk-environment-variables). - options: - hide_task_arguments: - default: false - type: bool - description: - - Hide the arguments for a task. - env: - - name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS - ini: - - section: callback_opentelemetry - key: hide_task_arguments - version_added: 5.3.0 - enable_from_environment: - type: str - description: - - Whether to enable this callback only if the given environment variable exists and it is set to V(true). - - This is handy when you use Configuration as Code and want to send distributed traces - if running in the CI rather when running Ansible locally. - - For such, it evaluates the given O(enable_from_environment) value as environment variable - and if set to true this plugin will be enabled. 
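A hypothetical ansible.cfg sketch for the nrdp callback above (the enabling line is an assumption; the remaining keys come from its documented ini entries):

```ini
# ansible.cfg (enabling line assumed)
[defaults]
callback_whitelist = community.general.nrdp

[callback_nrdp]
url = https://nagios.example.com/nrdp/
token = <nrdp token>
hostname = ansible-controller
servicename = ansible-playbooks
validate_certs = true
```

The null callback, by contrast, is enabled as the main display callback, for example with stdout_callback = community.general.null in the [defaults] section.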
- env: - - name: ANSIBLE_OPENTELEMETRY_ENABLE_FROM_ENVIRONMENT - ini: - - section: callback_opentelemetry - key: enable_from_environment - version_added: 5.3.0 - version_added: 3.8.0 - otel_service_name: - default: ansible - type: str - description: - - The service name resource attribute. - env: - - name: OTEL_SERVICE_NAME - ini: - - section: callback_opentelemetry - key: otel_service_name - version_added: 5.3.0 - traceparent: - default: None - type: str - description: - - The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header). - env: - - name: TRACEPARENT - disable_logs: - default: false - type: bool - description: - - Disable sending logs. - env: - - name: ANSIBLE_OPENTELEMETRY_DISABLE_LOGS - ini: - - section: callback_opentelemetry - key: disable_logs - version_added: 5.8.0 - disable_attributes_in_logs: - default: false - type: bool - description: - - Disable populating span attributes to the logs. - env: - - name: ANSIBLE_OPENTELEMETRY_DISABLE_ATTRIBUTES_IN_LOGS - ini: - - section: callback_opentelemetry - key: disable_attributes_in_logs - version_added: 7.1.0 - store_spans_in_file: - default: None - type: str - description: - - It stores the exported spans in the given file - env: - - name: ANSIBLE_OPENTELEMETRY_STORE_SPANS_IN_FILE - ini: - - section: callback_opentelemetry - key: store_spans_in_file - version_added: 9.0.0 - otel_exporter_otlp_traces_protocol: - type: str - description: - - E(OTEL_EXPORTER_OTLP_TRACES_PROTOCOL) represents the the transport protocol for spans. - - See - U(https://opentelemetry-python.readthedocs.io/en/latest/sdk/environment_variables.html#envvar-OTEL_EXPORTER_OTLP_TRACES_PROTOCOL). - default: grpc - choices: - - grpc - - http/protobuf - env: - - name: OTEL_EXPORTER_OTLP_TRACES_PROTOCOL - ini: - - section: callback_opentelemetry - key: otel_exporter_otlp_traces_protocol - version_added: 9.0.0 - requirements: - - opentelemetry-api (Python library) - - opentelemetry-exporter-otlp (Python library) - - opentelemetry-sdk (Python library) -''' + - Hide the arguments for a task. + env: + - name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS + ini: + - section: callback_opentelemetry + key: hide_task_arguments + version_added: 5.3.0 + enable_from_environment: + type: str + description: + - Whether to enable this callback only if the given environment variable exists and it is set to V(true). + - This is handy when you use Configuration as Code and want to send distributed traces if running in the CI rather when + running Ansible locally. + - For such, it evaluates the given O(enable_from_environment) value as environment variable and if set to true this + plugin will be enabled. + env: + - name: ANSIBLE_OPENTELEMETRY_ENABLE_FROM_ENVIRONMENT + ini: + - section: callback_opentelemetry + key: enable_from_environment + version_added: 5.3.0 + version_added: 3.8.0 + otel_service_name: + default: ansible + type: str + description: + - The service name resource attribute. + env: + - name: OTEL_SERVICE_NAME + ini: + - section: callback_opentelemetry + key: otel_service_name + version_added: 5.3.0 + traceparent: + default: None + type: str + description: + - The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header). + env: + - name: TRACEPARENT + disable_logs: + default: false + type: bool + description: + - Disable sending logs. 
+ env: + - name: ANSIBLE_OPENTELEMETRY_DISABLE_LOGS + ini: + - section: callback_opentelemetry + key: disable_logs + version_added: 5.8.0 + disable_attributes_in_logs: + default: false + type: bool + description: + - Disable populating span attributes to the logs. + env: + - name: ANSIBLE_OPENTELEMETRY_DISABLE_ATTRIBUTES_IN_LOGS + ini: + - section: callback_opentelemetry + key: disable_attributes_in_logs + version_added: 7.1.0 + store_spans_in_file: + type: str + description: + - It stores the exported spans in the given file. + env: + - name: ANSIBLE_OPENTELEMETRY_STORE_SPANS_IN_FILE + ini: + - section: callback_opentelemetry + key: store_spans_in_file + version_added: 9.0.0 + otel_exporter_otlp_traces_protocol: + type: str + description: + - E(OTEL_EXPORTER_OTLP_TRACES_PROTOCOL) represents the the transport protocol for spans. + - See + U(https://opentelemetry-python.readthedocs.io/en/latest/sdk/environment_variables.html#envvar-OTEL_EXPORTER_OTLP_TRACES_PROTOCOL). + default: grpc + choices: + - grpc + - http/protobuf + env: + - name: OTEL_EXPORTER_OTLP_TRACES_PROTOCOL + ini: + - section: callback_opentelemetry + key: otel_exporter_otlp_traces_protocol + version_added: 9.0.0 +requirements: + - opentelemetry-api (Python library) + - opentelemetry-exporter-otlp (Python library) + - opentelemetry-sdk (Python library) +""" -EXAMPLES = ''' -examples: | +EXAMPLES = r""" +examples: |- Enable the plugin in ansible.cfg: [defaults] callbacks_enabled = community.general.opentelemetry @@ -131,7 +131,7 @@ examples: | export OTEL_EXPORTER_OTLP_HEADERS="authorization=Bearer your_otel_token" export OTEL_SERVICE_NAME=your_service_name export ANSIBLE_OPENTELEMETRY_ENABLED=true -''' +""" import getpass import json @@ -356,6 +356,7 @@ class OpenTelemetrySource(object): status = Status(status_code=StatusCode.OK) if host_data.status != 'included': # Support loops + enriched_error_message = None if 'results' in host_data.result._result: if host_data.status == 'failed': message = self.get_error_message_from_results(host_data.result._result['results'], task_data.action) @@ -556,11 +557,19 @@ class CallbackModule(CallbackBase): self.otel_exporter_otlp_traces_protocol = self.get_option('otel_exporter_otlp_traces_protocol') - def dump_results(self, result): + def dump_results(self, task, result): """ dump the results if disable_logs is not enabled """ if self.disable_logs: return "" - return self._dump_results(result._result) + # ansible.builtin.uri contains the response in the json field + save = dict(result._result) + + if "json" in save and task.action in ("ansible.builtin.uri", "ansible.legacy.uri", "uri"): + save.pop("json") + # ansible.builtin.slurp contains the response in the content field + if "content" in save and task.action in ("ansible.builtin.slurp", "ansible.legacy.slurp", "slurp"): + save.pop("content") + return self._dump_results(save) def v2_playbook_on_start(self, playbook): self.ansible_playbook = basename(playbook._file_name) @@ -611,7 +620,7 @@ class CallbackModule(CallbackBase): self.tasks_data, status, result, - self.dump_results(result) + self.dump_results(self.tasks_data[result._task._uuid], result) ) def v2_runner_on_ok(self, result): @@ -619,7 +628,7 @@ class CallbackModule(CallbackBase): self.tasks_data, 'ok', result, - self.dump_results(result) + self.dump_results(self.tasks_data[result._task._uuid], result) ) def v2_runner_on_skipped(self, result): @@ -627,7 +636,7 @@ class CallbackModule(CallbackBase): self.tasks_data, 'skipped', result, - self.dump_results(result) + 
self.dump_results(self.tasks_data[result._task._uuid], result) ) def v2_playbook_on_include(self, included_file): diff --git a/plugins/callback/say.py b/plugins/callback/say.py index 9d96ad74d9..b1ab31f98a 100644 --- a/plugins/callback/say.py +++ b/plugins/callback/say.py @@ -8,17 +8,17 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: say - type: notification - requirements: - - whitelisting in configuration - - the C(/usr/bin/say) command line program (standard on macOS) or C(espeak) command line program - short_description: notify using software speech synthesizer - description: - - This plugin will use the C(say) or C(espeak) program to "speak" about play events. -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: say +type: notification +requirements: + - whitelisting in configuration + - the C(/usr/bin/say) command line program (standard on macOS) or C(espeak) command line program +short_description: notify using software speech synthesizer +description: + - This plugin will use the C(say) or C(espeak) program to "speak" about play events. +""" import platform import subprocess diff --git a/plugins/callback/selective.py b/plugins/callback/selective.py index 0696757837..7915f1e8f3 100644 --- a/plugins/callback/selective.py +++ b/plugins/callback/selective.py @@ -7,35 +7,35 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: selective - type: stdout - requirements: - - set as main display callback - short_description: only print certain tasks - description: - - This callback only prints tasks that have been tagged with C(print_action) or that have failed. - This allows operators to focus on the tasks that provide value only. - - Tasks that are not printed are placed with a C(.). - - If you increase verbosity all tasks are printed. - options: - nocolor: - default: false - description: This setting allows suppressing colorizing output. - env: - - name: ANSIBLE_NOCOLOR - - name: ANSIBLE_SELECTIVE_DONT_COLORIZE - ini: - - section: defaults - key: nocolor - type: boolean -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: selective +type: stdout +requirements: + - set as main display callback +short_description: only print certain tasks +description: + - This callback only prints tasks that have been tagged with C(print_action) or that have failed. This allows operators + to focus on the tasks that provide value only. + - Tasks that are not printed are placed with a C(.). + - If you increase verbosity all tasks are printed. +options: + nocolor: + default: false + description: This setting allows suppressing colorizing output. 
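As an aside on the community.general.opentelemetry hunk above: dump_results() now receives the task as well as the result, so the callback can strip bulky payloads from a shallow copy of the result before attaching it to the span logs; the json field of uri-style tasks and the content field of slurp-style tasks are popped. A minimal play sketch whose logged results would be trimmed this way, assuming the callback is enabled via callbacks_enabled in ansible.cfg; the URL and path are illustrative only:

- name: Results trimmed by the opentelemetry callback (illustrative)
  hosts: localhost
  gather_facts: false
  tasks:
    - name: HTTP call; the potentially large "json" field is dropped from the span logs
      ansible.builtin.uri:
        url: https://example.com/api/status  # illustrative endpoint
        return_content: true

    - name: File read; the base64 "content" field is dropped from the span logs
      ansible.builtin.slurp:
        src: /etc/hostname  # illustrative path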
+ env: + - name: ANSIBLE_NOCOLOR + - name: ANSIBLE_SELECTIVE_DONT_COLORIZE + ini: + - section: defaults + key: nocolor + type: boolean +""" -EXAMPLES = """ - - ansible.builtin.debug: msg="This will not be printed" - - ansible.builtin.debug: msg="But this will" - tags: [print_action] +EXAMPLES = r""" +- ansible.builtin.debug: msg="This will not be printed" +- ansible.builtin.debug: msg="But this will" + tags: [print_action] """ import difflib diff --git a/plugins/callback/slack.py b/plugins/callback/slack.py index e7a2743ec5..5a99797039 100644 --- a/plugins/callback/slack.py +++ b/plugins/callback/slack.py @@ -8,51 +8,54 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: slack - type: notification - requirements: - - whitelist in configuration - - prettytable (python library) - short_description: Sends play events to a Slack channel - description: - - This is an ansible callback plugin that sends status updates to a Slack channel during playbook execution. - options: - webhook_url: - required: true - description: Slack Webhook URL. - env: - - name: SLACK_WEBHOOK_URL - ini: - - section: callback_slack - key: webhook_url - channel: - default: "#ansible" - description: Slack room to post in. - env: - - name: SLACK_CHANNEL - ini: - - section: callback_slack - key: channel - username: - description: Username to post as. - env: - - name: SLACK_USERNAME - default: ansible - ini: - - section: callback_slack - key: username - validate_certs: - description: Validate the SSL certificate of the Slack server for HTTPS URLs. - env: - - name: SLACK_VALIDATE_CERTS - ini: - - section: callback_slack - key: validate_certs - default: true - type: bool -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: slack +type: notification +requirements: + - whitelist in configuration + - prettytable (python library) +short_description: Sends play events to a Slack channel +description: + - This is an ansible callback plugin that sends status updates to a Slack channel during playbook execution. +options: + webhook_url: + required: true + description: Slack Webhook URL. + type: str + env: + - name: SLACK_WEBHOOK_URL + ini: + - section: callback_slack + key: webhook_url + channel: + default: "#ansible" + description: Slack room to post in. + type: str + env: + - name: SLACK_CHANNEL + ini: + - section: callback_slack + key: channel + username: + description: Username to post as. + type: str + env: + - name: SLACK_USERNAME + default: ansible + ini: + - section: callback_slack + key: username + validate_certs: + description: Validate the SSL certificate of the Slack server for HTTPS URLs. 
+ env: + - name: SLACK_VALIDATE_CERTS + ini: + - section: callback_slack + key: validate_certs + default: true + type: bool +""" import json import os diff --git a/plugins/callback/splunk.py b/plugins/callback/splunk.py index a3e401bc21..41b3b0b443 100644 --- a/plugins/callback/splunk.py +++ b/plugins/callback/splunk.py @@ -6,71 +6,73 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - name: splunk - type: notification - short_description: Sends task result events to Splunk HTTP Event Collector - author: "Stuart Hirst (!UNKNOWN) " +DOCUMENTATION = r""" +name: splunk +type: notification +short_description: Sends task result events to Splunk HTTP Event Collector +author: "Stuart Hirst (!UNKNOWN) " +description: + - This callback plugin will send task results as JSON formatted events to a Splunk HTTP collector. + - The companion Splunk Monitoring & Diagnostics App is available here U(https://splunkbase.splunk.com/app/4023/). + - Credit to "Ryan Currah (@ryancurrah)" for original source upon which this is based. +requirements: + - Whitelisting this callback plugin + - 'Create a HTTP Event Collector in Splunk' + - 'Define the URL and token in C(ansible.cfg)' +options: + url: + description: URL to the Splunk HTTP collector source. + type: str + env: + - name: SPLUNK_URL + ini: + - section: callback_splunk + key: url + authtoken: + description: Token to authenticate the connection to the Splunk HTTP collector. + type: str + env: + - name: SPLUNK_AUTHTOKEN + ini: + - section: callback_splunk + key: authtoken + validate_certs: + description: Whether to validate certificates for connections to HEC. It is not recommended to set to V(false) except + when you are sure that nobody can intercept the connection between this plugin and HEC, as setting it to V(false) allows + man-in-the-middle attacks! + env: + - name: SPLUNK_VALIDATE_CERTS + ini: + - section: callback_splunk + key: validate_certs + type: bool + default: true + version_added: '1.0.0' + include_milliseconds: + description: Whether to include milliseconds as part of the generated timestamp field in the event sent to the Splunk + HTTP collector. + env: + - name: SPLUNK_INCLUDE_MILLISECONDS + ini: + - section: callback_splunk + key: include_milliseconds + type: bool + default: false + version_added: 2.0.0 + batch: description: - - This callback plugin will send task results as JSON formatted events to a Splunk HTTP collector. - - The companion Splunk Monitoring & Diagnostics App is available here U(https://splunkbase.splunk.com/app/4023/). - - Credit to "Ryan Currah (@ryancurrah)" for original source upon which this is based. - requirements: - - Whitelisting this callback plugin - - 'Create a HTTP Event Collector in Splunk' - - 'Define the URL and token in C(ansible.cfg)' - options: - url: - description: URL to the Splunk HTTP collector source. - env: - - name: SPLUNK_URL - ini: - - section: callback_splunk - key: url - authtoken: - description: Token to authenticate the connection to the Splunk HTTP collector. - env: - - name: SPLUNK_AUTHTOKEN - ini: - - section: callback_splunk - key: authtoken - validate_certs: - description: Whether to validate certificates for connections to HEC. It is not recommended to set to - V(false) except when you are sure that nobody can intercept the connection - between this plugin and HEC, as setting it to V(false) allows man-in-the-middle attacks! 
- env: - - name: SPLUNK_VALIDATE_CERTS - ini: - - section: callback_splunk - key: validate_certs - type: bool - default: true - version_added: '1.0.0' - include_milliseconds: - description: Whether to include milliseconds as part of the generated timestamp field in the event - sent to the Splunk HTTP collector. - env: - - name: SPLUNK_INCLUDE_MILLISECONDS - ini: - - section: callback_splunk - key: include_milliseconds - type: bool - default: false - version_added: 2.0.0 - batch: - description: - - Correlation ID which can be set across multiple playbook executions. - env: - - name: SPLUNK_BATCH - ini: - - section: callback_splunk - key: batch - type: str - version_added: 3.3.0 -''' + - Correlation ID which can be set across multiple playbook executions. + env: + - name: SPLUNK_BATCH + ini: + - section: callback_splunk + key: batch + type: str + version_added: 3.3.0 +""" -EXAMPLES = ''' -examples: > +EXAMPLES = r""" +examples: >- To enable, add this to your ansible.cfg file in the defaults block [defaults] callback_whitelist = community.general.splunk @@ -81,7 +83,7 @@ examples: > [callback_splunk] url = http://mysplunkinstance.datapaas.io:8088/services/collector/event authtoken = f23blad6-5965-4537-bf69-5b5a545blabla88 -''' +""" import json import uuid diff --git a/plugins/callback/sumologic.py b/plugins/callback/sumologic.py index 0304b9de52..108f324b29 100644 --- a/plugins/callback/sumologic.py +++ b/plugins/callback/sumologic.py @@ -6,7 +6,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' +DOCUMENTATION = r""" name: sumologic type: notification short_description: Sends task result events to Sumologic @@ -15,20 +15,21 @@ description: - This callback plugin will send task results as JSON formatted events to a Sumologic HTTP collector source. requirements: - Whitelisting this callback plugin - - 'Create a HTTP collector source in Sumologic and specify a custom timestamp format of V(yyyy-MM-dd HH:mm:ss ZZZZ) and a custom timestamp locator - of V("timestamp": "(.*\)")' + - 'Create a HTTP collector source in Sumologic and specify a custom timestamp format of V(yyyy-MM-dd HH:mm:ss ZZZZ) and + a custom timestamp locator of V("timestamp": "(.*\)")' options: url: description: URL to the Sumologic HTTP collector source. + type: str env: - name: SUMOLOGIC_URL ini: - section: callback_sumologic key: url -''' +""" -EXAMPLES = ''' -examples: | +EXAMPLES = r""" +examples: |- To enable, add this to your ansible.cfg file in the defaults block [defaults] callback_whitelist = community.general.sumologic @@ -39,7 +40,7 @@ examples: | Set the ansible.cfg variable in the callback_sumologic block [callback_sumologic] url = https://endpoint1.collection.us2.sumologic.com/receiver/v1/http/R8moSv1d8EW9LAUFZJ6dbxCFxwLH6kfCdcBfddlfxCbLuL-BN5twcTpMk__pYy_cDmp== -''' +""" import json import uuid diff --git a/plugins/callback/syslog_json.py b/plugins/callback/syslog_json.py index 43d6ff2f9f..d1797455ac 100644 --- a/plugins/callback/syslog_json.py +++ b/plugins/callback/syslog_json.py @@ -7,51 +7,54 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: syslog_json - type: notification - requirements: - - whitelist in configuration - short_description: sends JSON events to syslog - description: - - This plugin logs ansible-playbook and ansible runs to a syslog server in JSON format. - options: - server: - description: Syslog server that will receive the event. 
- env: - - name: SYSLOG_SERVER - default: localhost - ini: - - section: callback_syslog_json - key: syslog_server - port: - description: Port on which the syslog server is listening. - env: - - name: SYSLOG_PORT - default: 514 - ini: - - section: callback_syslog_json - key: syslog_port - facility: - description: Syslog facility to log as. - env: - - name: SYSLOG_FACILITY - default: user - ini: - - section: callback_syslog_json - key: syslog_facility - setup: - description: Log setup tasks. - env: - - name: ANSIBLE_SYSLOG_SETUP - type: bool - default: true - ini: - - section: callback_syslog_json - key: syslog_setup - version_added: 4.5.0 -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: syslog_json +type: notification +requirements: + - whitelist in configuration +short_description: sends JSON events to syslog +description: + - This plugin logs ansible-playbook and ansible runs to a syslog server in JSON format. +options: + server: + description: Syslog server that will receive the event. + type: str + env: + - name: SYSLOG_SERVER + default: localhost + ini: + - section: callback_syslog_json + key: syslog_server + port: + description: Port on which the syslog server is listening. + type: int + env: + - name: SYSLOG_PORT + default: 514 + ini: + - section: callback_syslog_json + key: syslog_port + facility: + description: Syslog facility to log as. + type: str + env: + - name: SYSLOG_FACILITY + default: user + ini: + - section: callback_syslog_json + key: syslog_facility + setup: + description: Log setup tasks. + env: + - name: ANSIBLE_SYSLOG_SETUP + type: bool + default: true + ini: + - section: callback_syslog_json + key: syslog_setup + version_added: 4.5.0 +""" import logging import logging.handlers diff --git a/plugins/callback/timestamp.py b/plugins/callback/timestamp.py index 07cd8d239c..262db47dc9 100644 --- a/plugins/callback/timestamp.py +++ b/plugins/callback/timestamp.py @@ -10,46 +10,45 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type DOCUMENTATION = r""" - name: timestamp - type: stdout - short_description: Adds simple timestamp for each header - version_added: 9.0.0 - description: - - This callback adds simple timestamp for each header. - author: kurokobo (@kurokobo) - options: - timezone: - description: - - Timezone to use for the timestamp in IANA time zone format. - - For example C(America/New_York), C(Asia/Tokyo)). Ignored on Python < 3.9. - ini: - - section: callback_timestamp - key: timezone - env: - - name: ANSIBLE_CALLBACK_TIMESTAMP_TIMEZONE - type: string - format_string: - description: - - Format of the timestamp shown to user in 1989 C standard format. - - > - Refer to L(the Python documentation,https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes) - for the available format codes. - ini: - - section: callback_timestamp - key: format_string - env: - - name: ANSIBLE_CALLBACK_TIMESTAMP_FORMAT_STRING - default: "%H:%M:%S" - type: string - seealso: - - plugin: ansible.posix.profile_tasks - plugin_type: callback - description: > - You can use P(ansible.posix.profile_tasks#callback) callback plugin to time individual tasks and overall execution time - with detailed timestamps. - extends_documentation_fragment: - - ansible.builtin.default_callback - - ansible.builtin.result_format_callback +name: timestamp +type: stdout +short_description: Adds simple timestamp for each header +version_added: 9.0.0 +description: + - This callback adds simple timestamp for each header. 
+author: kurokobo (@kurokobo) +options: + timezone: + description: + - Timezone to use for the timestamp in IANA time zone format. + - For example V(America/New_York), V(Asia/Tokyo)). Ignored on Python < 3.9. + ini: + - section: callback_timestamp + key: timezone + env: + - name: ANSIBLE_CALLBACK_TIMESTAMP_TIMEZONE + type: string + format_string: + description: + - Format of the timestamp shown to user in 1989 C standard format. + - Refer to L(the Python documentation,https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes) + for the available format codes. + ini: + - section: callback_timestamp + key: format_string + env: + - name: ANSIBLE_CALLBACK_TIMESTAMP_FORMAT_STRING + default: "%H:%M:%S" + type: string +seealso: + - plugin: ansible.posix.profile_tasks + plugin_type: callback + description: >- + You can use P(ansible.posix.profile_tasks#callback) callback plugin to time individual tasks and overall execution time + with detailed timestamps. +extends_documentation_fragment: + - ansible.builtin.default_callback + - ansible.builtin.result_format_callback """ diff --git a/plugins/callback/unixy.py b/plugins/callback/unixy.py index 4908202c23..e251a5f080 100644 --- a/plugins/callback/unixy.py +++ b/plugins/callback/unixy.py @@ -8,18 +8,18 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - name: unixy - type: stdout - author: Al Bowles (@akatch) - short_description: condensed Ansible output - description: - - Consolidated Ansible output in the style of LINUX/UNIX startup logs. - extends_documentation_fragment: - - default_callback - requirements: - - set as stdout in configuration -''' +DOCUMENTATION = r""" +name: unixy +type: stdout +author: Al Bowles (@akatch) +short_description: condensed Ansible output +description: + - Consolidated Ansible output in the style of LINUX/UNIX startup logs. +extends_documentation_fragment: + - default_callback +requirements: + - set as stdout in configuration +""" from os.path import basename from ansible import constants as C diff --git a/plugins/callback/yaml.py b/plugins/callback/yaml.py index ae2c8f8810..1f0dbcac14 100644 --- a/plugins/callback/yaml.py +++ b/plugins/callback/yaml.py @@ -7,19 +7,27 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: yaml - type: stdout - short_description: YAML-ized Ansible screen output - description: - - Ansible output that can be quite a bit easier to read than the - default JSON formatting. - extends_documentation_fragment: - - default_callback - requirements: - - set as stdout in configuration -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: yaml +type: stdout +short_description: YAML-ized Ansible screen output +description: + - Ansible output that can be quite a bit easier to read than the default JSON formatting. +extends_documentation_fragment: + - default_callback +requirements: + - set as stdout in configuration +seealso: + - plugin: ansible.builtin.default + plugin_type: callback + description: >- + There is a parameter O(ansible.builtin.default#callback:result_format) in P(ansible.builtin.default#callback) that allows + you to change the output format to YAML. +notes: + - With ansible-core 2.13 or newer, you can instead specify V(yaml) for the parameter O(ansible.builtin.default#callback:result_format) + in P(ansible.builtin.default#callback). 
+""" import yaml import json diff --git a/plugins/connection/chroot.py b/plugins/connection/chroot.py index 810316aaa5..9e0a0e73a9 100644 --- a/plugins/connection/chroot.py +++ b/plugins/connection/chroot.py @@ -10,76 +10,66 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - author: Maykel Moya (!UNKNOWN) - name: chroot - short_description: Interact with local chroot +DOCUMENTATION = r""" +author: Maykel Moya (!UNKNOWN) +name: chroot +short_description: Interact with local chroot +description: + - Run commands or put/fetch files to an existing chroot on the Ansible controller. +options: + remote_addr: description: - - Run commands or put/fetch files to an existing chroot on the Ansible controller. - options: - remote_addr: - description: - - The path of the chroot you want to access. - default: inventory_hostname - vars: - - name: inventory_hostname - - name: ansible_host - executable: - description: - - User specified executable shell - ini: - - section: defaults - key: executable - env: - - name: ANSIBLE_EXECUTABLE - vars: - - name: ansible_executable - default: /bin/sh - chroot_exe: - description: - - User specified chroot binary - ini: - - section: chroot_connection - key: exe - env: - - name: ANSIBLE_CHROOT_EXE - vars: - - name: ansible_chroot_exe - default: chroot - disable_root_check: - description: - - Do not check that the user is not root. - ini: - - section: chroot_connection - key: disable_root_check - env: - - name: ANSIBLE_CHROOT_DISABLE_ROOT_CHECK - vars: - - name: ansible_chroot_disable_root_check - default: false - type: bool - version_added: 7.3.0 -''' + - The path of the chroot you want to access. + type: string + default: inventory_hostname + vars: + - name: inventory_hostname + - name: ansible_host + executable: + description: + - User specified executable shell. + type: string + ini: + - section: defaults + key: executable + env: + - name: ANSIBLE_EXECUTABLE + vars: + - name: ansible_executable + default: /bin/sh + chroot_exe: + description: + - User specified chroot binary. + type: string + ini: + - section: chroot_connection + key: exe + env: + - name: ANSIBLE_CHROOT_EXE + vars: + - name: ansible_chroot_exe + default: chroot + disable_root_check: + description: + - Do not check that the user is not root. + ini: + - section: chroot_connection + key: disable_root_check + env: + - name: ANSIBLE_CHROOT_DISABLE_ROOT_CHECK + vars: + - name: ansible_chroot_disable_root_check + default: false + type: bool + version_added: 7.3.0 +""" EXAMPLES = r""" -# Plugin requires root privileges for chroot, -E preserves your env (and location of ~/.ansible): -# sudo -E ansible-playbook ... 
-# -# Static inventory file -# [chroots] -# /path/to/debootstrap -# /path/to/feboostrap -# /path/to/lxc-image -# /path/to/chroot - -# playbook ---- - hosts: chroots connection: community.general.chroot tasks: - debug: msg: "This is coming from chroot environment" - """ import os diff --git a/plugins/connection/funcd.py b/plugins/connection/funcd.py index 219a8cccd3..86d085fc09 100644 --- a/plugins/connection/funcd.py +++ b/plugins/connection/funcd.py @@ -9,23 +9,24 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - author: Michael Scherer (@mscherer) - name: funcd - short_description: Use funcd to connect to target +DOCUMENTATION = r""" +author: Michael Scherer (@mscherer) +name: funcd +short_description: Use funcd to connect to target +description: + - This transport permits you to use Ansible over Func. + - For people who have already setup func and that wish to play with ansible, this permit to move gradually to ansible without + having to redo completely the setup of the network. +options: + remote_addr: description: - - This transport permits you to use Ansible over Func. - - For people who have already setup func and that wish to play with ansible, - this permit to move gradually to ansible without having to redo completely the setup of the network. - options: - remote_addr: - description: - - The path of the chroot you want to access. - default: inventory_hostname - vars: - - name: ansible_host - - name: ansible_func_host -''' + - The path of the chroot you want to access. + type: string + default: inventory_hostname + vars: + - name: ansible_host + - name: ansible_func_host +""" HAVE_FUNC = False try: diff --git a/plugins/connection/incus.py b/plugins/connection/incus.py index 81d6f971c7..b9226284c2 100644 --- a/plugins/connection/incus.py +++ b/plugins/connection/incus.py @@ -8,43 +8,47 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = """ - author: Stéphane Graber (@stgraber) - name: incus - short_description: Run tasks in Incus instances via the Incus CLI. +DOCUMENTATION = r""" +author: Stéphane Graber (@stgraber) +name: incus +short_description: Run tasks in Incus instances using the Incus CLI +description: + - Run commands or put/fetch files to an existing Incus instance using Incus CLI. +version_added: "8.2.0" +options: + remote_addr: description: - - Run commands or put/fetch files to an existing Incus instance using Incus CLI. - version_added: "8.2.0" - options: - remote_addr: - description: - - The instance identifier. - default: inventory_hostname - vars: - - name: inventory_hostname - - name: ansible_host - - name: ansible_incus_host - executable: - description: - - The shell to use for execution inside the instance. - default: /bin/sh - vars: - - name: ansible_executable - - name: ansible_incus_executable - remote: - description: - - The name of the Incus remote to use (per C(incus remote list)). - - Remotes are used to access multiple servers from a single client. - default: local - vars: - - name: ansible_incus_remote - project: - description: - - The name of the Incus project to use (per C(incus project list)). - - Projects are used to divide the instances running on a server. - default: default - vars: - - name: ansible_incus_project + - The instance identifier. 
+ type: string + default: inventory_hostname + vars: + - name: inventory_hostname + - name: ansible_host + - name: ansible_incus_host + executable: + description: + - The shell to use for execution inside the instance. + type: string + default: /bin/sh + vars: + - name: ansible_executable + - name: ansible_incus_executable + remote: + description: + - The name of the Incus remote to use (per C(incus remote list)). + - Remotes are used to access multiple servers from a single client. + type: string + default: local + vars: + - name: ansible_incus_remote + project: + description: + - The name of the Incus project to use (per C(incus project list)). + - Projects are used to divide the instances running on a server. + type: string + default: default + vars: + - name: ansible_incus_project """ import os diff --git a/plugins/connection/iocage.py b/plugins/connection/iocage.py index 2e2a6f0937..8c39f07cd3 100644 --- a/plugins/connection/iocage.py +++ b/plugins/connection/iocage.py @@ -10,26 +10,28 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - author: Stephan Lohse (!UNKNOWN) - name: iocage - short_description: Run tasks in iocage jails +DOCUMENTATION = r""" +author: Stephan Lohse (!UNKNOWN) +name: iocage +short_description: Run tasks in iocage jails +description: + - Run commands or put/fetch files to an existing iocage jail. +options: + remote_addr: description: - - Run commands or put/fetch files to an existing iocage jail - options: - remote_addr: - description: - - Path to the jail - vars: - - name: ansible_host - - name: ansible_iocage_host - remote_user: - description: - - User to execute as inside the jail - vars: - - name: ansible_user - - name: ansible_iocage_user -''' + - Path to the jail. + type: string + vars: + - name: ansible_host + - name: ansible_iocage_host + remote_user: + description: + - User to execute as inside the jail. + type: string + vars: + - name: ansible_user + - name: ansible_iocage_user +""" import subprocess diff --git a/plugins/connection/jail.py b/plugins/connection/jail.py index 3a3edd4b18..a5a21ff635 100644 --- a/plugins/connection/jail.py +++ b/plugins/connection/jail.py @@ -10,28 +10,30 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - author: Ansible Core Team - name: jail - short_description: Run tasks in jails +DOCUMENTATION = r""" +author: Ansible Core Team +name: jail +short_description: Run tasks in jails +description: + - Run commands or put/fetch files to an existing jail. +options: + remote_addr: description: - - Run commands or put/fetch files to an existing jail - options: - remote_addr: - description: - - Path to the jail - default: inventory_hostname - vars: - - name: inventory_hostname - - name: ansible_host - - name: ansible_jail_host - remote_user: - description: - - User to execute as inside the jail - vars: - - name: ansible_user - - name: ansible_jail_user -''' + - Path to the jail. + type: string + default: inventory_hostname + vars: + - name: inventory_hostname + - name: ansible_host + - name: ansible_jail_host + remote_user: + description: + - User to execute as inside the jail. 
+ type: string + vars: + - name: ansible_user + - name: ansible_jail_user +""" import os import os.path diff --git a/plugins/connection/lxc.py b/plugins/connection/lxc.py index 7bb5824fac..7f7f3f9242 100644 --- a/plugins/connection/lxc.py +++ b/plugins/connection/lxc.py @@ -7,29 +7,31 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - author: Joerg Thalheim (!UNKNOWN) - name: lxc - short_description: Run tasks in lxc containers via lxc python library +DOCUMENTATION = r""" +author: Joerg Thalheim (!UNKNOWN) +name: lxc +short_description: Run tasks in LXC containers using lxc python library +description: + - Run commands or put/fetch files to an existing LXC container using lxc python library. +options: + remote_addr: description: - - Run commands or put/fetch files to an existing lxc container using lxc python library - options: - remote_addr: - description: - - Container identifier - default: inventory_hostname - vars: - - name: inventory_hostname - - name: ansible_host - - name: ansible_lxc_host - executable: - default: /bin/sh - description: - - Shell executable - vars: - - name: ansible_executable - - name: ansible_lxc_executable -''' + - Container identifier. + type: string + default: inventory_hostname + vars: + - name: inventory_hostname + - name: ansible_host + - name: ansible_lxc_host + executable: + default: /bin/sh + description: + - Shell executable. + type: string + vars: + - name: ansible_executable + - name: ansible_lxc_executable +""" import os import shutil diff --git a/plugins/connection/lxd.py b/plugins/connection/lxd.py index 0e784b85fd..739708eebd 100644 --- a/plugins/connection/lxd.py +++ b/plugins/connection/lxd.py @@ -7,44 +7,48 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - author: Matt Clay (@mattclay) - name: lxd - short_description: Run tasks in LXD instances via C(lxc) CLI +DOCUMENTATION = r""" +author: Matt Clay (@mattclay) +name: lxd +short_description: Run tasks in LXD instances using C(lxc) CLI +description: + - Run commands or put/fetch files to an existing instance using C(lxc) CLI. +options: + remote_addr: description: - - Run commands or put/fetch files to an existing instance using C(lxc) CLI. - options: - remote_addr: - description: - - Instance (container/VM) identifier. - - Since community.general 8.0.0, a FQDN can be provided; in that case, the first component (the part before C(.)) - is used as the instance identifier. - default: inventory_hostname - vars: - - name: inventory_hostname - - name: ansible_host - - name: ansible_lxd_host - executable: - description: - - Shell to use for execution inside instance. - default: /bin/sh - vars: - - name: ansible_executable - - name: ansible_lxd_executable - remote: - description: - - Name of the LXD remote to use. - default: local - vars: - - name: ansible_lxd_remote - version_added: 2.0.0 - project: - description: - - Name of the LXD project to use. - vars: - - name: ansible_lxd_project - version_added: 2.0.0 -''' + - Instance (container/VM) identifier. + - Since community.general 8.0.0, a FQDN can be provided; in that case, the first component (the part before C(.)) is + used as the instance identifier. + type: string + default: inventory_hostname + vars: + - name: inventory_hostname + - name: ansible_host + - name: ansible_lxd_host + executable: + description: + - Shell to use for execution inside instance. 
+ type: string + default: /bin/sh + vars: + - name: ansible_executable + - name: ansible_lxd_executable + remote: + description: + - Name of the LXD remote to use. + type: string + default: local + vars: + - name: ansible_lxd_remote + version_added: 2.0.0 + project: + description: + - Name of the LXD project to use. + type: string + vars: + - name: ansible_lxd_project + version_added: 2.0.0 +""" import os from subprocess import Popen, PIPE diff --git a/plugins/connection/qubes.py b/plugins/connection/qubes.py index 25594e952b..c3ad65b18b 100644 --- a/plugins/connection/qubes.py +++ b/plugins/connection/qubes.py @@ -12,32 +12,33 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - name: qubes - short_description: Interact with an existing QubesOS AppVM +DOCUMENTATION = r""" +name: qubes +short_description: Interact with an existing QubesOS AppVM +description: + - Run commands or put/fetch files to an existing Qubes AppVM using qubes tools. +author: Kushal Das (@kushaldas) + + +options: + remote_addr: description: - - Run commands or put/fetch files to an existing Qubes AppVM using qubes tools. - - author: Kushal Das (@kushaldas) - - - options: - remote_addr: - description: - - vm name - default: inventory_hostname - vars: - - name: ansible_host - remote_user: - description: - - The user to execute as inside the vm. - default: The *user* account as default in Qubes OS. - vars: - - name: ansible_user + - VM name. + type: string + default: inventory_hostname + vars: + - name: ansible_host + remote_user: + description: + - The user to execute as inside the VM. + type: string + default: The I(user) account as default in Qubes OS. + vars: + - name: ansible_user # keyword: # - name: hosts -''' +""" import subprocess @@ -116,7 +117,7 @@ class Connection(ConnectionBase): rc, stdout, stderr = self._qubes(cmd) - display.vvvvv("STDOUT %r STDERR %r" % (stderr, stderr)) + display.vvvvv("STDOUT %r STDERR %r" % (stdout, stderr)) return rc, stdout, stderr def put_file(self, in_path, out_path): diff --git a/plugins/connection/saltstack.py b/plugins/connection/saltstack.py index 1dbc7296c7..316a15aaa8 100644 --- a/plugins/connection/saltstack.py +++ b/plugins/connection/saltstack.py @@ -10,13 +10,13 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - author: Michael Scherer (@mscherer) - name: saltstack - short_description: Allow ansible to piggyback on salt minions - description: - - This allows you to use existing Saltstack infrastructure to connect to targets. -''' +DOCUMENTATION = r""" +author: Michael Scherer (@mscherer) +name: saltstack +short_description: Allow ansible to piggyback on salt minions +description: + - This allows you to use existing Saltstack infrastructure to connect to targets. +""" import os import base64 diff --git a/plugins/connection/zone.py b/plugins/connection/zone.py index 34827c7e37..ffddad0d46 100644 --- a/plugins/connection/zone.py +++ b/plugins/connection/zone.py @@ -11,21 +11,22 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - author: Ansible Core Team - name: zone - short_description: Run tasks in a zone instance +DOCUMENTATION = r""" +author: Ansible Core Team +name: zone +short_description: Run tasks in a zone instance +description: + - Run commands or put/fetch files to an existing zone. 
+options: + remote_addr: description: - - Run commands or put/fetch files to an existing zone - options: - remote_addr: - description: - - Zone identifier - default: inventory_hostname - vars: - - name: ansible_host - - name: ansible_zone_host -''' + - Zone identifier. + type: string + default: inventory_hostname + vars: + - name: ansible_host + - name: ansible_zone_host +""" import os import os.path diff --git a/plugins/doc_fragments/django.py b/plugins/doc_fragments/django.py index d92799937d..f89ec91448 100644 --- a/plugins/doc_fragments/django.py +++ b/plugins/doc_fragments/django.py @@ -51,3 +51,12 @@ seealso: Please make sure that you select the right version of Django in the version selector on that page. link: https://docs.djangoproject.com/en/5.0/ref/django-admin/ ''' + + DATABASE = r''' +options: + database: + description: + - Specify the database to be used. + type: str + default: default +''' diff --git a/plugins/doc_fragments/onepassword.py b/plugins/doc_fragments/onepassword.py index 4035f81796..a67c9e4dc1 100644 --- a/plugins/doc_fragments/onepassword.py +++ b/plugins/doc_fragments/onepassword.py @@ -9,7 +9,7 @@ __metaclass__ = type class ModuleDocFragment(object): - DOCUMENTATION = r''' + DOCUMENTATION = r""" requirements: - See U(https://support.1password.com/command-line/) options: @@ -18,7 +18,8 @@ options: aliases: ['vault_password'] type: str section: - description: Item section containing the field to retrieve (case-insensitive). If absent will return first match from any section. + description: Item section containing the field to retrieve (case-insensitive). If absent will return first match from + any section. domain: description: Domain of 1Password. default: '1password.com' @@ -55,25 +56,25 @@ options: env: - name: OP_CONNECT_TOKEN version_added: 8.1.0 -''' +""" - LOOKUP = r''' + LOOKUP = r""" options: service_account_token: env: - name: OP_SERVICE_ACCOUNT_TOKEN version_added: 8.2.0 notes: - - This lookup will use an existing 1Password session if one exists. If not, and you have already - performed an initial sign in (meaning C(~/.op/config), C(~/.config/op/config) or C(~/.config/.op/config) exists), then only the - O(master_password) is required. You may optionally specify O(subdomain) in this scenario, otherwise the last used subdomain will be used by C(op). + - This lookup will use an existing 1Password session if one exists. If not, and you have already performed an initial sign + in (meaning C(~/.op/config), C(~/.config/op/config) or C(~/.config/.op/config) exists), then only the O(master_password) + is required. You may optionally specify O(subdomain) in this scenario, otherwise the last used subdomain will be used + by C(op). - This lookup can perform an initial login by providing O(subdomain), O(username), O(secret_key), and O(master_password). - Can target a specific account by providing the O(account_id). - - Due to the B(very) sensitive nature of these credentials, it is B(highly) recommended that you only pass in the minimal credentials - needed at any given time. Also, store these credentials in an Ansible Vault using a key that is equal to or greater in strength - to the 1Password master password. - - This lookup stores potentially sensitive data from 1Password as Ansible facts. - Facts are subject to caching if enabled, which means this data could be stored in clear text - on disk or in a database. 
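The connection plugins reformatted above (chroot, incus, iocage, jail, lxc, lxd, qubes, zone) all take their options from inventory variables such as ansible_host plus the plugin-specific vars listed in each DOCUMENTATION block. A minimal inventory sketch under that assumption, with an existing LXD instance named web1 and a chroot at /srv/chroots/bookworm; the host names and paths are illustrative:

all:
  hosts:
    web1:
      ansible_connection: community.general.lxd
      ansible_lxd_remote: local            # "remote" option documented above
      ansible_lxd_project: default         # "project" option documented above
    bookworm-build:
      ansible_connection: community.general.chroot
      ansible_host: /srv/chroots/bookworm  # path of the chroot ("remote_addr")
      ansible_chroot_exe: chroot           # "chroot_exe" option documented above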
+ - Due to the B(very) sensitive nature of these credentials, it is B(highly) recommended that you only pass in the minimal + credentials needed at any given time. Also, store these credentials in an Ansible Vault using a key that is equal to or + greater in strength to the 1Password master password. + - This lookup stores potentially sensitive data from 1Password as Ansible facts. Facts are subject to caching if enabled, + which means this data could be stored in clear text on disk or in a database. - Tested with C(op) version 2.7.2. -''' +""" diff --git a/plugins/doc_fragments/oneview.py b/plugins/doc_fragments/oneview.py index a88226d7d7..3caabe4512 100644 --- a/plugins/doc_fragments/oneview.py +++ b/plugins/doc_fragments/oneview.py @@ -11,70 +11,67 @@ __metaclass__ = type class ModuleDocFragment(object): # OneView doc fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: - config: - description: - - Path to a JSON configuration file containing the OneView client configuration. - The configuration file is optional and when used should be present in the host running the ansible commands. - If the file path is not provided, the configuration will be loaded from environment variables. - For links to example configuration files or how to use the environment variables verify the notes section. - type: path - api_version: - description: - - OneView API Version. - type: int - image_streamer_hostname: - description: - - IP address or hostname for the HPE Image Streamer REST API. - type: str - hostname: - description: - - IP address or hostname for the appliance. - type: str - username: - description: - - Username for API authentication. - type: str - password: - description: - - Password for API authentication. - type: str + config: + description: + - Path to a JSON configuration file containing the OneView client configuration. The configuration file is optional + and when used should be present in the host running the ansible commands. If the file path is not provided, the configuration + will be loaded from environment variables. For links to example configuration files or how to use the environment + variables verify the notes section. + type: path + api_version: + description: + - OneView API Version. + type: int + image_streamer_hostname: + description: + - IP address or hostname for the HPE Image Streamer REST API. + type: str + hostname: + description: + - IP address or hostname for the appliance. + type: str + username: + description: + - Username for API authentication. + type: str + password: + description: + - Password for API authentication. + type: str requirements: - Python >= 2.7.9 notes: - - "A sample configuration file for the config parameter can be found at: - U(https://github.com/HewlettPackard/oneview-ansible/blob/master/examples/oneview_config-rename.json)" - - "Check how to use environment variables for configuration at: - U(https://github.com/HewlettPackard/oneview-ansible#environment-variables)" - - "Additional Playbooks for the HPE OneView Ansible modules can be found at: - U(https://github.com/HewlettPackard/oneview-ansible/tree/master/examples)" - - "The OneView API version used will directly affect returned and expected fields in resources. 
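The onepassword LOOKUP fragment above covers session handling and credential hygiene; the actual retrieval happens through the community.general.onepassword lookup. A hedged one-task sketch, assuming an existing sign-in and an item named "My Server" in the default vault; the item and field names are illustrative and real secrets should not be printed like this:

- name: Read a single field from 1Password (item/field names are illustrative)
  ansible.builtin.debug:
    msg: "{{ lookup('community.general.onepassword', 'My Server', field='password') }}"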
- Information on setting the desired API version and can be found at: - U(https://github.com/HewlettPackard/oneview-ansible#setting-your-oneview-version)" - ''' + - 'A sample configuration file for the config parameter can be found at: + U(https://github.com/HewlettPackard/oneview-ansible/blob/master/examples/oneview_config-rename.json).' + - 'Check how to use environment variables for configuration at: U(https://github.com/HewlettPackard/oneview-ansible#environment-variables).' + - 'Additional Playbooks for the HPE OneView Ansible modules can be found at: U(https://github.com/HewlettPackard/oneview-ansible/tree/master/examples).' + - 'The OneView API version used will directly affect returned and expected fields in resources. Information on setting the + desired API version and can be found at: U(https://github.com/HewlettPackard/oneview-ansible#setting-your-oneview-version).' +""" - VALIDATEETAG = r''' + VALIDATEETAG = r""" options: - validate_etag: - description: - - When the ETag Validation is enabled, the request will be conditionally processed only if the current ETag - for the resource matches the ETag provided in the data. - type: bool - default: true -''' + validate_etag: + description: + - When the ETag Validation is enabled, the request will be conditionally processed only if the current ETag for the + resource matches the ETag provided in the data. + type: bool + default: true +""" - FACTSPARAMS = r''' + FACTSPARAMS = r""" options: - params: - description: - - List of parameters to delimit, filter and sort the list of resources. - - "Parameter keys allowed are:" - - "C(start): The first item to return, using 0-based indexing." - - "C(count): The number of resources to return." - - "C(filter): A general filter/query string to narrow the list of items returned." - - "C(sort): The sort order of the returned data set." - type: dict -''' + params: + description: + - List of parameters to delimit, filter and sort the list of resources. + - 'Parameter keys allowed are:' + - 'V(start): The first item to return, using 0-based indexing.' + - 'V(count): The number of resources to return.' + - 'V(filter): A general filter/query string to narrow the list of items returned.' + - 'V(sort): The sort order of the returned data set.' + type: dict +""" diff --git a/plugins/doc_fragments/online.py b/plugins/doc_fragments/online.py index 37e39cfa26..0149872b0a 100644 --- a/plugins/doc_fragments/online.py +++ b/plugins/doc_fragments/online.py @@ -10,26 +10,26 @@ __metaclass__ = type class ModuleDocFragment(object): # Standard documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: api_token: description: - Online OAuth token. type: str required: true - aliases: [ oauth_token ] + aliases: [oauth_token] api_url: description: - Online API URL. type: str default: 'https://api.online.net' - aliases: [ base_url ] + aliases: [base_url] api_timeout: description: - HTTP timeout to Online API in seconds. type: int default: 30 - aliases: [ timeout ] + aliases: [timeout] validate_certs: description: - Validate SSL certs of the Online API. @@ -37,9 +37,7 @@ options: default: true notes: - Also see the API documentation on U(https://console.online.net/en/api/). 
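For the oneview FACTSPARAMS fragment above, the params option is a plain dictionary whose recognised keys are start, count, filter and sort. A sketch of how an info module consuming this fragment might be called; the config path, filter expression and sort value are assumptions made for illustration:

- name: Page through resources with delimit/filter/sort parameters (illustrative)
  community.general.oneview_fc_network_info:  # one consumer of this doc fragment
    config: /etc/oneview/oneview_config.json  # illustrative path
    params:
      start: 0                    # first item to return, 0-based
      count: 25                   # number of resources to return
      filter: "name='MyFabric'"   # assumed filter/query string
      sort: "name:ascending"      # assumed sort order
  register: fc_networks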
- - If O(api_token) is not set within the module, the following - environment variables can be used in decreasing order of precedence + - If O(api_token) is not set within the module, the following environment variables can be used in decreasing order of precedence E(ONLINE_TOKEN), E(ONLINE_API_KEY), E(ONLINE_OAUTH_TOKEN), E(ONLINE_API_TOKEN). - - If one wants to use a different O(api_url) one can also set the E(ONLINE_API_URL) - environment variable. -''' + - If one wants to use a different O(api_url) one can also set the E(ONLINE_API_URL) environment variable. +""" diff --git a/plugins/doc_fragments/opennebula.py b/plugins/doc_fragments/opennebula.py index 567faf1a77..381f52c272 100644 --- a/plugins/doc_fragments/opennebula.py +++ b/plugins/doc_fragments/opennebula.py @@ -10,36 +10,36 @@ __metaclass__ = type class ModuleDocFragment(object): # OpenNebula common documentation - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: - api_url: - description: - - The ENDPOINT URL of the XMLRPC server. - - If not specified then the value of the E(ONE_URL) environment variable, if any, is used. - type: str - aliases: - - api_endpoint - api_username: - description: - - The name of the user for XMLRPC authentication. - - If not specified then the value of the E(ONE_USERNAME) environment variable, if any, is used. - type: str - api_password: - description: - - The password or token for XMLRPC authentication. - - If not specified then the value of the E(ONE_PASSWORD) environment variable, if any, is used. - type: str - aliases: - - api_token - validate_certs: - description: - - Whether to validate the TLS/SSL certificates or not. - - This parameter is ignored if E(PYTHONHTTPSVERIFY) environment variable is used. - type: bool - default: true - wait_timeout: - description: - - Time to wait for the desired state to be reached before timeout, in seconds. - type: int - default: 300 -''' + api_url: + description: + - The ENDPOINT URL of the XMLRPC server. + - If not specified then the value of the E(ONE_URL) environment variable, if any, is used. + type: str + aliases: + - api_endpoint + api_username: + description: + - The name of the user for XMLRPC authentication. + - If not specified then the value of the E(ONE_USERNAME) environment variable, if any, is used. + type: str + api_password: + description: + - The password or token for XMLRPC authentication. + - If not specified then the value of the E(ONE_PASSWORD) environment variable, if any, is used. + type: str + aliases: + - api_token + validate_certs: + description: + - Whether to validate the TLS/SSL certificates or not. + - This parameter is ignored if E(PYTHONHTTPSVERIFY) environment variable is used. + type: bool + default: true + wait_timeout: + description: + - Time to wait for the desired state to be reached before timeout, in seconds. + type: int + default: 300 +""" diff --git a/plugins/doc_fragments/openswitch.py b/plugins/doc_fragments/openswitch.py index a203a3b409..f0e9e87c3d 100644 --- a/plugins/doc_fragments/openswitch.py +++ b/plugins/doc_fragments/openswitch.py @@ -11,75 +11,62 @@ __metaclass__ = type class ModuleDocFragment(object): # Standard files documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: host: description: - - Specifies the DNS host name or address for connecting to the remote - device over the specified transport. The value of host is used as - the destination address for the transport. Note this argument - does not affect the SSH argument. 
+ - Specifies the DNS host name or address for connecting to the remote device over the specified transport. The value + of host is used as the destination address for the transport. Note this argument does not affect the SSH argument. type: str port: description: - - Specifies the port to use when building the connection to the remote - device. This value applies to either O(transport=cli) or O(transport=rest). The port - value will default to the appropriate transport common port if - none is provided in the task. (cli=22, http=80, https=443). Note - this argument does not affect the SSH transport. + - Specifies the port to use when building the connection to the remote device. This value applies to either O(transport=cli) + or O(transport=rest). The port value will default to the appropriate transport common port if none is provided in + the task. (cli=22, http=80, https=443). Note this argument does not affect the SSH transport. type: int default: 0 (use common port) username: description: - - Configures the username to use to authenticate the connection to - the remote device. This value is used to authenticate - either the CLI login or the eAPI authentication depending on which - transport is used. Note this argument does not affect the SSH - transport. If the value is not specified in the task, the value of - environment variable E(ANSIBLE_NET_USERNAME) will be used instead. + - Configures the username to use to authenticate the connection to the remote device. This value is used to authenticate + either the CLI login or the eAPI authentication depending on which transport is used. Note this argument does not + affect the SSH transport. If the value is not specified in the task, the value of environment variable E(ANSIBLE_NET_USERNAME) + will be used instead. type: str password: description: - - Specifies the password to use to authenticate the connection to - the remote device. This is a common argument used for either O(transport=cli) - or O(transport=rest). Note this argument does not affect the SSH - transport. If the value is not specified in the task, the value of - environment variable E(ANSIBLE_NET_PASSWORD) will be used instead. + - Specifies the password to use to authenticate the connection to the remote device. This is a common argument used + for either O(transport=cli) or O(transport=rest). Note this argument does not affect the SSH transport. If the value + is not specified in the task, the value of environment variable E(ANSIBLE_NET_PASSWORD) will be used instead. type: str timeout: description: - - Specifies the timeout in seconds for communicating with the network device - for either connecting or sending commands. If the timeout is - exceeded before the operation is completed, the module will error. + - Specifies the timeout in seconds for communicating with the network device for either connecting or sending commands. + If the timeout is exceeded before the operation is completed, the module will error. type: int default: 10 ssh_keyfile: description: - - Specifies the SSH key to use to authenticate the connection to - the remote device. This argument is only used for O(transport=cli). - If the value is not specified in the task, the value of - environment variable E(ANSIBLE_NET_SSH_KEYFILE) will be used instead. + - Specifies the SSH key to use to authenticate the connection to the remote device. This argument is only used for O(transport=cli). 
+ If the value is not specified in the task, the value of environment variable E(ANSIBLE_NET_SSH_KEYFILE) will be used + instead. type: path transport: description: - - Configures the transport connection to use when connecting to the - remote device. The transport argument supports connectivity to the - device over SSH (V(ssh)), CLI (V(cli)), or REST (V(rest)). + - Configures the transport connection to use when connecting to the remote device. The transport argument supports connectivity + to the device over SSH (V(ssh)), CLI (V(cli)), or REST (V(rest)). required: true type: str - choices: [ cli, rest, ssh ] + choices: [cli, rest, ssh] default: ssh use_ssl: description: - - Configures the O(transport) to use SSL if set to V(true) only when the - O(transport) argument is configured as rest. If the transport - argument is not V(rest), this value is ignored. + - Configures the O(transport) to use SSL if set to V(true) only when the O(transport) argument is configured as rest. + If the transport argument is not V(rest), this value is ignored. type: bool default: true provider: description: - - Convenience method that allows all C(openswitch) arguments to be passed as - a dict object. All constraints (required, choices, etc) must be - met either by individual arguments or values in this dict. + - Convenience method that allows all C(openswitch) arguments to be passed as a dict object. All constraints (required, + choices, and so on) must be met either by individual arguments or values in this dict. type: dict -''' +""" diff --git a/plugins/doc_fragments/oracle.py b/plugins/doc_fragments/oracle.py index ff0ed2fd5b..f657af407b 100644 --- a/plugins/doc_fragments/oracle.py +++ b/plugins/doc_fragments/oracle.py @@ -8,76 +8,68 @@ __metaclass__ = type class ModuleDocFragment(object): - DOCUMENTATION = """ - requirements: - - Python SDK for Oracle Cloud Infrastructure U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io) - notes: - - For OCI Python SDK configuration, please refer to - U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/configuration.html). - options: - config_file_location: - description: - - Path to configuration file. If not set then the value of the E(OCI_CONFIG_FILE) environment variable, - if any, is used. Otherwise, defaults to C(~/.oci/config). - type: str - config_profile_name: - description: - - The profile to load from the config file referenced by O(config_file_location). If not set, then the - value of the E(OCI_CONFIG_PROFILE) environment variable, if any, is used. Otherwise, defaults to the - C(DEFAULT) profile in O(config_file_location). - default: "DEFAULT" - type: str - api_user: - description: - - The OCID of the user, on whose behalf, OCI APIs are invoked. If not set, then the - value of the E(OCI_USER_OCID) environment variable, if any, is used. This option is required if the user - is not specified through a configuration file (See O(config_file_location)). To get the user's OCID, - please refer U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm). - type: str - api_user_fingerprint: - description: - - Fingerprint for the key pair being used. If not set, then the value of the E(OCI_USER_FINGERPRINT) - environment variable, if any, is used. This option is required if the key fingerprint is not - specified through a configuration file (See O(config_file_location)). 
To get the key pair's - fingerprint value please refer - U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm). - type: str - api_user_key_file: - description: - - Full path and filename of the private key (in PEM format). If not set, then the value of the - OCI_USER_KEY_FILE variable, if any, is used. This option is required if the private key is - not specified through a configuration file (See O(config_file_location)). If the key is encrypted - with a pass-phrase, the O(api_user_key_pass_phrase) option must also be provided. - type: path - api_user_key_pass_phrase: - description: - - Passphrase used by the key referenced in O(api_user_key_file), if it is encrypted. If not set, then - the value of the OCI_USER_KEY_PASS_PHRASE variable, if any, is used. This option is required if the - key passphrase is not specified through a configuration file (See O(config_file_location)). - type: str - auth_type: - description: - - The type of authentication to use for making API requests. By default O(auth_type=api_key) based - authentication is performed and the API key (see O(api_user_key_file)) in your config file will be - used. If this 'auth_type' module option is not specified, the value of the OCI_ANSIBLE_AUTH_TYPE, - if any, is used. Use O(auth_type=instance_principal) to use instance principal based authentication - when running ansible playbooks within an OCI compute instance. - choices: ['api_key', 'instance_principal'] - default: 'api_key' - type: str - tenancy: - description: - - OCID of your tenancy. If not set, then the value of the OCI_TENANCY variable, if any, is - used. This option is required if the tenancy OCID is not specified through a configuration file - (See O(config_file_location)). To get the tenancy OCID, please refer to - U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm). - type: str - region: - description: - - The Oracle Cloud Infrastructure region to use for all OCI API requests. If not set, then the - value of the OCI_REGION variable, if any, is used. This option is required if the region is - not specified through a configuration file (See O(config_file_location)). Please refer to - U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/regions.htm) for more information - on OCI regions. - type: str - """ + DOCUMENTATION = r""" +requirements: + - Python SDK for Oracle Cloud Infrastructure U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io) +notes: + - For OCI Python SDK configuration, please refer to U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/configuration.html). +options: + config_file_location: + description: + - Path to configuration file. If not set then the value of the E(OCI_CONFIG_FILE) environment variable, if any, is used. + Otherwise, defaults to C(~/.oci/config). + type: str + config_profile_name: + description: + - The profile to load from the config file referenced by O(config_file_location). If not set, then the value of the + E(OCI_CONFIG_PROFILE) environment variable, if any, is used. Otherwise, defaults to the C(DEFAULT) profile in O(config_file_location). + default: "DEFAULT" + type: str + api_user: + description: + - The OCID of the user, on whose behalf, OCI APIs are invoked. If not set, then the value of the E(OCI_USER_OCID) environment + variable, if any, is used. This option is required if the user is not specified through a configuration file (See + O(config_file_location)). 
To get the user's OCID, please refer U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm). + type: str + api_user_fingerprint: + description: + - Fingerprint for the key pair being used. If not set, then the value of the E(OCI_USER_FINGERPRINT) environment variable, + if any, is used. This option is required if the key fingerprint is not specified through a configuration file (See + O(config_file_location)). To get the key pair's fingerprint value please refer to + U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm). + type: str + api_user_key_file: + description: + - Full path and filename of the private key (in PEM format). If not set, then the value of the E(OCI_USER_KEY_FILE) variable, + if any, is used. This option is required if the private key is not specified through a configuration file (See O(config_file_location)). + If the key is encrypted with a pass-phrase, the O(api_user_key_pass_phrase) option must also be provided. + type: path + api_user_key_pass_phrase: + description: + - Passphrase used by the key referenced in O(api_user_key_file), if it is encrypted. If not set, then the value of the + E(OCI_USER_KEY_PASS_PHRASE) variable, if any, is used. This option is required if the key passphrase is not specified + through a configuration file (See O(config_file_location)). + type: str + auth_type: + description: + - The type of authentication to use for making API requests. By default O(auth_type=api_key) based authentication is + performed and the API key (see O(api_user_key_file)) in your config file will be used. If this 'auth_type' module + option is not specified, the value of the E(OCI_ANSIBLE_AUTH_TYPE), if any, is used. Use O(auth_type=instance_principal) + to use instance principal based authentication when running ansible playbooks within an OCI compute instance. + choices: ['api_key', 'instance_principal'] + default: 'api_key' + type: str + tenancy: + description: + - OCID of your tenancy. If not set, then the value of the E(OCI_TENANCY) variable, if any, is used. This option is required + if the tenancy OCID is not specified through a configuration file (See O(config_file_location)). To get the tenancy + OCID, please refer to U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm). + type: str + region: + description: + - The Oracle Cloud Infrastructure region to use for all OCI API requests. If not set, then the value of the E(OCI_REGION) + variable, if any, is used. This option is required if the region is not specified through a configuration file (See + O(config_file_location)). Please refer to U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/regions.htm) + for more information on OCI regions. + type: str +""" diff --git a/plugins/doc_fragments/oracle_creatable_resource.py b/plugins/doc_fragments/oracle_creatable_resource.py index 9d2cc07c9f..be0c931db4 100644 --- a/plugins/doc_fragments/oracle_creatable_resource.py +++ b/plugins/doc_fragments/oracle_creatable_resource.py @@ -8,19 +8,18 @@ __metaclass__ = type class ModuleDocFragment(object): - DOCUMENTATION = """ - options: - force_create: - description: Whether to attempt non-idempotent creation of a resource. By default, create resource is an - idempotent operation, and doesn't create the resource if it already exists. Setting this option - to true, forcefully creates a copy of the resource, even if it already exists.This option is - mutually exclusive with O(key_by). 
- default: false - type: bool - key_by: - description: The list of comma-separated attributes of this resource which should be used to uniquely - identify an instance of the resource. By default, all the attributes of a resource except - O(freeform_tags) are used to uniquely identify a resource. - type: list - elements: str - """ + DOCUMENTATION = r""" +options: + force_create: + description: Whether to attempt non-idempotent creation of a resource. By default, create resource is an idempotent operation, + and does not create the resource if it already exists. Setting this option to V(true), forcefully creates a copy of the + resource, even if it already exists. This option is mutually exclusive with O(key_by). + default: false + type: bool + key_by: + description: The list of comma-separated attributes of this resource which should be used to uniquely identify an instance + of the resource. By default, all the attributes of a resource except O(freeform_tags) are used to uniquely identify + a resource. + type: list + elements: str +""" diff --git a/plugins/doc_fragments/oracle_display_name_option.py b/plugins/doc_fragments/oracle_display_name_option.py index b6bc0f2297..ab219352e9 100644 --- a/plugins/doc_fragments/oracle_display_name_option.py +++ b/plugins/doc_fragments/oracle_display_name_option.py @@ -8,10 +8,10 @@ __metaclass__ = type class ModuleDocFragment(object): - DOCUMENTATION = """ - options: - display_name: - description: Use O(display_name) along with the other options to return only resources that match the given - display name exactly. - type: str - """ + DOCUMENTATION = r""" +options: + display_name: + description: Use O(display_name) along with the other options to return only resources that match the given display name + exactly. + type: str +""" diff --git a/plugins/doc_fragments/oracle_name_option.py b/plugins/doc_fragments/oracle_name_option.py index 523eed702f..5d5c97ef65 100644 --- a/plugins/doc_fragments/oracle_name_option.py +++ b/plugins/doc_fragments/oracle_name_option.py @@ -8,10 +8,9 @@ __metaclass__ = type class ModuleDocFragment(object): - DOCUMENTATION = """ - options: - name: - description: Use O(name) along with the other options to return only resources that match the given name - exactly. - type: str - """ + DOCUMENTATION = r""" +options: + name: + description: Use O(name) along with the other options to return only resources that match the given name exactly. + type: str +""" diff --git a/plugins/doc_fragments/oracle_tags.py b/plugins/doc_fragments/oracle_tags.py index 3789dbe912..9cd35f9c7e 100644 --- a/plugins/doc_fragments/oracle_tags.py +++ b/plugins/doc_fragments/oracle_tags.py @@ -8,16 +8,14 @@ __metaclass__ = type class ModuleDocFragment(object): - DOCUMENTATION = """ - options: - defined_tags: - description: Defined tags for this resource. Each key is predefined and scoped to a namespace. For more - information, see - U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm). - type: dict - freeform_tags: - description: Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, - type, or namespace. For more information, see - U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm). - type: dict - """ + DOCUMENTATION = r""" +options: + defined_tags: + description: Defined tags for this resource. Each key is predefined and scoped to a namespace. 
For more information, see
+      U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm).
+    type: dict
+  freeform_tags:
+    description: Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
+      For more information, see U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm).
+    type: dict
+"""
diff --git a/plugins/doc_fragments/oracle_wait_options.py b/plugins/doc_fragments/oracle_wait_options.py
index 0ba2532324..90334711ee 100644
--- a/plugins/doc_fragments/oracle_wait_options.py
+++ b/plugins/doc_fragments/oracle_wait_options.py
@@ -8,20 +8,19 @@ __metaclass__ = type


 class ModuleDocFragment(object):
-    DOCUMENTATION = """
-    options:
-        wait:
-            description: Whether to wait for create or delete operation to complete.
-            default: true
-            type: bool
-        wait_timeout:
-            description: Time, in seconds, to wait when O(wait=true).
-            default: 1200
-            type: int
-        wait_until:
-            description: The lifecycle state to wait for the resource to transition into when O(wait=true). By default,
-                         when O(wait=true), we wait for the resource to get into ACTIVE/ATTACHED/AVAILABLE/PROVISIONED/
-                         RUNNING applicable lifecycle state during create operation and to get into DELETED/DETACHED/
-                         TERMINATED lifecycle state during delete operation.
-            type: str
-    """
+    DOCUMENTATION = r"""
+options:
+  wait:
+    description: Whether to wait for create or delete operation to complete.
+    default: true
+    type: bool
+  wait_timeout:
+    description: Time, in seconds, to wait when O(wait=true).
+    default: 1200
+    type: int
+  wait_until:
+    description: The lifecycle state to wait for the resource to transition into when O(wait=true). By default, when O(wait=true),
+      we wait for the resource to get into the applicable ACTIVE/ATTACHED/AVAILABLE/PROVISIONED/RUNNING lifecycle state during
+      a create operation, and into the DELETED/DETACHED/TERMINATED lifecycle state during a delete operation.
+    type: str
+"""
diff --git a/plugins/doc_fragments/pipx.py b/plugins/doc_fragments/pipx.py
new file mode 100644
index 0000000000..b94495d4a1
--- /dev/null
+++ b/plugins/doc_fragments/pipx.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2024, Alexei Znamensky
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+    DOCUMENTATION = r"""
+options:
+  global:
+    description:
+      - The module will pass the C(--global) argument to C(pipx), to execute actions in global scope.
+      - The C(--global) argument is only available in C(pipx>=1.6.0), so make sure to have a compatible version when using
+        this option. Moreover, a nasty bug with C(--global) was fixed in C(pipx==1.7.0), so it is strongly recommended you
+        use that version or newer.
+    type: bool
+    default: false
+  executable:
+    description:
+      - Path to the C(pipx) executable installed on the system.
+      - If not specified, the module will use C(python -m pipx) to run the tool, using the same Python interpreter as Ansible
+        itself.
+    type: path
+notes:
+  - This module requires C(pipx) version 0.16.2.1 or above. From community.general 11.0.0 onwards, the module will require
+    C(pipx>=1.7.0).
+  - Please note that C(pipx) requires Python 3.6 or above.
+  - This module does not install the C(pipx) Python package; however, that can be easily done with the module M(ansible.builtin.pip).
+ - This module does not require C(pipx) to be in the shell C(PATH), but it must be loadable by Python as a module. + - This module will honor C(pipx) environment variables such as but not limited to E(PIPX_HOME) and E(PIPX_BIN_DIR) passed + using the R(environment Ansible keyword, playbooks_environment). +seealso: + - name: C(pipx) command manual page + description: Manual page for the command. + link: https://pipx.pypa.io/latest/docs/ +""" diff --git a/plugins/doc_fragments/pritunl.py b/plugins/doc_fragments/pritunl.py index 396ee0866a..287204c16c 100644 --- a/plugins/doc_fragments/pritunl.py +++ b/plugins/doc_fragments/pritunl.py @@ -13,32 +13,28 @@ class ModuleDocFragment(object): DOCUMENTATION = r""" options: - pritunl_url: - type: str - required: true - description: - - URL and port of the Pritunl server on which the API is enabled. - - pritunl_api_token: - type: str - required: true - description: - - API Token of a Pritunl admin user. - - It needs to be enabled in Administrators > USERNAME > Enable Token Authentication. - - pritunl_api_secret: - type: str - required: true - description: - - API Secret found in Administrators > USERNAME > API Secret. - - validate_certs: - type: bool - required: false - default: true - description: - - If certificates should be validated or not. - - This should never be set to V(false), except if you are very sure that - your connection to the server can not be subject to a Man In The Middle - attack. + pritunl_url: + type: str + required: true + description: + - URL and port of the Pritunl server on which the API is enabled. + pritunl_api_token: + type: str + required: true + description: + - API Token of a Pritunl admin user. + - It needs to be enabled in Administrators > USERNAME > Enable Token Authentication. + pritunl_api_secret: + type: str + required: true + description: + - API Secret found in Administrators > USERNAME > API Secret. + validate_certs: + type: bool + required: false + default: true + description: + - If certificates should be validated or not. + - This should never be set to V(false), except if you are very sure that your connection to the server can not be subject + to a Man In The Middle attack. """ diff --git a/plugins/doc_fragments/proxmox.py b/plugins/doc_fragments/proxmox.py index cb533fefa6..4641c36d3e 100644 --- a/plugins/doc_fragments/proxmox.py +++ b/plugins/doc_fragments/proxmox.py @@ -9,13 +9,20 @@ __metaclass__ = type class ModuleDocFragment(object): # Common parameters for Proxmox VE modules - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: api_host: description: - Specify the target host of the Proxmox VE cluster. type: str required: true + api_port: + description: + - Specify the target port of the Proxmox VE cluster. + - Uses the E(PROXMOX_PORT) environment variable if not specified. + type: int + required: false + version_added: 9.1.0 api_user: description: - Specify the user to authenticate with. @@ -44,10 +51,10 @@ options: - This should only be used on personally controlled sites using self-signed certificates. type: bool default: false -requirements: [ "proxmoxer", "requests" ] -''' +requirements: ["proxmoxer", "requests"] +""" - SELECTION = r''' + SELECTION = r""" options: vmid: description: @@ -64,7 +71,7 @@ options: description: - Add the new VM to the specified pool. 
type: str -''' +""" ACTIONGROUP_PROXMOX = r""" options: {} diff --git a/plugins/doc_fragments/purestorage.py b/plugins/doc_fragments/purestorage.py index 823397763f..7c42a4fec2 100644 --- a/plugins/doc_fragments/purestorage.py +++ b/plugins/doc_fragments/purestorage.py @@ -11,17 +11,17 @@ __metaclass__ = type class ModuleDocFragment(object): # Standard Pure Storage documentation fragment - DOCUMENTATION = r''' -options: - - See separate platform section for more details + DOCUMENTATION = r""" +options: {} +# See separate platform section for more details requirements: - See separate platform section for more details notes: - - Ansible modules are available for the following Pure Storage products: FlashArray, FlashBlade -''' + - 'Ansible modules are available for the following Pure Storage products: FlashArray, FlashBlade.' +""" # Documentation fragment for FlashBlade - FB = r''' + FB = r""" options: fb_url: description: @@ -33,14 +33,14 @@ options: type: str notes: - This module requires the C(purity_fb) Python library. - - You must set E(PUREFB_URL) and E(PUREFB_API) environment variables - if O(fb_url) and O(api_token) arguments are not passed to the module directly. + - You must set E(PUREFB_URL) and E(PUREFB_API) environment variables if O(fb_url) and O(api_token) arguments are not passed + to the module directly. requirements: - purity_fb >= 1.1 -''' +""" # Documentation fragment for FlashArray - FA = r''' + FA = r""" options: fa_url: description: @@ -54,8 +54,8 @@ options: required: true notes: - This module requires the C(purestorage) Python library. - - You must set E(PUREFA_URL) and E(PUREFA_API) environment variables - if O(fa_url) and O(api_token) arguments are not passed to the module directly. + - You must set E(PUREFA_URL) and E(PUREFA_API) environment variables if O(fa_url) and O(api_token) arguments are not passed + to the module directly. requirements: - purestorage -''' +""" diff --git a/plugins/doc_fragments/redis.py b/plugins/doc_fragments/redis.py index fafb52c86c..149c018d79 100644 --- a/plugins/doc_fragments/redis.py +++ b/plugins/doc_fragments/redis.py @@ -10,7 +10,7 @@ __metaclass__ = type class ModuleDocFragment(object): # Common parameters for Redis modules - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: login_host: description: @@ -40,19 +40,26 @@ options: validate_certs: description: - Specify whether or not to validate TLS certificates. - - This should only be turned off for personally controlled sites or with - C(localhost) as target. + - This should only be turned off for personally controlled sites or with C(localhost) as target. type: bool default: true ca_certs: description: - - Path to root certificates file. If not set and O(tls) is - set to V(true), certifi ca-certificates will be used. + - Path to root certificates file. If not set and O(tls) is set to V(true), certifi ca-certificates will be used. type: str -requirements: [ "redis", "certifi" ] + client_cert_file: + description: + - Path to the client certificate file. + type: str + version_added: 9.3.0 + client_key_file: + description: + - Path to the client private key file. + type: str + version_added: 9.3.0 +requirements: ["redis", "certifi"] notes: - - Requires the C(redis) Python package on the remote host. You can - install it with pip (C(pip install redis)) or with a package manager. - Information on the library can be found at U(https://github.com/andymccurdy/redis-py). -''' + - Requires the C(redis) Python package on the remote host. 
You can install it with pip (C(pip install redis)) or with a + package manager. Information on the library can be found at U(https://github.com/andymccurdy/redis-py). +""" diff --git a/plugins/doc_fragments/rundeck.py b/plugins/doc_fragments/rundeck.py index 62c8648e96..b3a8e86753 100644 --- a/plugins/doc_fragments/rundeck.py +++ b/plugins/doc_fragments/rundeck.py @@ -11,7 +11,7 @@ __metaclass__ = type class ModuleDocFragment(object): # Standard files documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: url: type: str @@ -29,4 +29,4 @@ options: description: - Rundeck User API Token. required: true -''' +""" diff --git a/plugins/doc_fragments/scaleway.py b/plugins/doc_fragments/scaleway.py index bdb0dd0561..2988865eea 100644 --- a/plugins/doc_fragments/scaleway.py +++ b/plugins/doc_fragments/scaleway.py @@ -11,29 +11,29 @@ __metaclass__ = type class ModuleDocFragment(object): # Standard documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: api_token: description: - Scaleway OAuth token. type: str required: true - aliases: [ oauth_token ] + aliases: [oauth_token] api_url: description: - Scaleway API URL. type: str default: https://api.scaleway.com - aliases: [ base_url ] + aliases: [base_url] api_timeout: description: - HTTP timeout to Scaleway API in seconds. type: int default: 30 - aliases: [ timeout ] + aliases: [timeout] query_parameters: description: - - List of parameters passed to the query string. + - List of parameters passed to the query string. type: dict default: {} validate_certs: @@ -43,9 +43,7 @@ options: default: true notes: - Also see the API documentation on U(https://developer.scaleway.com/). - - If O(api_token) is not set within the module, the following - environment variables can be used in decreasing order of precedence + - If O(api_token) is not set within the module, the following environment variables can be used in decreasing order of precedence E(SCW_TOKEN), E(SCW_API_KEY), E(SCW_OAUTH_TOKEN) or E(SCW_API_TOKEN). - - If one wants to use a different O(api_url) one can also set the E(SCW_API_URL) - environment variable. -''' + - If one wants to use a different O(api_url) one can also set the E(SCW_API_URL) environment variable. +""" diff --git a/plugins/doc_fragments/scaleway_waitable_resource.py b/plugins/doc_fragments/scaleway_waitable_resource.py index 3ab5c7d6f4..f529d8f5c2 100644 --- a/plugins/doc_fragments/scaleway_waitable_resource.py +++ b/plugins/doc_fragments/scaleway_waitable_resource.py @@ -11,23 +11,23 @@ __metaclass__ = type class ModuleDocFragment(object): # Standard documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: wait: description: - - Wait for the resource to reach its desired state before returning. + - Wait for the resource to reach its desired state before returning. type: bool default: true wait_timeout: type: int description: - - Time to wait for the resource to reach the expected state. + - Time to wait for the resource to reach the expected state. required: false default: 300 wait_sleep_time: type: int description: - - Time to wait before every attempt to check the state of the resource. + - Time to wait before every attempt to check the state of the resource. 
required: false default: 3 -''' +""" diff --git a/plugins/doc_fragments/utm.py b/plugins/doc_fragments/utm.py index 3e0bc6e10c..f6954a1917 100644 --- a/plugins/doc_fragments/utm.py +++ b/plugins/doc_fragments/utm.py @@ -9,49 +9,48 @@ __metaclass__ = type class ModuleDocFragment(object): - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: - headers: - description: - - A dictionary of additional headers to be sent to POST and PUT requests. - - Is needed for some modules. - type: dict - required: false - default: {} - utm_host: - description: - - The REST Endpoint of the Sophos UTM. - type: str - required: true - utm_port: - description: - - The port of the REST interface. - type: int - default: 4444 - utm_token: - description: - - "The token used to identify at the REST-API. See - U(https://www.sophos.com/en-us/medialibrary/PDFs/documentation/UTMonAWS/Sophos-UTM-RESTful-API.pdf?la=en), - Chapter 2.4.2." - type: str - required: true - utm_protocol: - description: - - The protocol of the REST Endpoint. - choices: [ http, https ] - type: str - default: https - validate_certs: - description: - - Whether the REST interface's ssl certificate should be verified or not. - type: bool - default: true - state: - description: - - The desired state of the object. - - V(present) will create or update an object. - - V(absent) will delete an object if it was present. - type: str - choices: [ absent, present ] - default: present -''' + headers: + description: + - A dictionary of additional headers to be sent to POST and PUT requests. + - Is needed for some modules. + type: dict + required: false + default: {} + utm_host: + description: + - The REST Endpoint of the Sophos UTM. + type: str + required: true + utm_port: + description: + - The port of the REST interface. + type: int + default: 4444 + utm_token: + description: + - The token used to identify at the REST-API. + - See U(https://www.sophos.com/en-us/medialibrary/PDFs/documentation/UTMonAWS/Sophos-UTM-RESTful-API.pdf?la=en), Chapter 2.4.2. + type: str + required: true + utm_protocol: + description: + - The protocol of the REST Endpoint. + choices: [http, https] + type: str + default: https + validate_certs: + description: + - Whether the REST interface's SSL certificate should be verified or not. + type: bool + default: true + state: + description: + - The desired state of the object. + - V(present) will create or update an object. + - V(absent) will delete an object if it was present. + type: str + choices: [absent, present] + default: present +""" diff --git a/plugins/doc_fragments/vexata.py b/plugins/doc_fragments/vexata.py index 041f404d28..257a4ccd14 100644 --- a/plugins/doc_fragments/vexata.py +++ b/plugins/doc_fragments/vexata.py @@ -10,14 +10,14 @@ __metaclass__ = type class ModuleDocFragment(object): - DOCUMENTATION = r''' -options: - - See respective platform section for more details + DOCUMENTATION = r""" +options: {} +# See respective platform section for more details requirements: - - See respective platform section for more details + - See respective platform section for more details notes: - - Ansible modules are available for Vexata VX100 arrays. -''' + - Ansible modules are available for Vexata VX100 arrays. 
+""" # Documentation fragment for Vexata VX100 series VX100 = r''' diff --git a/plugins/doc_fragments/xenserver.py b/plugins/doc_fragments/xenserver.py index 681d959faa..d1377e8964 100644 --- a/plugins/doc_fragments/xenserver.py +++ b/plugins/doc_fragments/xenserver.py @@ -10,32 +10,33 @@ __metaclass__ = type class ModuleDocFragment(object): # Common parameters for XenServer modules - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: hostname: description: - - The hostname or IP address of the XenServer host or XenServer pool master. - - If the value is not specified in the task, the value of environment variable E(XENSERVER_HOST) will be used instead. + - The hostname or IP address of the XenServer host or XenServer pool master. + - If the value is not specified in the task, the value of environment variable E(XENSERVER_HOST) will be used instead. type: str default: localhost - aliases: [ host, pool ] + aliases: [host, pool] username: description: - - The username to use for connecting to XenServer. - - If the value is not specified in the task, the value of environment variable E(XENSERVER_USER) will be used instead. + - The username to use for connecting to XenServer. + - If the value is not specified in the task, the value of environment variable E(XENSERVER_USER) will be used instead. type: str default: root - aliases: [ admin, user ] + aliases: [admin, user] password: description: - - The password to use for connecting to XenServer. - - If the value is not specified in the task, the value of environment variable E(XENSERVER_PASSWORD) will be used instead. + - The password to use for connecting to XenServer. + - If the value is not specified in the task, the value of environment variable E(XENSERVER_PASSWORD) will be used instead. type: str - aliases: [ pass, pwd ] + aliases: [pass, pwd] validate_certs: description: - - Allows connection when SSL certificates are not valid. Set to V(false) when certificates are not trusted. - - If the value is not specified in the task, the value of environment variable E(XENSERVER_VALIDATE_CERTS) will be used instead. + - Allows connection when SSL certificates are not valid. Set to V(false) when certificates are not trusted. + - If the value is not specified in the task, the value of environment variable E(XENSERVER_VALIDATE_CERTS) will be used + instead. type: bool default: true -''' +""" diff --git a/plugins/filter/dict.py b/plugins/filter/dict.py index 720c9def96..3e0558bb61 100644 --- a/plugins/filter/dict.py +++ b/plugins/filter/dict.py @@ -57,8 +57,8 @@ EXAMPLES = ''' RETURN = ''' _value: - description: The dictionary having the provided key-value pairs. - type: boolean + description: A dictionary with the provided key-value pairs. + type: dictionary ''' diff --git a/plugins/filter/groupby_as_dict.py b/plugins/filter/groupby_as_dict.py index 4a8f4c6dc1..8e29c5863c 100644 --- a/plugins/filter/groupby_as_dict.py +++ b/plugins/filter/groupby_as_dict.py @@ -13,6 +13,8 @@ DOCUMENTATION = ''' author: Felix Fontein (@felixfontein) description: - Transform a sequence of dictionaries to a dictionary where the dictionaries are indexed by an attribute. + - This filter is similar to the Jinja2 C(groupby) filter. Use the Jinja2 C(groupby) filter if you have multiple entries with the same value, + or when you need a dictionary with list values, or when you need to use deeply nested attributes. 
   positional: attribute
   options:
     _input:
diff --git a/plugins/filter/hashids.py b/plugins/filter/hashids.py
index 45fba83c03..ac771e6219 100644
--- a/plugins/filter/hashids.py
+++ b/plugins/filter/hashids.py
@@ -27,7 +27,7 @@ def initialize_hashids(**kwargs):
     if not HAS_HASHIDS:
         raise AnsibleError("The hashids library must be installed in order to use this plugin")

-    params = dict((k, v) for k, v in kwargs.items() if v)
+    params = {k: v for k, v in kwargs.items() if v}

     try:
         return Hashids(**params)
diff --git a/plugins/filter/keep_keys.py b/plugins/filter/keep_keys.py
new file mode 100644
index 0000000000..97b706a950
--- /dev/null
+++ b/plugins/filter/keep_keys.py
@@ -0,0 +1,138 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2024 Vladimir Botka
+# Copyright (c) 2024 Felix Fontein
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+  name: keep_keys
+  short_description: Keep specific keys from dictionaries in a list
+  version_added: "9.1.0"
+  author:
+    - Vladimir Botka (@vbotka)
+    - Felix Fontein (@felixfontein)
+  description: This filter keeps only specified keys from a provided list of dictionaries.
+  options:
+    _input:
+      description:
+        - A list of dictionaries.
+        - Top level keys must be strings.
+      type: list
+      elements: dictionary
+      required: true
+    target:
+      description:
+        - A single key or key pattern to keep, or a list of keys or key patterns to keep.
+        - If O(matching_parameter=regex) there must be exactly one pattern provided.
+      type: raw
+      required: true
+    matching_parameter:
+      description: Specify the matching option of target keys.
+      type: str
+      default: equal
+      choices:
+        equal: Matches keys of exactly one of the O(target) items.
+        starts_with: Matches keys that start with one of the O(target) items.
+        ends_with: Matches keys that end with one of the O(target) items.
+        regex:
+          - Matches keys that match the regular expression provided in O(target).
+          - In this case, O(target) must be a regex string or a list with a single regex string.
+'''
+
+EXAMPLES = '''
+  l:
+    - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo}
+    - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar}
+
+  # 1) By default match keys that equal any of the items in the target.
+  t: [k0_x0, k1_x1]
+  r: "{{ l | community.general.keep_keys(target=t) }}"
+
+  # 2) Match keys that start with any of the items in the target.
+  t: [k0, k1]
+  r: "{{ l | community.general.keep_keys(target=t, matching_parameter='starts_with') }}"
+
+  # 3) Match keys that end with any of the items in target.
+  t: [x0, x1]
+  r: "{{ l | community.general.keep_keys(target=t, matching_parameter='ends_with') }}"
+
+  # 4) Match keys by the regex.
+  t: ['^.*[01]_x.*$']
+  r: "{{ l | community.general.keep_keys(target=t, matching_parameter='regex') }}"
+
+  # 5) Match keys by the regex.
+  t: '^.*[01]_x.*$'
+  r: "{{ l | community.general.keep_keys(target=t, matching_parameter='regex') }}"
+
+  # The results of above examples 1-5 are all the same.
+  r:
+    - {k0_x0: A0, k1_x1: B0}
+    - {k0_x0: A1, k1_x1: B1}
+
+  # 6) By default match keys that equal the target.
+  t: k0_x0
+  r: "{{ l | community.general.keep_keys(target=t) }}"
+
+  # 7) Match keys that start with the target.
+  t: k0
+  r: "{{ l | community.general.keep_keys(target=t, matching_parameter='starts_with') }}"
+
+  # 8) Match keys that end with the target.
+ t: x0 + r: "{{ l | community.general.keep_keys(target=t, matching_parameter='ends_with') }}" + + # 9) Match keys by the regex. + t: '^.*0_x.*$' + r: "{{ l | community.general.keep_keys(target=t, matching_parameter='regex') }}" + + # The results of above examples 6-9 are all the same. + r: + - {k0_x0: A0} + - {k0_x0: A1} +''' + +RETURN = ''' + _value: + description: The list of dictionaries with selected keys. + type: list + elements: dictionary +''' + +from ansible_collections.community.general.plugins.plugin_utils.keys_filter import ( + _keys_filter_params, + _keys_filter_target_str) + + +def keep_keys(data, target=None, matching_parameter='equal'): + """keep specific keys from dictionaries in a list""" + + # test parameters + _keys_filter_params(data, matching_parameter) + # test and transform target + tt = _keys_filter_target_str(target, matching_parameter) + + if matching_parameter == 'equal': + def keep_key(key): + return key in tt + elif matching_parameter == 'starts_with': + def keep_key(key): + return key.startswith(tt) + elif matching_parameter == 'ends_with': + def keep_key(key): + return key.endswith(tt) + elif matching_parameter == 'regex': + def keep_key(key): + return tt.match(key) is not None + + return [{k: v for k, v in d.items() if keep_key(k)} for d in data] + + +class FilterModule(object): + + def filters(self): + return { + 'keep_keys': keep_keys, + } diff --git a/plugins/filter/lists_mergeby.py b/plugins/filter/lists_mergeby.py index caf183492c..0e47d50172 100644 --- a/plugins/filter/lists_mergeby.py +++ b/plugins/filter/lists_mergeby.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (c) 2020-2022, Vladimir Botka +# Copyright (c) 2020-2024, Vladimir Botka # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -12,22 +12,32 @@ DOCUMENTATION = ''' version_added: 2.0.0 author: Vladimir Botka (@vbotka) description: - - Merge two or more lists by attribute O(index). Optional parameters O(recursive) and O(list_merge) - control the merging of the lists in values. The function merge_hash from ansible.utils.vars - is used. To learn details on how to use the parameters O(recursive) and O(list_merge) see - Ansible User's Guide chapter "Using filters to manipulate data" section "Combining - hashes/dictionaries". + - Merge two or more lists by attribute O(index). Optional + parameters O(recursive) and O(list_merge) control the merging of + the nested dictionaries and lists. + - The function C(merge_hash) from C(ansible.utils.vars) is used. + - To learn details on how to use the parameters O(recursive) and + O(list_merge) see Ansible User's Guide chapter "Using filters to + manipulate data" section R(Combining hashes/dictionaries, combine_filter) or the + filter P(ansible.builtin.combine#filter). + positional: another_list, index options: _input: - description: A list of dictionaries. + description: + - A list of dictionaries, or a list of lists of dictionaries. + - The required type of the C(elements) is set to C(raw) + because all elements of O(_input) can be either dictionaries + or lists. type: list - elements: dictionary + elements: raw required: true another_list: - description: Another list of dictionaries. This parameter can be specified multiple times. + description: + - Another list of dictionaries, or a list of lists of dictionaries. + - This parameter can be specified multiple times. 
type: list - elements: dictionary + elements: raw index: description: - The dictionary key that must be present in every dictionary in every list that is used to @@ -55,40 +65,134 @@ DOCUMENTATION = ''' ''' EXAMPLES = ''' -- name: Merge two lists +# Some results below are manually formatted for better readability. The +# dictionaries' keys will be sorted alphabetically in real output. + +- name: Example 1. Merge two lists. The results r1 and r2 are the same. ansible.builtin.debug: - msg: >- - {{ list1 | community.general.lists_mergeby( - list2, - 'index', - recursive=True, - list_merge='append' - ) }}" + msg: | + r1: {{ r1 }} + r2: {{ r2 }} vars: list1: - - index: a - value: 123 - - index: b - value: 42 + - {index: a, value: 123} + - {index: b, value: 4} list2: - - index: a - foo: bar - - index: c - foo: baz - # Produces the following list of dictionaries: - # { - # "index": "a", - # "foo": "bar", - # "value": 123 - # }, - # { - # "index": "b", - # "value": 42 - # }, - # { - # "index": "c", - # "foo": "baz" - # } + - {index: a, foo: bar} + - {index: c, foo: baz} + r1: "{{ list1 | community.general.lists_mergeby(list2, 'index') }}" + r2: "{{ [list1, list2] | community.general.lists_mergeby('index') }}" + +# r1: +# - {index: a, foo: bar, value: 123} +# - {index: b, value: 4} +# - {index: c, foo: baz} +# r2: +# - {index: a, foo: bar, value: 123} +# - {index: b, value: 4} +# - {index: c, foo: baz} + +- name: Example 2. Merge three lists + ansible.builtin.debug: + var: r + vars: + list1: + - {index: a, value: 123} + - {index: b, value: 4} + list2: + - {index: a, foo: bar} + - {index: c, foo: baz} + list3: + - {index: d, foo: qux} + r: "{{ [list1, list2, list3] | community.general.lists_mergeby('index') }}" + +# r: +# - {index: a, foo: bar, value: 123} +# - {index: b, value: 4} +# - {index: c, foo: baz} +# - {index: d, foo: qux} + +- name: Example 3. Merge single list. The result is the same as 2. + ansible.builtin.debug: + var: r + vars: + list1: + - {index: a, value: 123} + - {index: b, value: 4} + - {index: a, foo: bar} + - {index: c, foo: baz} + - {index: d, foo: qux} + r: "{{ [list1, []] | community.general.lists_mergeby('index') }}" + +# r: +# - {index: a, foo: bar, value: 123} +# - {index: b, value: 4} +# - {index: c, foo: baz} +# - {index: d, foo: qux} + +- name: Example 4. Merge two lists. By default, replace nested lists. + ansible.builtin.debug: + var: r + vars: + list1: + - {index: a, foo: [X1, X2]} + - {index: b, foo: [X1, X2]} + list2: + - {index: a, foo: [Y1, Y2]} + - {index: b, foo: [Y1, Y2]} + r: "{{ [list1, list2] | community.general.lists_mergeby('index') }}" + +# r: +# - {index: a, foo: [Y1, Y2]} +# - {index: b, foo: [Y1, Y2]} + +- name: Example 5. Merge two lists. Append nested lists. + ansible.builtin.debug: + var: r + vars: + list1: + - {index: a, foo: [X1, X2]} + - {index: b, foo: [X1, X2]} + list2: + - {index: a, foo: [Y1, Y2]} + - {index: b, foo: [Y1, Y2]} + r: "{{ [list1, list2] | community.general.lists_mergeby('index', list_merge='append') }}" + +# r: +# - {index: a, foo: [X1, X2, Y1, Y2]} +# - {index: b, foo: [X1, X2, Y1, Y2]} + +- name: Example 6. Merge two lists. By default, do not merge nested dictionaries. + ansible.builtin.debug: + var: r + vars: + list1: + - {index: a, foo: {x: 1, y: 2}} + - {index: b, foo: [X1, X2]} + list2: + - {index: a, foo: {y: 3, z: 4}} + - {index: b, foo: [Y1, Y2]} + r: "{{ [list1, list2] | community.general.lists_mergeby('index') }}" + +# r: +# - {index: a, foo: {y: 3, z: 4}} +# - {index: b, foo: [Y1, Y2]} + +- name: Example 7. 
Merge two lists. Merge nested dictionaries too. + ansible.builtin.debug: + var: r + vars: + list1: + - {index: a, foo: {x: 1, y: 2}} + - {index: b, foo: [X1, X2]} + list2: + - {index: a, foo: {y: 3, z: 4}} + - {index: b, foo: [Y1, Y2]} + r: "{{ [list1, list2] | community.general.lists_mergeby('index', recursive=true) }}" + +# r: +# - {index: a, foo: {x:1, y: 3, z: 4}} +# - {index: b, foo: [Y1, Y2]} ''' RETURN = ''' @@ -108,13 +212,14 @@ from operator import itemgetter def list_mergeby(x, y, index, recursive=False, list_merge='replace'): - ''' Merge 2 lists by attribute 'index'. The function merge_hash from ansible.utils.vars is used. - This function is used by the function lists_mergeby. + '''Merge 2 lists by attribute 'index'. The function 'merge_hash' + from ansible.utils.vars is used. This function is used by the + function lists_mergeby. ''' d = defaultdict(dict) - for l in (x, y): - for elem in l: + for lst in (x, y): + for elem in lst: if not isinstance(elem, Mapping): msg = "Elements of list arguments for lists_mergeby must be dictionaries. %s is %s" raise AnsibleFilterError(msg % (elem, type(elem))) @@ -124,20 +229,9 @@ def list_mergeby(x, y, index, recursive=False, list_merge='replace'): def lists_mergeby(*terms, **kwargs): - ''' Merge 2 or more lists by attribute 'index'. Optional parameters 'recursive' and 'list_merge' - control the merging of the lists in values. The function merge_hash from ansible.utils.vars - is used. To learn details on how to use the parameters 'recursive' and 'list_merge' see - Ansible User's Guide chapter "Using filters to manipulate data" section "Combining - hashes/dictionaries". - - Example: - - debug: - msg: "{{ list1| - community.general.lists_mergeby(list2, - 'index', - recursive=True, - list_merge='append')| - list }}" + '''Merge 2 or more lists by attribute 'index'. To learn details + on how to use the parameters 'recursive' and 'list_merge' see + the filter ansible.builtin.combine. ''' recursive = kwargs.pop('recursive', False) @@ -155,7 +249,7 @@ def lists_mergeby(*terms, **kwargs): "must be lists. %s is %s") raise AnsibleFilterError(msg % (sublist, type(sublist))) if len(sublist) > 0: - if all(isinstance(l, Sequence) for l in sublist): + if all(isinstance(lst, Sequence) for lst in sublist): for item in sublist: flat_list.append(item) else: diff --git a/plugins/filter/remove_keys.py b/plugins/filter/remove_keys.py new file mode 100644 index 0000000000..7a4d912d34 --- /dev/null +++ b/plugins/filter/remove_keys.py @@ -0,0 +1,138 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2024 Vladimir Botka +# Copyright (c) 2024 Felix Fontein +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + name: remove_keys + short_description: Remove specific keys from dictionaries in a list + version_added: "9.1.0" + author: + - Vladimir Botka (@vbotka) + - Felix Fontein (@felixfontein) + description: This filter removes only specified keys from a provided list of dictionaries. + options: + _input: + description: + - A list of dictionaries. + - Top level keys must be strings. + type: list + elements: dictionary + required: true + target: + description: + - A single key or key pattern to remove, or a list of keys or keys patterns to remove. + - If O(matching_parameter=regex) there must be exactly one pattern provided. 
+      type: raw
+      required: true
+    matching_parameter:
+      description: Specify the matching option of target keys.
+      type: str
+      default: equal
+      choices:
+        equal: Matches keys of exactly one of the O(target) items.
+        starts_with: Matches keys that start with one of the O(target) items.
+        ends_with: Matches keys that end with one of the O(target) items.
+        regex:
+          - Matches keys that match the regular expression provided in O(target).
+          - In this case, O(target) must be a regex string or a list with a single regex string.
+'''
+
+EXAMPLES = '''
+  l:
+    - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo}
+    - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar}
+
+  # 1) By default match keys that equal any of the items in the target.
+  t: [k0_x0, k1_x1]
+  r: "{{ l | community.general.remove_keys(target=t) }}"
+
+  # 2) Match keys that start with any of the items in the target.
+  t: [k0, k1]
+  r: "{{ l | community.general.remove_keys(target=t, matching_parameter='starts_with') }}"
+
+  # 3) Match keys that end with any of the items in target.
+  t: [x0, x1]
+  r: "{{ l | community.general.remove_keys(target=t, matching_parameter='ends_with') }}"
+
+  # 4) Match keys by the regex.
+  t: ['^.*[01]_x.*$']
+  r: "{{ l | community.general.remove_keys(target=t, matching_parameter='regex') }}"
+
+  # 5) Match keys by the regex.
+  t: '^.*[01]_x.*$'
+  r: "{{ l | community.general.remove_keys(target=t, matching_parameter='regex') }}"
+
+  # The results of above examples 1-5 are all the same.
+  r:
+    - {k2_x2: [C0], k3_x3: foo}
+    - {k2_x2: [C1], k3_x3: bar}
+
+  # 6) By default match keys that equal the target.
+  t: k0_x0
+  r: "{{ l | community.general.remove_keys(target=t) }}"
+
+  # 7) Match keys that start with the target.
+  t: k0
+  r: "{{ l | community.general.remove_keys(target=t, matching_parameter='starts_with') }}"
+
+  # 8) Match keys that end with the target.
+  t: x0
+  r: "{{ l | community.general.remove_keys(target=t, matching_parameter='ends_with') }}"
+
+  # 9) Match keys by the regex.
+  t: '^.*0_x.*$'
+  r: "{{ l | community.general.remove_keys(target=t, matching_parameter='regex') }}"
+
+  # The results of above examples 6-9 are all the same.
+  r:
+    - {k1_x1: B0, k2_x2: [C0], k3_x3: foo}
+    - {k1_x1: B1, k2_x2: [C1], k3_x3: bar}
+'''
+
+RETURN = '''
+  _value:
+    description: The list of dictionaries with selected keys removed.
+ type: list + elements: dictionary +''' + +from ansible_collections.community.general.plugins.plugin_utils.keys_filter import ( + _keys_filter_params, + _keys_filter_target_str) + + +def remove_keys(data, target=None, matching_parameter='equal'): + """remove specific keys from dictionaries in a list""" + + # test parameters + _keys_filter_params(data, matching_parameter) + # test and transform target + tt = _keys_filter_target_str(target, matching_parameter) + + if matching_parameter == 'equal': + def keep_key(key): + return key not in tt + elif matching_parameter == 'starts_with': + def keep_key(key): + return not key.startswith(tt) + elif matching_parameter == 'ends_with': + def keep_key(key): + return not key.endswith(tt) + elif matching_parameter == 'regex': + def keep_key(key): + return tt.match(key) is None + + return [{k: v for k, v in d.items() if keep_key(k)} for d in data] + + +class FilterModule(object): + + def filters(self): + return { + 'remove_keys': remove_keys, + } diff --git a/plugins/filter/replace_keys.py b/plugins/filter/replace_keys.py new file mode 100644 index 0000000000..70b264eba6 --- /dev/null +++ b/plugins/filter/replace_keys.py @@ -0,0 +1,180 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2024 Vladimir Botka +# Copyright (c) 2024 Felix Fontein +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + name: replace_keys + short_description: Replace specific keys in a list of dictionaries + version_added: "9.1.0" + author: + - Vladimir Botka (@vbotka) + - Felix Fontein (@felixfontein) + description: This filter replaces specified keys in a provided list of dictionaries. + options: + _input: + description: + - A list of dictionaries. + - Top level keys must be strings. + type: list + elements: dictionary + required: true + target: + description: + - A list of dictionaries with attributes C(before) and C(after). + - The value of O(target[].after) replaces key matching O(target[].before). + type: list + elements: dictionary + required: true + suboptions: + before: + description: + - A key or key pattern to change. + - The interpretation of O(target[].before) depends on O(matching_parameter). + - For a key that matches multiple O(target[].before)s, the B(first) matching O(target[].after) will be used. + type: str + after: + description: A matching key change to. + type: str + matching_parameter: + description: Specify the matching option of target keys. + type: str + default: equal + choices: + equal: Matches keys of exactly one of the O(target[].before) items. + starts_with: Matches keys that start with one of the O(target[].before) items. + ends_with: Matches keys that end with one of the O(target[].before) items. + regex: Matches keys that match one of the regular expressions provided in O(target[].before). +''' + +EXAMPLES = ''' + l: + - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} + + # 1) By default, replace keys that are equal any of the attributes before. + t: + - {before: k0_x0, after: a0} + - {before: k1_x1, after: a1} + r: "{{ l | community.general.replace_keys(target=t) }}" + + # 2) Replace keys that starts with any of the attributes before. 
+ t: + - {before: k0, after: a0} + - {before: k1, after: a1} + r: "{{ l | community.general.replace_keys(target=t, matching_parameter='starts_with') }}" + + # 3) Replace keys that ends with any of the attributes before. + t: + - {before: x0, after: a0} + - {before: x1, after: a1} + r: "{{ l | community.general.replace_keys(target=t, matching_parameter='ends_with') }}" + + # 4) Replace keys that match any regex of the attributes before. + t: + - {before: "^.*0_x.*$", after: a0} + - {before: "^.*1_x.*$", after: a1} + r: "{{ l | community.general.replace_keys(target=t, matching_parameter='regex') }}" + + # The results of above examples 1-4 are all the same. + r: + - {a0: A0, a1: B0, k2_x2: [C0], k3_x3: foo} + - {a0: A1, a1: B1, k2_x2: [C1], k3_x3: bar} + + # 5) If more keys match the same attribute before the last one will be used. + t: + - {before: "^.*_x.*$", after: X} + r: "{{ l | community.general.replace_keys(target=t, matching_parameter='regex') }}" + + # gives + + r: + - X: foo + - X: bar + + # 6) If there are items with equal attribute before the first one will be used. + t: + - {before: "^.*_x.*$", after: X} + - {before: "^.*_x.*$", after: Y} + r: "{{ l | community.general.replace_keys(target=t, matching_parameter='regex') }}" + + # gives + + r: + - X: foo + - X: bar + + # 7) If there are more matches for a key the first one will be used. + l: + - {aaa1: A, bbb1: B, ccc1: C} + - {aaa2: D, bbb2: E, ccc2: F} + t: + - {before: a, after: X} + - {before: aa, after: Y} + r: "{{ l | community.general.replace_keys(target=t, matching_parameter='starts_with') }}" + + # gives + + r: + - {X: A, bbb1: B, ccc1: C} + - {X: D, bbb2: E, ccc2: F} +''' + +RETURN = ''' + _value: + description: The list of dictionaries with replaced keys. + type: list + elements: dictionary +''' + +from ansible_collections.community.general.plugins.plugin_utils.keys_filter import ( + _keys_filter_params, + _keys_filter_target_dict) + + +def replace_keys(data, target=None, matching_parameter='equal'): + """replace specific keys in a list of dictionaries""" + + # test parameters + _keys_filter_params(data, matching_parameter) + # test and transform target + tz = _keys_filter_target_dict(target, matching_parameter) + + if matching_parameter == 'equal': + def replace_key(key): + for b, a in tz: + if key == b: + return a + return key + elif matching_parameter == 'starts_with': + def replace_key(key): + for b, a in tz: + if key.startswith(b): + return a + return key + elif matching_parameter == 'ends_with': + def replace_key(key): + for b, a in tz: + if key.endswith(b): + return a + return key + elif matching_parameter == 'regex': + def replace_key(key): + for b, a in tz: + if b.match(key): + return a + return key + + return [{replace_key(k): v for k, v in d.items()} for d in data] + + +class FilterModule(object): + + def filters(self): + return { + 'replace_keys': replace_keys, + } diff --git a/plugins/filter/reveal_ansible_type.py b/plugins/filter/reveal_ansible_type.py new file mode 100644 index 0000000000..916aaff930 --- /dev/null +++ b/plugins/filter/reveal_ansible_type.py @@ -0,0 +1,134 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2024 Vladimir Botka +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + name: reveal_ansible_type + short_description: Return input type + version_added: "9.2.0" + author: 
Vladimir Botka (@vbotka) + description: This filter returns input type. + options: + _input: + description: Input data. + type: raw + required: true + alias: + description: Data type aliases. + default: {} + type: dictionary +''' + +EXAMPLES = ''' +# Substitution converts str to AnsibleUnicode +# ------------------------------------------- + +# String. AnsibleUnicode. +data: "abc" +result: '{{ data | community.general.reveal_ansible_type }}' +# result => AnsibleUnicode + +# String. AnsibleUnicode alias str. +alias: {"AnsibleUnicode": "str"} +data: "abc" +result: '{{ data | community.general.reveal_ansible_type(alias) }}' +# result => str + +# List. All items are AnsibleUnicode. +data: ["a", "b", "c"] +result: '{{ data | community.general.reveal_ansible_type }}' +# result => list[AnsibleUnicode] + +# Dictionary. All keys are AnsibleUnicode. All values are AnsibleUnicode. +data: {"a": "foo", "b": "bar", "c": "baz"} +result: '{{ data | community.general.reveal_ansible_type }}' +# result => dict[AnsibleUnicode, AnsibleUnicode] + +# No substitution and no alias. Type of strings is str +# ---------------------------------------------------- + +# String +result: '{{ "abc" | community.general.reveal_ansible_type }}' +# result => str + +# Integer +result: '{{ 123 | community.general.reveal_ansible_type }}' +# result => int + +# Float +result: '{{ 123.45 | community.general.reveal_ansible_type }}' +# result => float + +# Boolean +result: '{{ true | community.general.reveal_ansible_type }}' +# result => bool + +# List. All items are strings. +result: '{{ ["a", "b", "c"] | community.general.reveal_ansible_type }}' +# result => list[str] + +# List of dictionaries. +result: '{{ [{"a": 1}, {"b": 2}] | community.general.reveal_ansible_type }}' +# result => list[dict] + +# Dictionary. All keys are strings. All values are integers. +result: '{{ {"a": 1} | community.general.reveal_ansible_type }}' +# result => dict[str, int] + +# Dictionary. All keys are strings. All values are integers. +result: '{{ {"a": 1, "b": 2} | community.general.reveal_ansible_type }}' +# result => dict[str, int] + +# Type of strings is AnsibleUnicode or str +# ---------------------------------------- + +# Dictionary. The keys are integers or strings. All values are strings. +alias: {"AnsibleUnicode": "str"} +data: {1: 'a', 'b': 'b'} +result: '{{ data | community.general.reveal_ansible_type(alias) }}' +# result => dict[int|str, str] + +# Dictionary. All keys are integers. All values are keys. +alias: {"AnsibleUnicode": "str"} +data: {1: 'a', 2: 'b'} +result: '{{ data | community.general.reveal_ansible_type(alias) }}' +# result => dict[int, str] + +# Dictionary. All keys are strings. Multiple types values. +alias: {"AnsibleUnicode": "str"} +data: {'a': 1, 'b': 1.1, 'c': 'abc', 'd': True, 'e': ['x', 'y', 'z'], 'f': {'x': 1, 'y': 2}} +result: '{{ data | community.general.reveal_ansible_type(alias) }}' +# result => dict[str, bool|dict|float|int|list|str] + +# List. Multiple types items. +alias: {"AnsibleUnicode": "str"} +data: [1, 2, 1.1, 'abc', True, ['x', 'y', 'z'], {'x': 1, 'y': 2}] +result: '{{ data | community.general.reveal_ansible_type(alias) }}' +# result => list[bool|dict|float|int|list|str] +''' + +RETURN = ''' + _value: + description: Type of the data. 
+ type: str +''' + +from ansible_collections.community.general.plugins.plugin_utils.ansible_type import _ansible_type + + +def reveal_ansible_type(data, alias=None): + """Returns data type""" + + return _ansible_type(data, alias) + + +class FilterModule(object): + + def filters(self): + return { + 'reveal_ansible_type': reveal_ansible_type + } diff --git a/plugins/inventory/cobbler.py b/plugins/inventory/cobbler.py index cdef9944a0..664380da8f 100644 --- a/plugins/inventory/cobbler.py +++ b/plugins/inventory/cobbler.py @@ -21,20 +21,24 @@ DOCUMENTATION = ''' options: plugin: description: The name of this plugin, it should always be set to V(community.general.cobbler) for this plugin to recognize it as it's own. + type: string required: true choices: [ 'cobbler', 'community.general.cobbler' ] url: description: URL to cobbler. + type: string default: 'http://cobbler/cobbler_api' env: - name: COBBLER_SERVER user: description: Cobbler authentication user. + type: string required: false env: - name: COBBLER_USER password: description: Cobbler authentication password. + type: string required: false env: - name: COBBLER_PASSWORD diff --git a/plugins/inventory/linode.py b/plugins/inventory/linode.py index e161e086e5..5c9a4718f5 100644 --- a/plugins/inventory/linode.py +++ b/plugins/inventory/linode.py @@ -35,6 +35,7 @@ DOCUMENTATION = r''' version_added: 4.5.0 plugin: description: Marks this as an instance of the 'linode' plugin. + type: string required: true choices: ['linode', 'community.general.linode'] ip_style: @@ -47,6 +48,7 @@ DOCUMENTATION = r''' version_added: 3.6.0 access_token: description: The Linode account personal access token. + type: string required: true env: - name: LINODE_ACCESS_TOKEN diff --git a/plugins/inventory/lxd.py b/plugins/inventory/lxd.py index cf64f4ee8c..9ae004f6c5 100644 --- a/plugins/inventory/lxd.py +++ b/plugins/inventory/lxd.py @@ -20,6 +20,7 @@ DOCUMENTATION = r''' options: plugin: description: Token that ensures this is a source file for the 'lxd' plugin. + type: string required: true choices: [ 'community.general.lxd' ] url: @@ -27,8 +28,8 @@ DOCUMENTATION = r''' - The unix domain socket path or the https URL for the lxd server. - Sockets in filesystem have to start with C(unix:). - Mostly C(unix:/var/lib/lxd/unix.socket) or C(unix:/var/snap/lxd/common/lxd/unix.socket). + type: string default: unix:/var/snap/lxd/common/lxd/unix.socket - type: str client_key: description: - The client certificate key file path. diff --git a/plugins/inventory/nmap.py b/plugins/inventory/nmap.py index 2ca474a1ff..48f02c446b 100644 --- a/plugins/inventory/nmap.py +++ b/plugins/inventory/nmap.py @@ -20,6 +20,7 @@ DOCUMENTATION = ''' options: plugin: description: token that ensures this is a source file for the 'nmap' plugin. + type: string required: true choices: ['nmap', 'community.general.nmap'] sudo: @@ -29,6 +30,7 @@ DOCUMENTATION = ''' type: boolean address: description: Network IP or range of IPs to scan, you can use a simple range (10.2.2.15-25) or CIDR notation. + type: string required: true env: - name: ANSIBLE_NMAP_ADDRESS @@ -91,7 +93,7 @@ DOCUMENTATION = ''' default: true version_added: 7.4.0 notes: - - At least one of ipv4 or ipv6 is required to be True, both can be True, but they cannot both be False. + - At least one of O(ipv4) or O(ipv6) is required to be V(true); both can be V(true), but they cannot both be V(false). 
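A rough standalone sketch of the naming scheme used by the reveal_ansible_type filter above, assuming a plain alias dict; the helper names here are illustrative and the real filter delegates to the collection's plugin_utils ansible_type helper rather than this code. It shows how the alias only renames the reported type string, one level deep, as in the documented results.

def _type_name(value, alias=None):
    # report the type name, substituting an alias when one is defined
    alias = alias or {}
    name = type(value).__name__
    return alias.get(name, name)

def reveal(value, alias=None):
    # dictionaries and lists report the union of their element type names
    if isinstance(value, dict):
        keys = "|".join(sorted({_type_name(k, alias) for k in value}))
        vals = "|".join(sorted({_type_name(v, alias) for v in value.values()}))
        return "dict[%s, %s]" % (keys, vals)
    if isinstance(value, list):
        return "list[%s]" % "|".join(sorted({_type_name(v, alias) for v in value}))
    return _type_name(value, alias)

print(reveal({"a": 1, "b": 1.1, "c": True}))    # dict[str, bool|float|int]
print(reveal([{"a": 1}, {"b": 2}]))             # list[dict]
print(reveal("abc", alias={"str": "string"}))   # string -- the alias only renames the label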
- 'TODO: add OS fingerprinting' ''' EXAMPLES = ''' diff --git a/plugins/inventory/online.py b/plugins/inventory/online.py index 9355d9d414..70b8d14192 100644 --- a/plugins/inventory/online.py +++ b/plugins/inventory/online.py @@ -16,11 +16,13 @@ DOCUMENTATION = r''' options: plugin: description: token that ensures this is a source file for the 'online' plugin. + type: string required: true choices: ['online', 'community.general.online'] oauth_token: required: true description: Online OAuth token. + type: string env: # in order of precedence - name: ONLINE_TOKEN diff --git a/plugins/inventory/opennebula.py b/plugins/inventory/opennebula.py index b097307c39..077d3da5a3 100644 --- a/plugins/inventory/opennebula.py +++ b/plugins/inventory/opennebula.py @@ -143,7 +143,8 @@ class InventoryModule(BaseInventoryPlugin, Constructable): nic = [nic] for net in nic: - return net['IP'] + if net.get('IP'): + return net['IP'] return False @@ -198,6 +199,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable): continue server['name'] = vm.NAME + server['id'] = vm.ID + if hasattr(vm.HISTORY_RECORDS, 'HISTORY') and vm.HISTORY_RECORDS.HISTORY: + server['host'] = vm.HISTORY_RECORDS.HISTORY[-1].HOSTNAME server['LABELS'] = labels server['v4_first_ip'] = self._get_vm_ipv4(vm) server['v6_first_ip'] = self._get_vm_ipv6(vm) diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py index 774833c488..3ce4f789a3 100644 --- a/plugins/inventory/proxmox.py +++ b/plugins/inventory/proxmox.py @@ -275,18 +275,19 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): return self.session def _get_auth(self): - credentials = urlencode({'username': self.proxmox_user, 'password': self.proxmox_password, }) + + validate_certs = self.get_option('validate_certs') + + if validate_certs is False: + from requests.packages.urllib3 import disable_warnings + disable_warnings() if self.proxmox_password: - credentials = urlencode({'username': self.proxmox_user, 'password': self.proxmox_password, }) + credentials = urlencode({'username': self.proxmox_user, 'password': self.proxmox_password}) a = self._get_session() - if a.verify is False: - from requests.packages.urllib3 import disable_warnings - disable_warnings() - ret = a.post('%s/api2/json/access/ticket' % self.proxmox_url, data=credentials) json = ret.json() @@ -329,8 +330,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): data = json['data'] break else: - # /hosts 's 'results' is a list of all hosts, returned is paginated - data = data + json['data'] + if json['data']: + # /hosts 's 'results' is a list of all hosts, returned is paginated + data = data + json['data'] break self._cache[self.cache_key][url] = data @@ -362,6 +364,34 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): except Exception: return None + def _get_lxc_interfaces(self, properties, node, vmid): + status_key = self._fact('status') + + if status_key not in properties or not properties[status_key] == 'running': + return + + ret = self._get_json("%s/api2/json/nodes/%s/lxc/%s/interfaces" % (self.proxmox_url, node, vmid), ignore_errors=[501]) + if not ret: + return + + result = [] + + for iface in ret: + result_iface = { + 'name': iface['name'], + 'hwaddr': iface['hwaddr'] + } + + if 'inet' in iface: + result_iface['inet'] = iface['inet'] + + if 'inet6' in iface: + result_iface['inet6'] = iface['inet6'] + + result.append(result_iface) + + properties[self._fact('lxc_interfaces')] = result + def _get_agent_network_interfaces(self, node, 
vmid, vmtype): result = [] @@ -526,6 +556,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): self._get_vm_config(properties, node, vmid, ittype, name) self._get_vm_snapshots(properties, node, vmid, ittype, name) + if ittype == 'lxc': + self._get_lxc_interfaces(properties, node, vmid) + # ensure the host satisfies filters if not self._can_add_host(name, properties): return None diff --git a/plugins/inventory/scaleway.py b/plugins/inventory/scaleway.py index dc24a17dab..4205caeca7 100644 --- a/plugins/inventory/scaleway.py +++ b/plugins/inventory/scaleway.py @@ -20,6 +20,7 @@ DOCUMENTATION = r''' plugin: description: Token that ensures this is a source file for the 'scaleway' plugin. required: true + type: string choices: ['scaleway', 'community.general.scaleway'] regions: description: Filter results on a specific Scaleway region. @@ -46,6 +47,7 @@ DOCUMENTATION = r''' - If not explicitly defined or in environment variables, it will try to lookup in the scaleway-cli configuration file (C($SCW_CONFIG_PATH), C($XDG_CONFIG_HOME/scw/config.yaml), or C(~/.config/scw/config.yaml)). - More details on L(how to generate token, https://www.scaleway.com/en/docs/generate-api-keys/). + type: string env: # in order of precedence - name: SCW_TOKEN diff --git a/plugins/inventory/stackpath_compute.py b/plugins/inventory/stackpath_compute.py index 6b48a49f12..8508b4e797 100644 --- a/plugins/inventory/stackpath_compute.py +++ b/plugins/inventory/stackpath_compute.py @@ -24,6 +24,7 @@ DOCUMENTATION = ''' description: - A token that ensures this is a source file for the plugin. required: true + type: string choices: ['community.general.stackpath_compute'] client_id: description: diff --git a/plugins/inventory/virtualbox.py b/plugins/inventory/virtualbox.py index 79b04ec722..d48c294fd9 100644 --- a/plugins/inventory/virtualbox.py +++ b/plugins/inventory/virtualbox.py @@ -14,12 +14,15 @@ DOCUMENTATION = ''' - Get inventory hosts from the local virtualbox installation. - Uses a YAML configuration file that ends with virtualbox.(yml|yaml) or vbox.(yml|yaml). - The inventory_hostname is always the 'Name' of the virtualbox instance. + - Groups can be assigned to the VMs using C(VBoxManage). Multiple groups can be assigned by using V(/) as a delimeter. + - A separate parameter, O(enable_advanced_group_parsing) is exposed to change grouping behaviour. See the parameter documentation for details. extends_documentation_fragment: - constructed - inventory_cache options: plugin: description: token that ensures this is a source file for the 'virtualbox' plugin + type: string required: true choices: ['virtualbox', 'community.general.virtualbox'] running_only: @@ -28,13 +31,28 @@ DOCUMENTATION = ''' default: false settings_password_file: description: provide a file containing the settings password (equivalent to --settingspwfile) + type: string network_info_path: description: property path to query for network information (ansible_host) + type: string default: "/VirtualBox/GuestInfo/Net/0/V4/IP" query: description: create vars from virtualbox properties type: dictionary default: {} + enable_advanced_group_parsing: + description: + - The default group parsing rule (when this setting is set to V(false)) is to split the VirtualBox VM's group based on the V(/) character and + assign the resulting list elements as an Ansible Group. 
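The _get_lxc_interfaces helper added to the proxmox inventory above keeps only a stable subset of what the /api2/json/nodes/<node>/lxc/<vmid>/interfaces endpoint returns. A minimal sketch of that per-interface normalization, using an illustrative payload instead of a live API call:

# Illustrative sample only; a real payload comes from the Proxmox API.
sample = [
    {"name": "lo", "hwaddr": "00:00:00:00:00:00", "inet": "127.0.0.1/8", "inet6": "::1/128"},
    {"name": "eth0", "hwaddr": "bc:24:11:aa:bb:cc", "inet": "192.0.2.10/24"},
]

result = []
for iface in sample:
    entry = {"name": iface["name"], "hwaddr": iface["hwaddr"]}
    # inet/inet6 are optional and only copied when the container reports them
    for key in ("inet", "inet6"):
        if key in iface:
            entry[key] = iface[key]
    result.append(entry)

print(result)  # this list is what ends up in the plugin's lxc_interfaces fact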
+ - Setting O(enable_advanced_group_parsing=true) changes this behaviour to match VirtualBox's interpretation of groups according to + U(https://www.virtualbox.org/manual/UserManual.html#gui-vmgroups). + Groups are now split using the V(,) character, and the V(/) character indicates nested groups. + - When enabled, a VM that's been configured using V(VBoxManage modifyvm "vm01" --groups "/TestGroup/TestGroup2,/TestGroup3") will result in + the group C(TestGroup2) being a child group of C(TestGroup); and + the VM being a part of C(TestGroup2) and C(TestGroup3). + default: false + type: bool + version_added: 9.2.0 ''' EXAMPLES = ''' @@ -177,14 +195,10 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): # found groups elif k == 'Groups': - for group in v.split('/'): - if group: - group = make_unsafe(group) - group = self.inventory.add_group(group) - self.inventory.add_child(group, current_host) - if group not in cacheable_results: - cacheable_results[group] = {'hosts': []} - cacheable_results[group]['hosts'].append(current_host) + if self.get_option('enable_advanced_group_parsing'): + self._handle_vboxmanage_group_string(v, current_host, cacheable_results) + else: + self._handle_group_string(v, current_host, cacheable_results) continue else: @@ -227,6 +241,64 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): return all(find_host(host, inventory)) + def _handle_group_string(self, vboxmanage_group, current_host, cacheable_results): + '''Handles parsing the VM's Group assignment from VBoxManage according to this inventory's initial implementation.''' + # The original implementation of this inventory plugin treated `/` as + # a delimeter to split and use as Ansible Groups. + for group in vboxmanage_group.split('/'): + if group: + group = make_unsafe(group) + group = self.inventory.add_group(group) + self.inventory.add_child(group, current_host) + if group not in cacheable_results: + cacheable_results[group] = {'hosts': []} + cacheable_results[group]['hosts'].append(current_host) + + def _handle_vboxmanage_group_string(self, vboxmanage_group, current_host, cacheable_results): + '''Handles parsing the VM's Group assignment from VBoxManage according to VirtualBox documentation.''' + # Per the VirtualBox documentation, a VM can be part of many groups, + # and it's possible to have nested groups. + # Many groups are separated by commas ",", and nested groups use + # slash "/". + # https://www.virtualbox.org/manual/UserManual.html#gui-vmgroups + # Multi groups: VBoxManage modifyvm "vm01" --groups "/TestGroup,/TestGroup2" + # Nested groups: VBoxManage modifyvm "vm01" --groups "/TestGroup/TestGroup2" + + for group in vboxmanage_group.split(','): + if not group: + # We could get an empty element due how to split works, and + # possible assignments from VirtualBox. e.g. ,/Group1 + continue + + if group == "/": + # This is the "root" group. We get here if the VM was not + # assigned to a particular group. Consider the host to be + # unassigned to a group. + continue + + parent_group = None + for subgroup in group.split('/'): + if not subgroup: + # Similarly to above, we could get an empty element. + # e.g //Group1 + continue + + if subgroup == '/': + # "root" group. 
+ # Consider the host to be unassigned + continue + + subgroup = make_unsafe(subgroup) + subgroup = self.inventory.add_group(subgroup) + if parent_group is not None: + self.inventory.add_child(parent_group, subgroup) + self.inventory.add_child(subgroup, current_host) + if subgroup not in cacheable_results: + cacheable_results[subgroup] = {'hosts': []} + cacheable_results[subgroup]['hosts'].append(current_host) + + parent_group = subgroup + def verify_file(self, path): valid = False diff --git a/plugins/lookup/bitwarden.py b/plugins/lookup/bitwarden.py index 7584cd98a6..5e31cc6f89 100644 --- a/plugins/lookup/bitwarden.py +++ b/plugins/lookup/bitwarden.py @@ -174,8 +174,9 @@ class Bitwarden(object): else: initial_matches = [initial_matches] - # Filter to only include results from the right field. - return [item for item in initial_matches if not search_value or item[search_field] == search_value] + # Filter to only include results from the right field, if a search is requested by value or field + return [item for item in initial_matches + if not search_value or not search_field or item.get(search_field) == search_value] def get_field(self, field, search_value, search_field="name", collection_id=None, organization_id=None): """Return a list of the specified field for records whose search_field match search_value diff --git a/plugins/lookup/bitwarden_secrets_manager.py b/plugins/lookup/bitwarden_secrets_manager.py index 8cabc693ff..3d08067105 100644 --- a/plugins/lookup/bitwarden_secrets_manager.py +++ b/plugins/lookup/bitwarden_secrets_manager.py @@ -77,6 +77,8 @@ from ansible.module_utils.common.text.converters import to_text from ansible.parsing.ajson import AnsibleJSONDecoder from ansible.plugins.lookup import LookupBase +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + class BitwardenSecretsManagerException(AnsibleLookupError): pass @@ -114,6 +116,15 @@ class BitwardenSecretsManager(object): rc = p.wait() return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict'), rc + def get_bws_version(self): + """Get the version of the Bitwarden Secrets Manager CLI. + """ + out, err, rc = self._run(['--version']) + if rc != 0: + raise BitwardenSecretsManagerException(to_text(err)) + # strip the prefix and grab the last segment, the version number + return out.split()[-1] + def get_secret(self, secret_id, bws_access_token): """Get and return the secret with the given secret_id. """ @@ -122,10 +133,18 @@ class BitwardenSecretsManager(object): # Color output was not always disabled correctly with the default 'auto' setting so explicitly disable it. 
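The advanced VirtualBox group parsing above splits assignments on ',' and nesting on '/', skipping empty segments and the bare root group. A condensed sketch of just that parsing rule, with the inventory bookkeeping replaced by plain lists; function and variable names are illustrative:

def parse_vbox_groups(group_string):
    edges, memberships = [], []
    for group in group_string.split(','):
        if not group or group == '/':
            continue                                # empty element or the root group
        parent = None
        for subgroup in group.split('/'):
            if not subgroup or subgroup == '/':
                continue
            if parent is not None:
                edges.append((parent, subgroup))    # child group of the previous level
            memberships.append(subgroup)            # the VM is attached at every level
            parent = subgroup
    return edges, memberships

print(parse_vbox_groups("/TestGroup/TestGroup2,/TestGroup3"))
# ([('TestGroup', 'TestGroup2')], ['TestGroup', 'TestGroup2', 'TestGroup3'])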
params = [ '--color', 'no', - '--access-token', bws_access_token, - 'get', 'secret', secret_id + '--access-token', bws_access_token ] + # bws version 0.3.0 introduced a breaking change in the command line syntax: + # pre-0.3.0: verb noun + # 0.3.0 and later: noun verb + bws_version = self.get_bws_version() + if LooseVersion(bws_version) < LooseVersion('0.3.0'): + params.extend(['get', 'secret', secret_id]) + else: + params.extend(['secret', 'get', secret_id]) + out, err, rc = self._run_with_retry(params) if rc != 0: raise BitwardenSecretsManagerException(to_text(err)) diff --git a/plugins/lookup/chef_databag.py b/plugins/lookup/chef_databag.py index b14d924ae8..a116b21e5f 100644 --- a/plugins/lookup/chef_databag.py +++ b/plugins/lookup/chef_databag.py @@ -22,10 +22,12 @@ DOCUMENTATION = ''' name: description: - Name of the databag + type: string required: true item: description: - Item to fetch + type: string required: true ''' diff --git a/plugins/lookup/collection_version.py b/plugins/lookup/collection_version.py index 33316fc2b0..0f93c03c26 100644 --- a/plugins/lookup/collection_version.py +++ b/plugins/lookup/collection_version.py @@ -63,11 +63,11 @@ RETURN = """ import json import os import re +from importlib import import_module import yaml from ansible.errors import AnsibleLookupError -from ansible.module_utils.compat.importlib import import_module from ansible.plugins.lookup import LookupBase diff --git a/plugins/lookup/consul_kv.py b/plugins/lookup/consul_kv.py index f8aadadc19..79eb65edb1 100644 --- a/plugins/lookup/consul_kv.py +++ b/plugins/lookup/consul_kv.py @@ -29,13 +29,17 @@ DOCUMENTATION = ''' index: description: - If the key has a value with the specified index then this is returned allowing access to historical values. + type: int datacenter: description: - Retrieve the key from a consul datacenter other than the default for the consul host. + type: str token: description: The acl token to allow access to restricted values. + type: str host: default: localhost + type: str description: - The target to connect to, must be a resolvable address. - Will be determined from E(ANSIBLE_CONSUL_URL) if that is set. @@ -46,22 +50,26 @@ DOCUMENTATION = ''' description: - The port of the target host to connect to. - If you use E(ANSIBLE_CONSUL_URL) this value will be used from there. + type: int default: 8500 scheme: default: http + type: str description: - Whether to use http or https. - If you use E(ANSIBLE_CONSUL_URL) this value will be used from there. validate_certs: default: true - description: Whether to verify the ssl connection or not. + description: Whether to verify the TLS connection or not. + type: bool env: - name: ANSIBLE_CONSUL_VALIDATE_CERTS ini: - section: lookup_consul key: validate_certs client_cert: - description: The client cert to verify the ssl connection. + description: The client cert to verify the TLS connection. 
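Because bws 0.3.0 swapped the verb/noun order, the bitwarden_secrets_manager lookup above now builds the trailing CLI arguments from the detected version. A small sketch of that decision in isolation; the secret id is a placeholder and the access-token flags are left out:

from ansible_collections.community.general.plugins.module_utils.version import LooseVersion

def secret_args(bws_version, secret_id):
    # pre-0.3.0 used 'get secret <id>', 0.3.0 and later use 'secret get <id>'
    if LooseVersion(bws_version) < LooseVersion('0.3.0'):
        return ['get', 'secret', secret_id]
    return ['secret', 'get', secret_id]

print(secret_args('0.2.1', 'SECRET-ID'))   # ['get', 'secret', 'SECRET-ID']
print(secret_args('1.0.0', 'SECRET-ID'))   # ['secret', 'get', 'SECRET-ID']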
+ type: str env: - name: ANSIBLE_CONSUL_CLIENT_CERT ini: @@ -94,7 +102,7 @@ EXAMPLES = """ - name: retrieving a KV from a remote cluster on non default port ansible.builtin.debug: - msg: "{{ lookup('community.general.consul_kv', 'my/key', host='10.10.10.10', port='2000') }}" + msg: "{{ lookup('community.general.consul_kv', 'my/key', host='10.10.10.10', port=2000) }}" """ RETURN = """ diff --git a/plugins/lookup/credstash.py b/plugins/lookup/credstash.py index 6a3f58595b..fd284f55c8 100644 --- a/plugins/lookup/credstash.py +++ b/plugins/lookup/credstash.py @@ -120,10 +120,10 @@ class LookupModule(LookupBase): aws_secret_access_key = self.get_option('aws_secret_access_key') aws_session_token = self.get_option('aws_session_token') - context = dict( - (k, v) for k, v in kwargs.items() + context = { + k: v for k, v in kwargs.items() if k not in ('version', 'region', 'table', 'profile_name', 'aws_access_key_id', 'aws_secret_access_key', 'aws_session_token') - ) + } kwargs_pass = { 'profile_name': profile_name, diff --git a/plugins/lookup/cyberarkpassword.py b/plugins/lookup/cyberarkpassword.py index c3cc427df8..6a08675b3b 100644 --- a/plugins/lookup/cyberarkpassword.py +++ b/plugins/lookup/cyberarkpassword.py @@ -17,19 +17,23 @@ DOCUMENTATION = ''' options : _command: description: Cyberark CLI utility. + type: string env: - name: AIM_CLIPASSWORDSDK_CMD default: '/opt/CARKaim/sdk/clipasswordsdk' appid: description: Defines the unique ID of the application that is issuing the password request. + type: string required: true query: description: Describes the filter criteria for the password retrieval. + type: string required: true output: description: - Specifies the desired output fields separated by commas. - "They could be: Password, PassProps., PasswordChangeInProcess" + type: string default: 'password' _extra: description: for extra_params values please check parameters for clipasswordsdk in CyberArk's "Credential Provider and ASCP Implementation Guide" diff --git a/plugins/lookup/dig.py b/plugins/lookup/dig.py index 5be57cec78..a7768092c5 100644 --- a/plugins/lookup/dig.py +++ b/plugins/lookup/dig.py @@ -75,6 +75,11 @@ DOCUMENTATION = ''' default: false type: bool version_added: 7.5.0 + port: + description: Use port as target port when looking up DNS records. + default: 53 + type: int + version_added: 9.5.0 notes: - ALL is not a record per-se, merely the listed fields are available for any record results you retrieve in the form of a dictionary. - While the 'dig' lookup plugin supports anything which dnspython supports out of the box, only a subset can be converted into a dictionary. @@ -330,11 +335,13 @@ class LookupModule(LookupBase): myres.use_edns(0, ednsflags=dns.flags.DO, payload=edns_size) domains = [] + nameservers = [] qtype = self.get_option('qtype') flat = self.get_option('flat') fail_on_error = self.get_option('fail_on_error') real_empty = self.get_option('real_empty') tcp = self.get_option('tcp') + port = self.get_option('port') try: rdclass = dns.rdataclass.from_text(self.get_option('class')) except Exception as e: @@ -345,7 +352,6 @@ class LookupModule(LookupBase): if t.startswith('@'): # e.g. "@10.0.1.2,192.0.2.1" is ok. nsset = t[1:].split(',') for ns in nsset: - nameservers = [] # Check if we have a valid IP address. If so, use that, otherwise # try to resolve name to address using system's resolver. If that # fails we bail out. 
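The dig lookup now takes a port option and accumulates every '@server' entry into one nameserver list instead of resetting it per term. A minimal dnspython sketch of that resolver setup, assuming dnspython >= 2.0 (older releases use query() instead of resolve()); the addresses are placeholders:

import dns.resolver

def make_resolver(nameservers, port=53):
    res = dns.resolver.Resolver(configure=True)   # start from the system resolv.conf
    res.port = port                               # target port for every query
    if nameservers:                               # only override when '@...' was given
        res.nameservers = nameservers
    return res

res = make_resolver(['192.0.2.53'], port=5353)
# answers = res.resolve('example.com', 'A')       # uncomment against a reachable server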
@@ -358,7 +364,6 @@ class LookupModule(LookupBase): nameservers.append(nsaddr) except Exception as e: raise AnsibleError("dns lookup NS: %s" % to_native(e)) - myres.nameservers = nameservers continue if '=' in t: try: @@ -397,6 +402,11 @@ class LookupModule(LookupBase): # print "--- domain = {0} qtype={1} rdclass={2}".format(domain, qtype, rdclass) + if port: + myres.port = port + if len(nameservers) > 0: + myres.nameservers = nameservers + if qtype.upper() == 'PTR': reversed_domains = [] for domain in domains: @@ -443,12 +453,7 @@ class LookupModule(LookupBase): raise AnsibleError("Lookup failed: %s" % str(err)) if not real_empty: ret.append('NXDOMAIN') - except dns.resolver.NoAnswer as err: - if fail_on_error: - raise AnsibleError("Lookup failed: %s" % str(err)) - if not real_empty: - ret.append("") - except dns.resolver.Timeout as err: + except (dns.resolver.NoAnswer, dns.resolver.Timeout, dns.resolver.NoNameservers) as err: if fail_on_error: raise AnsibleError("Lookup failed: %s" % str(err)) if not real_empty: diff --git a/plugins/lookup/dsv.py b/plugins/lookup/dsv.py index 2dbb7db3ea..5e26c43af4 100644 --- a/plugins/lookup/dsv.py +++ b/plugins/lookup/dsv.py @@ -22,6 +22,7 @@ options: required: true tenant: description: The first format parameter in the default O(url_template). + type: string env: - name: DSV_TENANT ini: @@ -32,6 +33,7 @@ options: default: com description: The top-level domain of the tenant; the second format parameter in the default O(url_template). + type: string env: - name: DSV_TLD ini: @@ -40,6 +42,7 @@ options: required: false client_id: description: The client_id with which to request the Access Grant. + type: string env: - name: DSV_CLIENT_ID ini: @@ -48,6 +51,7 @@ options: required: true client_secret: description: The client secret associated with the specific O(client_id). + type: string env: - name: DSV_CLIENT_SECRET ini: @@ -58,6 +62,7 @@ options: default: https://{}.secretsvaultcloud.{}/v1 description: The path to prepend to the base URL to form a valid REST API request. + type: string env: - name: DSV_URL_TEMPLATE ini: diff --git a/plugins/lookup/etcd.py b/plugins/lookup/etcd.py index 5135e74877..1dec890b20 100644 --- a/plugins/lookup/etcd.py +++ b/plugins/lookup/etcd.py @@ -25,12 +25,14 @@ DOCUMENTATION = ''' url: description: - Environment variable with the URL for the etcd server + type: string default: 'http://127.0.0.1:4001' env: - name: ANSIBLE_ETCD_URL version: description: - Environment variable with the etcd protocol version + type: string default: 'v1' env: - name: ANSIBLE_ETCD_VERSION diff --git a/plugins/lookup/filetree.py b/plugins/lookup/filetree.py index 2131de99a5..ee7bfe27b7 100644 --- a/plugins/lookup/filetree.py +++ b/plugins/lookup/filetree.py @@ -17,8 +17,10 @@ description: This enables merging different trees in order of importance, or add role_vars to specific paths to influence different instances of the same role. options: _terms: - description: path(s) of files to read + description: Path(s) of files to read. 
required: true + type: list + elements: string ''' EXAMPLES = r""" diff --git a/plugins/lookup/github_app_access_token.py b/plugins/lookup/github_app_access_token.py index 5cd99b81c7..3ce695942d 100644 --- a/plugins/lookup/github_app_access_token.py +++ b/plugins/lookup/github_app_access_token.py @@ -49,8 +49,8 @@ EXAMPLES = ''' dest: /srv/checkout vars: github_token: >- - lookup('community.general.github_app_access_token', key_path='/home/to_your/key', - app_id='123456', installation_id='64209') + {{ lookup('community.general.github_app_access_token', key_path='/home/to_your/key', + app_id='123456', installation_id='64209') }} ''' RETURN = ''' diff --git a/plugins/lookup/hiera.py b/plugins/lookup/hiera.py index fa4d0a1999..02669c98dc 100644 --- a/plugins/lookup/hiera.py +++ b/plugins/lookup/hiera.py @@ -25,12 +25,14 @@ DOCUMENTATION = ''' executable: description: - Binary file to execute Hiera. + type: string default: '/usr/bin/hiera' env: - name: ANSIBLE_HIERA_BIN config_file: description: - File that describes the hierarchy of Hiera. + type: string default: '/etc/hiera.yaml' env: - name: ANSIBLE_HIERA_CFG diff --git a/plugins/lookup/merge_variables.py b/plugins/lookup/merge_variables.py index ce7621ad23..6287914747 100644 --- a/plugins/lookup/merge_variables.py +++ b/plugins/lookup/merge_variables.py @@ -12,7 +12,7 @@ DOCUMENTATION = """ - Mark Ettema (@m-a-r-k-e) - Alexander Petrenz (@alpex8) name: merge_variables - short_description: merge variables with a certain suffix + short_description: merge variables whose names match a given pattern description: - This lookup returns the merged result of all variables in scope that match the given prefixes, suffixes, or regular expressions, optionally. diff --git a/plugins/lookup/onepassword.py b/plugins/lookup/onepassword.py index 8ca95de0bc..921cf9acb8 100644 --- a/plugins/lookup/onepassword.py +++ b/plugins/lookup/onepassword.py @@ -23,6 +23,8 @@ DOCUMENTATION = ''' _terms: description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve. required: true + type: list + elements: string account_id: version_added: 7.5.0 domain: @@ -133,7 +135,7 @@ class OnePassCLIBase(with_metaclass(abc.ABCMeta, object)): self._version = None def _check_required_params(self, required_params): - non_empty_attrs = dict((param, getattr(self, param, None)) for param in required_params if getattr(self, param, None)) + non_empty_attrs = {param: getattr(self, param) for param in required_params if getattr(self, param, None)} missing = set(required_params).difference(non_empty_attrs) if missing: prefix = "Unable to sign in to 1Password. Missing required parameter" diff --git a/plugins/lookup/onepassword_doc.py b/plugins/lookup/onepassword_doc.py index ab24795df2..789e51c35a 100644 --- a/plugins/lookup/onepassword_doc.py +++ b/plugins/lookup/onepassword_doc.py @@ -24,6 +24,8 @@ DOCUMENTATION = ''' _terms: description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve. required: true + type: list + elements: string extends_documentation_fragment: - community.general.onepassword diff --git a/plugins/lookup/onepassword_raw.py b/plugins/lookup/onepassword_raw.py index 3eef535a1c..dc3e590329 100644 --- a/plugins/lookup/onepassword_raw.py +++ b/plugins/lookup/onepassword_raw.py @@ -23,6 +23,8 @@ DOCUMENTATION = ''' _terms: description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve. 
required: true + type: list + elements: string account_id: version_added: 7.5.0 domain: diff --git a/plugins/lookup/passwordstore.py b/plugins/lookup/passwordstore.py index 9814fe133b..f35d268995 100644 --- a/plugins/lookup/passwordstore.py +++ b/plugins/lookup/passwordstore.py @@ -14,7 +14,7 @@ DOCUMENTATION = ''' short_description: manage passwords with passwordstore.org's pass utility description: - Enables Ansible to retrieve, create or update passwords from the passwordstore.org pass utility. - It also retrieves YAML style keys stored as multilines in the passwordfile. + It can also retrieve, create or update YAML style keys stored as multilines in the passwordfile. - To avoid problems when accessing multiple secrets at once, add C(auto-expand-secmem) to C(~/.gnupg/gpg-agent.conf). Where this is not possible, consider using O(lock=readwrite) instead. options: @@ -33,17 +33,18 @@ DOCUMENTATION = ''' env: - name: PASSWORD_STORE_DIR create: - description: Create the password if it does not already exist. Takes precedence over O(missing). + description: Create the password or the subkey if it does not already exist. Takes precedence over O(missing). type: bool default: false overwrite: - description: Overwrite the password if it does already exist. + description: Overwrite the password or the subkey if it does already exist. type: bool default: false umask: description: - - Sets the umask for the created .gpg files. The first octed must be greater than 3 (user readable). + - Sets the umask for the created V(.gpg) files. The first octed must be greater than 3 (user readable). - Note pass' default value is V('077'). + type: string env: - name: PASSWORD_STORE_UMASK version_added: 1.3.0 @@ -52,7 +53,9 @@ DOCUMENTATION = ''' type: bool default: false subkey: - description: Return a specific subkey of the password. When set to V(password), always returns the first line. + description: + - By default return a specific subkey of the password. When set to V(password), always returns the first line. + - With O(overwrite=true), it will create the subkey and return it. type: str default: password userpass: @@ -63,7 +66,7 @@ DOCUMENTATION = ''' type: integer default: 16 backup: - description: Used with O(overwrite=true). Backup the previous password in a subkey. + description: Used with O(overwrite=true). Backup the previous password or subkey in a subkey. type: bool default: false nosymbols: @@ -188,6 +191,17 @@ tasks.yml: | vars: mypassword: "{{ lookup('community.general.passwordstore', 'example/test', missing='create')}}" + - name: >- + Create a random 16 character password in a subkey. If the password file already exists, just add the subkey in it. + If the subkey exists, returns it + ansible.builtin.debug: + msg: "{{ lookup('community.general.passwordstore', 'example/test', create=true, subkey='foo') }}" + + - name: >- + Create a random 16 character password in a subkey. Overwrite if it already exists and backup the old one. 
+ ansible.builtin.debug: + msg: "{{ lookup('community.general.passwordstore', 'example/test', create=true, subkey='user', overwrite=true, backup=true) }}" + - name: Prints 'abc' if example/test does not exist, just give the password otherwise ansible.builtin.debug: var: mypassword @@ -410,15 +424,48 @@ class LookupModule(LookupBase): def update_password(self): # generate new password, insert old lines from current result and return new password + # if the target is a subkey, only modify the subkey newpass = self.get_newpass() datetime = time.strftime("%d/%m/%Y %H:%M:%S") - msg = newpass - if self.paramvals['preserve'] or self.paramvals['timestamp']: - msg += '\n' - if self.paramvals['preserve'] and self.passoutput[1:]: - msg += '\n'.join(self.passoutput[1:]) + '\n' - if self.paramvals['timestamp'] and self.paramvals['backup']: - msg += "lookup_pass: old password was {0} (Updated on {1})\n".format(self.password, datetime) + subkey = self.paramvals["subkey"] + + if subkey != "password": + + msg_lines = [] + subkey_exists = False + subkey_line = "{0}: {1}".format(subkey, newpass) + oldpass = None + + for line in self.passoutput: + if line.startswith("{0}: ".format(subkey)): + oldpass = self.passdict[subkey] + line = subkey_line + subkey_exists = True + + msg_lines.append(line) + + if not subkey_exists: + msg_lines.insert(2, subkey_line) + + if self.paramvals["timestamp"] and self.paramvals["backup"] and oldpass and oldpass != newpass: + msg_lines.append( + "lookup_pass: old subkey '{0}' password was {1} (Updated on {2})\n".format( + subkey, oldpass, datetime + ) + ) + + msg = os.linesep.join(msg_lines) + + else: + msg = newpass + + if self.paramvals['preserve'] or self.paramvals['timestamp']: + msg += '\n' + if self.paramvals['preserve'] and self.passoutput[1:]: + msg += '\n'.join(self.passoutput[1:]) + '\n' + if self.paramvals['timestamp'] and self.paramvals['backup']: + msg += "lookup_pass: old password was {0} (Updated on {1})\n".format(self.password, datetime) + try: check_output2([self.pass_cmd, 'insert', '-f', '-m', self.passname], input=msg, env=self.env) except (subprocess.CalledProcessError) as e: @@ -430,13 +477,21 @@ class LookupModule(LookupBase): # use pwgen to generate the password and insert values with pass -m newpass = self.get_newpass() datetime = time.strftime("%d/%m/%Y %H:%M:%S") - msg = newpass + subkey = self.paramvals["subkey"] + + if subkey != "password": + msg = "\n\n{0}: {1}".format(subkey, newpass) + else: + msg = newpass + if self.paramvals['timestamp']: msg += '\n' + "lookup_pass: First generated by ansible on {0}\n".format(datetime) + try: check_output2([self.pass_cmd, 'insert', '-f', '-m', self.passname], input=msg, env=self.env) except (subprocess.CalledProcessError) as e: raise AnsibleError('exit code {0} while running {1}. 
Error output: {2}'.format(e.returncode, e.cmd, e.output)) + return newpass def get_passresult(self): @@ -468,7 +523,8 @@ class LookupModule(LookupBase): def opt_lock(self, type): if self.get_option('lock') == type: tmpdir = os.environ.get('TMPDIR', '/tmp') - lockfile = os.path.join(tmpdir, '.passwordstore.lock') + user = os.environ.get('USER') + lockfile = os.path.join(tmpdir, '.{0}.passwordstore.lock'.format(user)) with FileLock().lock_file(lockfile, tmpdir, self.lock_timeout): self.locked = type yield @@ -523,7 +579,10 @@ class LookupModule(LookupBase): self.parse_params(term) # parse the input into paramvals with self.opt_lock('readwrite'): if self.check_pass(): # password exists - if self.paramvals['overwrite'] and self.paramvals['subkey'] == 'password': + if self.paramvals['overwrite']: + with self.opt_lock('write'): + result.append(self.update_password()) + elif self.paramvals["subkey"] != "password" and not self.passdict.get(self.paramvals['subkey']): # password exists but not the subkey with self.opt_lock('write'): result.append(self.update_password()) else: diff --git a/plugins/lookup/random_string.py b/plugins/lookup/random_string.py index d3b29629d7..9b811dd8b3 100644 --- a/plugins/lookup/random_string.py +++ b/plugins/lookup/random_string.py @@ -104,37 +104,37 @@ EXAMPLES = r""" - name: Generate random string ansible.builtin.debug: var: lookup('community.general.random_string') - # Example result: ['DeadBeeF'] + # Example result: 'DeadBeeF' - name: Generate random string with length 12 ansible.builtin.debug: var: lookup('community.general.random_string', length=12) - # Example result: ['Uan0hUiX5kVG'] + # Example result: 'Uan0hUiX5kVG' - name: Generate base64 encoded random string ansible.builtin.debug: var: lookup('community.general.random_string', base64=True) - # Example result: ['NHZ6eWN5Qk0='] + # Example result: 'NHZ6eWN5Qk0=' - name: Generate a random string with 1 lower, 1 upper, 1 number and 1 special char (at least) ansible.builtin.debug: var: lookup('community.general.random_string', min_lower=1, min_upper=1, min_special=1, min_numeric=1) - # Example result: ['&Qw2|E[-'] + # Example result: '&Qw2|E[-' - name: Generate a random string with all lower case characters - debug: + ansible.builtin.debug: var: query('community.general.random_string', upper=false, numbers=false, special=false) # Example result: ['exolxzyz'] - name: Generate random hexadecimal string - debug: + ansible.builtin.debug: var: query('community.general.random_string', upper=false, lower=false, override_special=hex_chars, numbers=false) vars: hex_chars: '0123456789ABCDEF' # Example result: ['D2A40737'] - name: Generate random hexadecimal string with override_all - debug: + ansible.builtin.debug: var: query('community.general.random_string', override_all=hex_chars) vars: hex_chars: '0123456789ABCDEF' diff --git a/plugins/lookup/redis.py b/plugins/lookup/redis.py index 43b046a798..17cbf120e9 100644 --- a/plugins/lookup/redis.py +++ b/plugins/lookup/redis.py @@ -19,8 +19,11 @@ DOCUMENTATION = ''' options: _terms: description: list of keys to query + type: list + elements: string host: description: location of Redis host + type: string default: '127.0.0.1' env: - name: ANSIBLE_REDIS_HOST diff --git a/plugins/lookup/shelvefile.py b/plugins/lookup/shelvefile.py index 35f1097c8b..70d18338e9 100644 --- a/plugins/lookup/shelvefile.py +++ b/plugins/lookup/shelvefile.py @@ -15,11 +15,15 @@ DOCUMENTATION = ''' options: _terms: description: Sets of key value pairs of parameters. 
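The passwordstore update_password changes above rewrite only the line belonging to the requested subkey and keep the first line (the password itself) untouched. A minimal sketch of that line handling, with illustrative entry contents and without the pass insert round-trip:

def update_subkey(passoutput, subkey, newpass):
    subkey_line = "{0}: {1}".format(subkey, newpass)
    lines, found = [], False
    for line in passoutput:
        if line.startswith("{0}: ".format(subkey)):
            line, found = subkey_line, True        # replace the existing subkey line
        lines.append(line)
    if not found:
        lines.insert(2, subkey_line)               # add the subkey, keep line 1 as the password
    return lines

entry = ["hunter2", "", "user: alice"]             # illustrative decrypted pass entry
print(update_subkey(entry, "user", "s3cret"))      # ['hunter2', '', 'user: s3cret']
print(update_subkey(entry, "foo", "s3cret"))       # ['hunter2', '', 'foo: s3cret', 'user: alice']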
+ type: list + elements: str key: description: Key to query. + type: str required: true file: description: Path to shelve file. + type: path required: true ''' diff --git a/plugins/lookup/tss.py b/plugins/lookup/tss.py index 80105ff715..f2d79ed168 100644 --- a/plugins/lookup/tss.py +++ b/plugins/lookup/tss.py @@ -25,7 +25,8 @@ options: _terms: description: The integer ID of the secret. required: true - type: int + type: list + elements: int secret_path: description: Indicate a full path of secret including folder and secret name when the secret ID is set to 0. required: false @@ -52,6 +53,7 @@ options: version_added: 7.0.0 base_url: description: The base URL of the server, for example V(https://localhost/SecretServer). + type: string env: - name: TSS_BASE_URL ini: @@ -60,6 +62,7 @@ options: required: true username: description: The username with which to request the OAuth2 Access Grant. + type: string env: - name: TSS_USERNAME ini: @@ -69,6 +72,7 @@ options: description: - The password associated with the supplied username. - Required when O(token) is not provided. + type: string env: - name: TSS_PASSWORD ini: @@ -80,6 +84,7 @@ options: - The domain with which to request the OAuth2 Access Grant. - Optional when O(token) is not provided. - Requires C(python-tss-sdk) version 1.0.0 or greater. + type: string env: - name: TSS_DOMAIN ini: @@ -92,6 +97,7 @@ options: - Existing token for Thycotic authorizer. - If provided, O(username) and O(password) are not needed. - Requires C(python-tss-sdk) version 1.0.0 or greater. + type: string env: - name: TSS_TOKEN ini: @@ -102,6 +108,7 @@ options: default: /api/v1 description: The path to append to the base URL to form a valid REST API request. + type: string env: - name: TSS_API_PATH_URI required: false @@ -109,6 +116,7 @@ options: default: /oauth2/token description: The path to append to the base URL to form a valid OAuth2 Access Grant request. 
+ type: string env: - name: TSS_TOKEN_PATH_URI required: false diff --git a/plugins/module_utils/cmd_runner.py b/plugins/module_utils/cmd_runner.py index 2bf2b32e8c..f9d6e98056 100644 --- a/plugins/module_utils/cmd_runner.py +++ b/plugins/module_utils/cmd_runner.py @@ -11,6 +11,7 @@ from functools import wraps from ansible.module_utils.common.collections import is_sequence from ansible.module_utils.six import iteritems +from ansible.module_utils.common.locale import get_best_parsable_locale def _ensure_list(value): @@ -89,18 +90,31 @@ class FormatError(CmdRunnerException): class _ArgFormat(object): + # DEPRECATION: set default value for ignore_none to True in community.general 12.0.0 def __init__(self, func, ignore_none=None, ignore_missing_value=False): self.func = func self.ignore_none = ignore_none self.ignore_missing_value = ignore_missing_value - def __call__(self, value, ctx_ignore_none): + # DEPRECATION: remove parameter ctx_ignore_none in community.general 12.0.0 + def __call__(self, value, ctx_ignore_none=True): + # DEPRECATION: replace ctx_ignore_none with True in community.general 12.0.0 ignore_none = self.ignore_none if self.ignore_none is not None else ctx_ignore_none if value is None and ignore_none: return [] f = self.func return [str(x) for x in f(value)] + def __str__(self): + return "".format( + self.func, + self.ignore_none, + self.ignore_missing_value, + ) + + def __repr__(self): + return str(self) + class _Format(object): @staticmethod @@ -114,7 +128,7 @@ class _Format(object): @staticmethod def as_bool_not(args): - return _ArgFormat(lambda value: [] if value else _ensure_list(args), ignore_none=False) + return _Format.as_bool([], args, ignore_none=False) @staticmethod def as_optval(arg, ignore_none=None): @@ -184,6 +198,19 @@ class _Format(object): return func(**v) return wrapper + @staticmethod + def stack(fmt): + @wraps(fmt) + def wrapper(*args, **kwargs): + new_func = fmt(ignore_none=True, *args, **kwargs) + + def stacking(value): + stack = [new_func(v) for v in value if v] + stack = [x for args in stack for x in args] + return stack + return _ArgFormat(stacking, ignore_none=True) + return wrapper + class CmdRunner(object): """ @@ -204,9 +231,19 @@ class CmdRunner(object): self.default_args_order = self._prepare_args_order(default_args_order) if arg_formats is None: arg_formats = {} - self.arg_formats = dict(arg_formats) + self.arg_formats = {} + for fmt_name, fmt in arg_formats.items(): + if not isinstance(fmt, _ArgFormat): + fmt = _Format.as_func(func=fmt, ignore_none=True) + self.arg_formats[fmt_name] = fmt self.check_rc = check_rc - self.force_lang = force_lang + if force_lang == "auto": + try: + self.force_lang = get_best_parsable_locale(module) + except RuntimeWarning: + self.force_lang = "C" + else: + self.force_lang = force_lang self.path_prefix = path_prefix if environ_update is None: environ_update = {} @@ -223,7 +260,16 @@ class CmdRunner(object): def binary(self): return self.command[0] - def __call__(self, args_order=None, output_process=None, ignore_value_none=True, check_mode_skip=False, check_mode_return=None, **kwargs): + # remove parameter ignore_value_none in community.general 12.0.0 + def __call__(self, args_order=None, output_process=None, ignore_value_none=None, check_mode_skip=False, check_mode_return=None, **kwargs): + if ignore_value_none is None: + ignore_value_none = True + else: + self.module.deprecate( + "Using ignore_value_none when creating the runner context is now deprecated, " + "and the parameter will be removed in 
community.general 12.0.0. ", + version="12.0.0", collection_name="community.general" + ) if output_process is None: output_process = _process_as_is if args_order is None: @@ -235,7 +281,7 @@ class CmdRunner(object): return _CmdRunnerContext(runner=self, args_order=args_order, output_process=output_process, - ignore_value_none=ignore_value_none, + ignore_value_none=ignore_value_none, # DEPRECATION: remove in community.general 12.0.0 check_mode_skip=check_mode_skip, check_mode_return=check_mode_return, **kwargs) @@ -251,6 +297,7 @@ class _CmdRunnerContext(object): self.runner = runner self.args_order = tuple(args_order) self.output_process = output_process + # DEPRECATION: parameter ignore_value_none at the context level is deprecated and will be removed in community.general 12.0.0 self.ignore_value_none = ignore_value_none self.check_mode_skip = check_mode_skip self.check_mode_return = check_mode_return @@ -290,6 +337,7 @@ class _CmdRunnerContext(object): value = named_args[arg_name] elif not runner.arg_formats[arg_name].ignore_missing_value: raise MissingArgumentValue(self.args_order, arg_name) + # DEPRECATION: remove parameter ctx_ignore_none in 12.0.0 self.cmd.extend(runner.arg_formats[arg_name](value, ctx_ignore_none=self.ignore_value_none)) except MissingArgumentValue: raise @@ -306,7 +354,7 @@ class _CmdRunnerContext(object): @property def run_info(self): return dict( - ignore_value_none=self.ignore_value_none, + ignore_value_none=self.ignore_value_none, # DEPRECATION: remove in community.general 12.0.0 check_rc=self.check_rc, environ_update=self.environ_update, args_order=self.args_order, diff --git a/plugins/module_utils/consul.py b/plugins/module_utils/consul.py index 68c1a130b4..cd54a105f8 100644 --- a/plugins/module_utils/consul.py +++ b/plugins/module_utils/consul.py @@ -10,6 +10,7 @@ __metaclass__ = type import copy import json +import re from ansible.module_utils.six.moves.urllib import error as urllib_error from ansible.module_utils.six.moves.urllib.parse import urlencode @@ -68,6 +69,25 @@ def camel_case_key(key): return "".join(parts) +def validate_check(check): + validate_duration_keys = ['Interval', 'Ttl', 'Timeout'] + validate_tcp_regex = r"(?P.*):(?P(?:[0-9]+))$" + if check.get('Tcp') is not None: + match = re.match(validate_tcp_regex, check['Tcp']) + if not match: + raise Exception('tcp check must be in host:port format') + for duration in validate_duration_keys: + if duration in check and check[duration] is not None: + check[duration] = validate_duration(check[duration]) + + +def validate_duration(duration): + if duration: + if not re.search(r"\d+(?:ns|us|ms|s|m|h)", duration): + duration = "{0}s".format(duration) + return duration + + STATE_PARAMETER = "state" STATE_PRESENT = "present" STATE_ABSENT = "absent" @@ -81,7 +101,7 @@ OPERATION_DELETE = "remove" def _normalize_params(params, arg_spec): final_params = {} for k, v in params.items(): - if k not in arg_spec: # Alias + if k not in arg_spec or v is None: # Alias continue spec = arg_spec[k] if ( @@ -105,9 +125,10 @@ class _ConsulModule: """ api_endpoint = None # type: str - unique_identifier = None # type: str + unique_identifiers = None # type: list result_key = None # type: str create_only_fields = set() + operational_attributes = set() params = {} def __init__(self, module): @@ -119,6 +140,8 @@ class _ConsulModule: if k not in STATE_PARAMETER and k not in AUTH_ARGUMENTS_SPEC } + self.operational_attributes.update({"CreateIndex", "CreateTime", "Hash", "ModifyIndex"}) + def execute(self): obj = self.read_object() 
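The new Consul helpers above normalize check durations (an implicit 's' when no unit is given) and require Tcp checks to look like host:port. A short sketch of those two rules, using an equivalent pattern without named groups for the port test; values are illustrative:

import re

def normalize_duration(duration):
    # mirror validate_duration: append 's' when no ns/us/ms/s/m/h suffix is present
    if duration and not re.search(r"\d+(?:ns|us|ms|s|m|h)", duration):
        duration = "{0}s".format(duration)
    return duration

def looks_like_host_port(tcp):
    # equivalent to the Tcp test in validate_check: anything, a colon, then digits
    return bool(re.match(r".*:[0-9]+$", tcp))

print(normalize_duration("30"))           # '30s'
print(normalize_duration("500ms"))        # '500ms'
print(looks_like_host_port("db01:5432"))  # True
print(looks_like_host_port("db01"))       # False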
@@ -203,14 +226,24 @@ class _ConsulModule: return False def prepare_object(self, existing, obj): - operational_attributes = {"CreateIndex", "CreateTime", "Hash", "ModifyIndex"} existing = { - k: v for k, v in existing.items() if k not in operational_attributes + k: v for k, v in existing.items() if k not in self.operational_attributes } for k, v in obj.items(): existing[k] = v return existing + def id_from_obj(self, obj, camel_case=False): + def key_func(key): + return camel_case_key(key) if camel_case else key + + if self.unique_identifiers: + for identifier in self.unique_identifiers: + identifier = key_func(identifier) + if identifier in obj: + return obj[identifier] + return None + def endpoint_url(self, operation, identifier=None): if operation == OPERATION_CREATE: return self.api_endpoint @@ -219,7 +252,8 @@ class _ConsulModule: raise RuntimeError("invalid arguments passed") def read_object(self): - url = self.endpoint_url(OPERATION_READ, self.params.get(self.unique_identifier)) + identifier = self.id_from_obj(self.params) + url = self.endpoint_url(OPERATION_READ, identifier) try: return self.get(url) except RequestError as e: @@ -233,25 +267,28 @@ class _ConsulModule: if self._module.check_mode: return obj else: - return self.put(self.api_endpoint, data=self.prepare_object({}, obj)) + url = self.endpoint_url(OPERATION_CREATE) + created_obj = self.put(url, data=self.prepare_object({}, obj)) + if created_obj is None: + created_obj = self.read_object() + return created_obj def update_object(self, existing, obj): - url = self.endpoint_url( - OPERATION_UPDATE, existing.get(camel_case_key(self.unique_identifier)) - ) merged_object = self.prepare_object(existing, obj) if self._module.check_mode: return merged_object else: - return self.put(url, data=merged_object) + url = self.endpoint_url(OPERATION_UPDATE, self.id_from_obj(existing, camel_case=True)) + updated_obj = self.put(url, data=merged_object) + if updated_obj is None: + updated_obj = self.read_object() + return updated_obj def delete_object(self, obj): if self._module.check_mode: return {} else: - url = self.endpoint_url( - OPERATION_DELETE, obj.get(camel_case_key(self.unique_identifier)) - ) + url = self.endpoint_url(OPERATION_DELETE, self.id_from_obj(obj, camel_case=True)) return self.delete(url) def _request(self, method, url_parts, data=None, params=None): @@ -309,7 +346,9 @@ class _ConsulModule: if 400 <= status < 600: raise RequestError(status, response_data) - return json.loads(response_data) + if response_data: + return json.loads(response_data) + return None def get(self, url_parts, **kwargs): return self._request("GET", url_parts, **kwargs) diff --git a/plugins/module_utils/csv.py b/plugins/module_utils/csv.py index 200548a46d..46408e4877 100644 --- a/plugins/module_utils/csv.py +++ b/plugins/module_utils/csv.py @@ -43,7 +43,7 @@ def initialize_dialect(dialect, **kwargs): raise DialectNotAvailableError("Dialect '%s' is not supported by your version of python." 
% dialect) # Create a dictionary from only set options - dialect_params = dict((k, v) for k, v in kwargs.items() if v is not None) + dialect_params = {k: v for k, v in kwargs.items() if v is not None} if dialect_params: try: csv.register_dialect('custom', dialect, **dialect_params) diff --git a/plugins/module_utils/django.py b/plugins/module_utils/django.py index b93dabbd2c..5fb375c6fd 100644 --- a/plugins/module_utils/django.py +++ b/plugins/module_utils/django.py @@ -7,6 +7,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type +from ansible.module_utils.common.dict_transformations import dict_merge from ansible_collections.community.general.plugins.module_utils.cmd_runner import cmd_runner_fmt from ansible_collections.community.general.plugins.module_utils.python_runner import PythonRunner from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper @@ -33,6 +34,18 @@ _django_std_arg_fmts = dict( skip_checks=cmd_runner_fmt.as_bool("--skip-checks"), ) +_django_database_args = dict( + database=dict(type="str", default="default"), +) + +_args_menu = dict( + std=(django_std_args, _django_std_arg_fmts), + database=(_django_database_args, {"database": cmd_runner_fmt.as_opt_eq_val("--database")}), + noinput=({}, {"noinput": cmd_runner_fmt.as_fixed("--noinput")}), + dry_run=({}, {"dry_run": cmd_runner_fmt.as_bool("--dry-run")}), + check=({}, {"check": cmd_runner_fmt.as_bool("--check")}), +) + class _DjangoRunner(PythonRunner): def __init__(self, module, arg_formats=None, **kwargs): @@ -54,15 +67,31 @@ class DjangoModuleHelper(ModuleHelper): django_admin_cmd = None arg_formats = {} django_admin_arg_order = () + use_old_vardict = False + _django_args = [] + _check_mode_arg = "" def __init__(self): - argument_spec = dict(django_std_args) - argument_spec.update(self.module.get("argument_spec", {})) - self.module["argument_spec"] = argument_spec + self.module["argument_spec"], self.arg_formats = self._build_args(self.module.get("argument_spec", {}), + self.arg_formats, + *(["std"] + self._django_args)) super(DjangoModuleHelper, self).__init__(self.module) if self.django_admin_cmd is not None: self.vars.command = self.django_admin_cmd + @staticmethod + def _build_args(arg_spec, arg_format, *names): + res_arg_spec = {} + res_arg_fmts = {} + for name in names: + args, fmts = _args_menu[name] + res_arg_spec = dict_merge(res_arg_spec, args) + res_arg_fmts = dict_merge(res_arg_fmts, fmts) + res_arg_spec = dict_merge(res_arg_spec, arg_spec) + res_arg_fmts = dict_merge(res_arg_fmts, arg_format) + + return res_arg_spec, res_arg_fmts + def __run__(self): runner = _DjangoRunner(self.module, default_args_order=self.django_admin_arg_order, @@ -70,7 +99,10 @@ class DjangoModuleHelper(ModuleHelper): venv=self.vars.venv, check_rc=True) with runner() as ctx: - results = ctx.run() + run_params = self.vars.as_dict() + if self._check_mode_arg: + run_params.update({self._check_mode_arg: self.check_mode}) + results = ctx.run(**run_params) self.vars.stdout = ctx.results_out self.vars.stderr = ctx.results_err self.vars.cmd = ctx.cmd diff --git a/plugins/module_utils/homebrew.py b/plugins/module_utils/homebrew.py index 2816832109..4b5c4672e4 100644 --- a/plugins/module_utils/homebrew.py +++ b/plugins/module_utils/homebrew.py @@ -113,3 +113,30 @@ class HomebrewValidate(object): return isinstance( package, string_types ) and not cls.INVALID_PACKAGE_REGEX.search(package) + + +def parse_brew_path(module): + # type: (...) 
-> str + """Attempt to find the Homebrew executable path. + + Requires: + - module has a `path` parameter + - path is a valid path string for the target OS. Otherwise, module.fail_json() + is called with msg="Invalid_path: ". + """ + path = module.params["path"] + if not HomebrewValidate.valid_path(path): + module.fail_json(msg="Invalid path: {0}".format(path)) + + if isinstance(path, string_types): + paths = path.split(":") + elif isinstance(path, list): + paths = path + else: + module.fail_json(msg="Invalid path: {0}".format(path)) + + brew_path = module.get_bin_path("brew", required=True, opt_dirs=paths) + if not HomebrewValidate.valid_brew_path(brew_path): + module.fail_json(msg="Invalid brew path: {0}".format(brew_path)) + + return brew_path diff --git a/plugins/module_utils/identity/keycloak/keycloak.py b/plugins/module_utils/identity/keycloak/keycloak.py index b2a1892503..15603331b0 100644 --- a/plugins/module_utils/identity/keycloak/keycloak.py +++ b/plugins/module_utils/identity/keycloak/keycloak.py @@ -19,6 +19,7 @@ from ansible.module_utils.common.text.converters import to_native, to_text URL_REALM_INFO = "{url}/realms/{realm}" URL_REALMS = "{url}/admin/realms" URL_REALM = "{url}/admin/realms/{realm}" +URL_REALM_KEYS_METADATA = "{url}/admin/realms/{realm}/keys" URL_TOKEN = "{url}/realms/{realm}/protocol/openid-connect/token" URL_CLIENT = "{url}/admin/realms/{realm}/clients/{id}" @@ -184,8 +185,7 @@ def get_token(module_params): 'password': auth_password, } # Remove empty items, for instance missing client_secret - payload = dict( - (k, v) for k, v in temp_payload.items() if v is not None) + payload = {k: v for k, v in temp_payload.items() if v is not None} try: r = json.loads(to_native(open_url(auth_url, method='POST', validate_certs=validate_certs, http_agent=http_agent, timeout=connection_timeout, @@ -306,6 +306,37 @@ class KeycloakAPI(object): self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)), exception=traceback.format_exc()) + def get_realm_keys_metadata_by_id(self, realm='master'): + """Obtain realm public info by id + + :param realm: realm id + + :return: None, or a 'KeysMetadataRepresentation' + (https://www.keycloak.org/docs-api/latest/rest-api/index.html#KeysMetadataRepresentation) + -- a dict containing the keys 'active' and 'keys', the former containing a mapping + from algorithms to key-ids, the latter containing a list of dicts with key + information. 
+ """ + realm_keys_metadata_url = URL_REALM_KEYS_METADATA.format(url=self.baseurl, realm=realm) + + try: + return json.loads(to_native(open_url(realm_keys_metadata_url, method='GET', http_agent=self.http_agent, headers=self.restheaders, + timeout=self.connection_timeout, + validate_certs=self.validate_certs).read())) + + except HTTPError as e: + if e.code == 404: + return None + else: + self.fail_open_url(e, msg='Could not obtain realm %s: %s' % (realm, str(e)), + exception=traceback.format_exc()) + except ValueError as e: + self.module.fail_json(msg='API returned incorrect JSON when trying to obtain realm %s: %s' % (realm, str(e)), + exception=traceback.format_exc()) + except Exception as e: + self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)), + exception=traceback.format_exc()) + def get_realm_by_id(self, realm='master'): """ Obtain realm representation by id @@ -1468,6 +1499,23 @@ class KeycloakAPI(object): self.module.fail_json(msg="Could not fetch group %s in realm %s: %s" % (gid, realm, str(e))) + def get_subgroups(self, parent, realm="master"): + if 'subGroupCount' in parent: + # Since version 23, when GETting a group Keycloak does not + # return subGroups but only a subGroupCount. + # Children must be fetched in a second request. + if parent['subGroupCount'] == 0: + group_children = [] + else: + group_children_url = URL_GROUP_CHILDREN.format(url=self.baseurl, realm=realm, groupid=parent['id']) + group_children = json.loads(to_native(open_url(group_children_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, + timeout=self.connection_timeout, + validate_certs=self.validate_certs).read())) + subgroups = group_children + else: + subgroups = parent['subGroups'] + return subgroups + def get_group_by_name(self, name, realm="master", parents=None): """ Fetch a keycloak group within a realm based on its name. 
@@ -1488,7 +1536,7 @@ class KeycloakAPI(object): if not parent: return None - all_groups = parent['subGroups'] + all_groups = self.get_subgroups(parent, realm) else: all_groups = self.get_groups(realm=realm) @@ -1537,7 +1585,7 @@ class KeycloakAPI(object): return None for p in name_chain[1:]: - for sg in tmp['subGroups']: + for sg in self.get_subgroups(tmp): pv, is_id = self._get_normed_group_parent(p) if is_id: diff --git a/plugins/module_utils/ilo_redfish_utils.py b/plugins/module_utils/ilo_redfish_utils.py index 9cb6e527a3..808583ae63 100644 --- a/plugins/module_utils/ilo_redfish_utils.py +++ b/plugins/module_utils/ilo_redfish_utils.py @@ -29,6 +29,7 @@ class iLORedfishUtils(RedfishUtils): result['ret'] = True data = response['data'] + current_session = None if 'Oem' in data: if data["Oem"]["Hpe"]["Links"]["MySession"]["@odata.id"]: current_session = data["Oem"]["Hpe"]["Links"]["MySession"]["@odata.id"] diff --git a/plugins/module_utils/mh/deco.py b/plugins/module_utils/mh/deco.py index 5138b212c7..408891cb8e 100644 --- a/plugins/module_utils/mh/deco.py +++ b/plugins/module_utils/mh/deco.py @@ -13,23 +13,27 @@ from functools import wraps from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException -def cause_changes(on_success=None, on_failure=None): +def cause_changes(on_success=None, on_failure=None, when=None): + # Parameters on_success and on_failure are deprecated and should be removed in community.general 12.0.0 def deco(func): - if on_success is None and on_failure is None: - return func - @wraps(func) - def wrapper(*args, **kwargs): + def wrapper(self, *args, **kwargs): try: - self = args[0] - func(*args, **kwargs) + func(self, *args, **kwargs) if on_success is not None: self.changed = on_success + elif when == "success": + self.changed = True except Exception: if on_failure is not None: self.changed = on_failure + elif when == "failure": + self.changed = True raise + finally: + if when == "always": + self.changed = True return wrapper @@ -41,17 +45,15 @@ def module_fails_on_exception(func): @wraps(func) def wrapper(self, *args, **kwargs): + def fix_key(k): + return k if k not in conflict_list else "_" + k + def fix_var_conflicts(output): - result = dict([ - (k if k not in conflict_list else "_" + k, v) - for k, v in output.items() - ]) + result = {fix_key(k): v for k, v in output.items()} return result try: func(self, *args, **kwargs) - except SystemExit: - raise except ModuleHelperException as e: if e.update_output: self.update_output(e.update_output) @@ -73,6 +75,7 @@ def check_mode_skip(func): def wrapper(self, *args, **kwargs): if not self.module.check_mode: return func(self, *args, **kwargs) + return wrapper @@ -87,7 +90,7 @@ def check_mode_skip_returns(callable=None, value=None): return func(self, *args, **kwargs) return wrapper_callable - if value is not None: + else: @wraps(func) def wrapper_value(self, *args, **kwargs): if self.module.check_mode: @@ -95,7 +98,4 @@ def check_mode_skip_returns(callable=None, value=None): return func(self, *args, **kwargs) return wrapper_value - if callable is None and value is None: - return check_mode_skip - return deco diff --git a/plugins/module_utils/mh/mixins/vars.py b/plugins/module_utils/mh/mixins/vars.py index 1615609735..7db9904f93 100644 --- a/plugins/module_utils/mh/mixins/vars.py +++ b/plugins/module_utils/mh/mixins/vars.py @@ -113,7 +113,7 @@ class VarDict(object): self._meta[name] = meta def output(self): - return dict((k, v) for k, v in self._data.items() if 
self.meta(k).output) + return {k: v for k, v in self._data.items() if self.meta(k).output} def diff(self): diff_results = [(k, self.meta(k).diff_result) for k in self._data] @@ -125,7 +125,7 @@ class VarDict(object): return None def facts(self): - facts_result = dict((k, v) for k, v in self._data.items() if self._meta[k].fact) + facts_result = {k: v for k, v in self._data.items() if self._meta[k].fact} return facts_result if facts_result else None def change_vars(self): diff --git a/plugins/module_utils/ocapi_utils.py b/plugins/module_utils/ocapi_utils.py index 232c915060..8b8687199a 100644 --- a/plugins/module_utils/ocapi_utils.py +++ b/plugins/module_utils/ocapi_utils.py @@ -56,7 +56,7 @@ class OcapiUtils(object): follow_redirects='all', use_proxy=True, timeout=self.timeout) data = json.loads(to_native(resp.read())) - headers = dict((k.lower(), v) for (k, v) in resp.info().items()) + headers = {k.lower(): v for (k, v) in resp.info().items()} except HTTPError as e: return {'ret': False, 'msg': "HTTP Error %s on GET request to '%s'" @@ -86,7 +86,7 @@ class OcapiUtils(object): data = json.loads(to_native(resp.read())) else: data = "" - headers = dict((k.lower(), v) for (k, v) in resp.info().items()) + headers = {k.lower(): v for (k, v) in resp.info().items()} except HTTPError as e: return {'ret': False, 'msg': "HTTP Error %s on DELETE request to '%s'" @@ -113,7 +113,7 @@ class OcapiUtils(object): force_basic_auth=basic_auth, validate_certs=False, follow_redirects='all', use_proxy=True, timeout=self.timeout) - headers = dict((k.lower(), v) for (k, v) in resp.info().items()) + headers = {k.lower(): v for (k, v) in resp.info().items()} except HTTPError as e: return {'ret': False, 'msg': "HTTP Error %s on PUT request to '%s'" @@ -144,7 +144,7 @@ class OcapiUtils(object): force_basic_auth=basic_auth, validate_certs=False, follow_redirects='all', use_proxy=True, timeout=self.timeout if timeout is None else timeout) - headers = dict((k.lower(), v) for (k, v) in resp.info().items()) + headers = {k.lower(): v for (k, v) in resp.info().items()} except HTTPError as e: return {'ret': False, 'msg': "HTTP Error %s on POST request to '%s'" diff --git a/plugins/module_utils/opennebula.py b/plugins/module_utils/opennebula.py index 94732e4f7c..24833350c6 100644 --- a/plugins/module_utils/opennebula.py +++ b/plugins/module_utils/opennebula.py @@ -16,6 +16,7 @@ from ansible.module_utils.six import string_types from ansible.module_utils.basic import AnsibleModule +IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS'] HAS_PYONE = True try: @@ -347,3 +348,90 @@ class OpenNebulaModule: result: the Ansible result """ raise NotImplementedError("Method requires implementation") + + def get_image_list_id(self, image, element): + """ + This is a helper function for get_image_info to iterate over a simple list of objects + """ + list_of_id = [] + + if element == 'VMS': + image_list = image.VMS + if element == 'CLONES': + image_list = image.CLONES + if element == 'APP_CLONES': + image_list = image.APP_CLONES + + for iter in image_list.ID: + list_of_id.append( + # These are optional so firstly check for presence + getattr(iter, 'ID', 'Null'), + ) + return list_of_id + + def get_image_snapshots_list(self, image): + """ + This is a helper function for get_image_info to iterate over a dictionary + """ + list_of_snapshots = [] + + for iter in image.SNAPSHOTS.SNAPSHOT: + list_of_snapshots.append({ + 'date': iter['DATE'], + 'parent': 
iter['PARENT'], + 'size': iter['SIZE'], + # These are optional so firstly check for presence + 'allow_orhans': getattr(image.SNAPSHOTS, 'ALLOW_ORPHANS', 'Null'), + 'children': getattr(iter, 'CHILDREN', 'Null'), + 'active': getattr(iter, 'ACTIVE', 'Null'), + 'name': getattr(iter, 'NAME', 'Null'), + }) + return list_of_snapshots + + def get_image_info(self, image): + """ + This method is used by one_image and one_image_info modules to retrieve + information from XSD scheme of an image + Returns: a copy of the parameters that includes the resolved parameters. + """ + info = { + 'id': image.ID, + 'name': image.NAME, + 'state': IMAGE_STATES[image.STATE], + 'running_vms': image.RUNNING_VMS, + 'used': bool(image.RUNNING_VMS), + 'user_name': image.UNAME, + 'user_id': image.UID, + 'group_name': image.GNAME, + 'group_id': image.GID, + 'permissions': { + 'owner_u': image.PERMISSIONS.OWNER_U, + 'owner_m': image.PERMISSIONS.OWNER_M, + 'owner_a': image.PERMISSIONS.OWNER_A, + 'group_u': image.PERMISSIONS.GROUP_U, + 'group_m': image.PERMISSIONS.GROUP_M, + 'group_a': image.PERMISSIONS.GROUP_A, + 'other_u': image.PERMISSIONS.OTHER_U, + 'other_m': image.PERMISSIONS.OTHER_M, + 'other_a': image.PERMISSIONS.OTHER_A + }, + 'type': image.TYPE, + 'disk_type': image.DISK_TYPE, + 'persistent': image.PERSISTENT, + 'regtime': image.REGTIME, + 'source': image.SOURCE, + 'path': image.PATH, + 'fstype': getattr(image, 'FSTYPE', 'Null'), + 'size': image.SIZE, + 'cloning_ops': image.CLONING_OPS, + 'cloning_id': image.CLONING_ID, + 'target_snapshot': image.TARGET_SNAPSHOT, + 'datastore_id': image.DATASTORE_ID, + 'datastore': image.DATASTORE, + 'vms': self.get_image_list_id(image, 'VMS'), + 'clones': self.get_image_list_id(image, 'CLONES'), + 'app_clones': self.get_image_list_id(image, 'APP_CLONES'), + 'snapshots': self.get_image_snapshots_list(image), + 'template': image.TEMPLATE, + } + return info diff --git a/plugins/module_utils/pipx.py b/plugins/module_utils/pipx.py index a385ec93e7..513b9081f6 100644 --- a/plugins/module_utils/pipx.py +++ b/plugins/module_utils/pipx.py @@ -9,41 +9,55 @@ __metaclass__ = type from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt as fmt +pipx_common_argspec = { + "global": dict(type='bool', default=False), + "executable": dict(type='path'), +} + + _state_map = dict( install='install', + install_all='install-all', present='install', uninstall='uninstall', absent='uninstall', uninstall_all='uninstall-all', inject='inject', + uninject='uninject', upgrade='upgrade', + upgrade_shared='upgrade-shared', upgrade_all='upgrade-all', reinstall='reinstall', reinstall_all='reinstall-all', + pin='pin', + unpin='unpin', ) def pipx_runner(module, command, **kwargs): + arg_formats = dict( + state=fmt.as_map(_state_map), + name=fmt.as_list(), + name_source=fmt.as_func(fmt.unpack_args(lambda n, s: [s] if s else [n])), + install_apps=fmt.as_bool("--include-apps"), + install_deps=fmt.as_bool("--include-deps"), + inject_packages=fmt.as_list(), + force=fmt.as_bool("--force"), + include_injected=fmt.as_bool("--include-injected"), + index_url=fmt.as_opt_val('--index-url'), + python=fmt.as_opt_val('--python'), + system_site_packages=fmt.as_bool("--system-site-packages"), + _list=fmt.as_fixed(['list', '--include-injected', '--json']), + editable=fmt.as_bool("--editable"), + pip_args=fmt.as_opt_eq_val('--pip-args'), + suffix=fmt.as_opt_val('--suffix'), + ) + arg_formats["global"] = fmt.as_bool("--global") + runner = CmdRunner( module, command=command, - 
arg_formats=dict( - - state=fmt.as_map(_state_map), - name=fmt.as_list(), - name_source=fmt.as_func(fmt.unpack_args(lambda n, s: [s] if s else [n])), - install_apps=fmt.as_bool("--include-apps"), - install_deps=fmt.as_bool("--include-deps"), - inject_packages=fmt.as_list(), - force=fmt.as_bool("--force"), - include_injected=fmt.as_bool("--include-injected"), - index_url=fmt.as_opt_val('--index-url'), - python=fmt.as_opt_val('--python'), - system_site_packages=fmt.as_bool("--system-site-packages"), - _list=fmt.as_fixed(['list', '--include-injected', '--json']), - editable=fmt.as_bool("--editable"), - pip_args=fmt.as_opt_eq_val('--pip-args'), - ), + arg_formats=arg_formats, environ_update={'USE_EMOJI': '0'}, check_rc=True, **kwargs diff --git a/plugins/module_utils/proxmox.py b/plugins/module_utils/proxmox.py index 5fd783d654..b0037dacb3 100644 --- a/plugins/module_utils/proxmox.py +++ b/plugins/module_utils/proxmox.py @@ -8,6 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type import traceback +from time import sleep PROXMOXER_IMP_ERR = None try: @@ -29,6 +30,9 @@ def proxmox_auth_argument_spec(): required=True, fallback=(env_fallback, ['PROXMOX_HOST']) ), + api_port=dict(type='int', + fallback=(env_fallback, ['PROXMOX_PORT']) + ), api_user=dict(type='str', required=True, fallback=(env_fallback, ['PROXMOX_USER']) @@ -67,6 +71,8 @@ def ansible_to_proxmox_bool(value): class ProxmoxAnsible(object): """Base class for Proxmox modules""" + TASK_TIMED_OUT = 'timeout expired' + def __init__(self, module): if not HAS_PROXMOXER: module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR) @@ -82,6 +88,7 @@ class ProxmoxAnsible(object): def _connect(self): api_host = self.module.params['api_host'] + api_port = self.module.params['api_port'] api_user = self.module.params['api_user'] api_password = self.module.params['api_password'] api_token_id = self.module.params['api_token_id'] @@ -89,6 +96,10 @@ class ProxmoxAnsible(object): validate_certs = self.module.params['validate_certs'] auth_args = {'user': api_user} + + if api_port: + auth_args['port'] = api_port + if api_password: auth_args['password'] = api_password else: @@ -159,6 +170,32 @@ class ProxmoxAnsible(object): except Exception as e: self.module.fail_json(msg='Unable to retrieve API task ID from node %s: %s' % (node, e)) + def api_task_complete(self, node_name, task_id, timeout): + """Wait until the task stops or times out. + + :param node_name: Proxmox node name where the task is running. + :param task_id: ID of the running task. + :param timeout: Timeout in seconds to wait for the task to complete. + :return: Task completion status (True/False) and ``exitstatus`` message when status=False. 
+ """ + status = {} + while timeout: + try: + status = self.proxmox_api.nodes(node_name).tasks(task_id).status.get() + except Exception as e: + self.module.fail_json(msg='Unable to retrieve API task ID from node %s: %s' % (node_name, e)) + + if status['status'] == 'stopped': + if status['exitstatus'] == 'OK': + return True, None + else: + return False, status['exitstatus'] + else: + timeout -= 1 + if timeout <= 0: + return False, ProxmoxAnsible.TASK_TIMED_OUT + sleep(1) + def get_pool(self, poolid): """Retrieve pool information diff --git a/plugins/module_utils/python_runner.py b/plugins/module_utils/python_runner.py index f678f247b4..b65867c61e 100644 --- a/plugins/module_utils/python_runner.py +++ b/plugins/module_utils/python_runner.py @@ -22,10 +22,12 @@ class PythonRunner(CmdRunner): if (os.path.isabs(python) or '/' in python): self.python = python elif self.has_venv: - path_prefix = os.path.join(venv, "bin") + if path_prefix is None: + path_prefix = [] + path_prefix.append(os.path.join(venv, "bin")) if environ_update is None: environ_update = {} - environ_update["PATH"] = "%s:%s" % (path_prefix, os.environ["PATH"]) + environ_update["PATH"] = "%s:%s" % (":".join(path_prefix), os.environ["PATH"]) environ_update["VIRTUAL_ENV"] = venv python_cmd = [self.python] + _ensure_list(command) diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index 6935573d0b..1836bfc7b7 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -11,6 +11,7 @@ import os import random import string import gzip +import time from io import BytesIO from ansible.module_utils.urls import open_url from ansible.module_utils.common.text.converters import to_native @@ -41,7 +42,7 @@ FAIL_MSG = 'Issuing a data modification command without specifying the '\ class RedfishUtils(object): def __init__(self, creds, root_uri, timeout, module, resource_id=None, - data_modification=False, strip_etag_quotes=False): + data_modification=False, strip_etag_quotes=False, ciphers=None): self.root_uri = root_uri self.creds = creds self.timeout = timeout @@ -52,6 +53,7 @@ class RedfishUtils(object): self.resource_id = resource_id self.data_modification = data_modification self.strip_etag_quotes = strip_etag_quotes + self.ciphers = ciphers self._vendor = None self._init_session() @@ -132,11 +134,13 @@ class RedfishUtils(object): return resp # The following functions are to send GET/POST/PATCH/DELETE requests - def get_request(self, uri, override_headers=None, allow_no_resp=False): + def get_request(self, uri, override_headers=None, allow_no_resp=False, timeout=None): req_headers = dict(GET_HEADERS) if override_headers: req_headers.update(override_headers) username, password, basic_auth = self._auth_params(req_headers) + if timeout is None: + timeout = self.timeout try: # Service root is an unauthenticated resource; remove credentials # in case the caller will be using sessions later. 
@@ -146,8 +150,8 @@ class RedfishUtils(object): url_username=username, url_password=password, force_basic_auth=basic_auth, validate_certs=False, follow_redirects='all', - use_proxy=True, timeout=self.timeout) - headers = dict((k.lower(), v) for (k, v) in resp.info().items()) + use_proxy=True, timeout=timeout, ciphers=self.ciphers) + headers = {k.lower(): v for (k, v) in resp.info().items()} try: if headers.get('content-encoding') == 'gzip' and LooseVersion(ansible_version) < LooseVersion('2.14'): # Older versions of Ansible do not automatically decompress the data @@ -161,11 +165,11 @@ class RedfishUtils(object): if not allow_no_resp: raise except HTTPError as e: - msg = self._get_extended_message(e) + msg, data = self._get_extended_message(e) return {'ret': False, 'msg': "HTTP Error %s on GET request to '%s', extended message: '%s'" % (e.code, uri, msg), - 'status': e.code} + 'status': e.code, 'data': data} except URLError as e: return {'ret': False, 'msg': "URL Error on GET request to '%s': '%s'" % (uri, e.reason)} @@ -196,19 +200,19 @@ class RedfishUtils(object): url_username=username, url_password=password, force_basic_auth=basic_auth, validate_certs=False, follow_redirects='all', - use_proxy=True, timeout=self.timeout) + use_proxy=True, timeout=self.timeout, ciphers=self.ciphers) try: data = json.loads(to_native(resp.read())) except Exception as e: # No response data; this is okay in many cases data = None - headers = dict((k.lower(), v) for (k, v) in resp.info().items()) + headers = {k.lower(): v for (k, v) in resp.info().items()} except HTTPError as e: - msg = self._get_extended_message(e) + msg, data = self._get_extended_message(e) return {'ret': False, 'msg': "HTTP Error %s on POST request to '%s', extended message: '%s'" % (e.code, uri, msg), - 'status': e.code} + 'status': e.code, 'data': data} except URLError as e: return {'ret': False, 'msg': "URL Error on POST request to '%s': '%s'" % (uri, e.reason)} @@ -250,13 +254,13 @@ class RedfishUtils(object): url_username=username, url_password=password, force_basic_auth=basic_auth, validate_certs=False, follow_redirects='all', - use_proxy=True, timeout=self.timeout) + use_proxy=True, timeout=self.timeout, ciphers=self.ciphers) except HTTPError as e: - msg = self._get_extended_message(e) + msg, data = self._get_extended_message(e) return {'ret': False, 'changed': False, 'msg': "HTTP Error %s on PATCH request to '%s', extended message: '%s'" % (e.code, uri, msg), - 'status': e.code} + 'status': e.code, 'data': data} except URLError as e: return {'ret': False, 'changed': False, 'msg': "URL Error on PATCH request to '%s': '%s'" % (uri, e.reason)} @@ -285,13 +289,13 @@ class RedfishUtils(object): url_username=username, url_password=password, force_basic_auth=basic_auth, validate_certs=False, follow_redirects='all', - use_proxy=True, timeout=self.timeout) + use_proxy=True, timeout=self.timeout, ciphers=self.ciphers) except HTTPError as e: - msg = self._get_extended_message(e) + msg, data = self._get_extended_message(e) return {'ret': False, 'msg': "HTTP Error %s on PUT request to '%s', extended message: '%s'" % (e.code, uri, msg), - 'status': e.code} + 'status': e.code, 'data': data} except URLError as e: return {'ret': False, 'msg': "URL Error on PUT request to '%s': '%s'" % (uri, e.reason)} @@ -311,13 +315,13 @@ class RedfishUtils(object): url_username=username, url_password=password, force_basic_auth=basic_auth, validate_certs=False, follow_redirects='all', - use_proxy=True, timeout=self.timeout) + use_proxy=True, timeout=self.timeout, 
ciphers=self.ciphers) except HTTPError as e: - msg = self._get_extended_message(e) + msg, data = self._get_extended_message(e) return {'ret': False, 'msg': "HTTP Error %s on DELETE request to '%s', extended message: '%s'" % (e.code, uri, msg), - 'status': e.code} + 'status': e.code, 'data': data} except URLError as e: return {'ret': False, 'msg': "URL Error on DELETE request to '%s': '%s'" % (uri, e.reason)} @@ -387,8 +391,10 @@ class RedfishUtils(object): :param error: an HTTPError exception :type error: HTTPError :return: the ExtendedInfo message if present, else standard HTTP error + :return: the JSON data of the response if present """ msg = http_client.responses.get(error.code, '') + data = None if error.code >= 400: try: body = error.read().decode('utf-8') @@ -402,7 +408,7 @@ class RedfishUtils(object): msg = str(data['error']['@Message.ExtendedInfo']) except Exception: pass - return msg + return msg, data def _init_session(self): pass @@ -606,12 +612,13 @@ class RedfishUtils(object): data = response['data'] if 'Parameters' in data: params = data['Parameters'] - ai = dict((p['Name'], p) - for p in params if 'Name' in p) + ai = {p['Name']: p for p in params if 'Name' in p} if not ai: - ai = dict((k[:-24], - {'AllowableValues': v}) for k, v in action.items() - if k.endswith('@Redfish.AllowableValues')) + ai = { + k[:-24]: {'AllowableValues': v} + for k, v in action.items() + if k.endswith('@Redfish.AllowableValues') + } return ai def _get_allowable_values(self, action, name, default_values=None): @@ -624,6 +631,24 @@ class RedfishUtils(object): allowable_values = default_values return allowable_values + def check_service_availability(self): + """ + Checks if the service is accessible. + + :return: dict containing the status of the service + """ + + # Get the service root + # Override the timeout since the service root is expected to be readily + # available. 
+ service_root = self.get_request(self.root_uri + self.service_root, timeout=10) + if service_root['ret'] is False: + # Failed, either due to a timeout or HTTP error; not available + return {'ret': True, 'available': False} + + # Successfully accessed the service root; available + return {'ret': True, 'available': True} + def get_logs(self): log_svcs_uri_list = [] list_of_logs = [] @@ -670,7 +695,7 @@ class RedfishUtils(object): entry[prop] = logEntry.get(prop) if entry: list_of_log_entries.append(entry) - log_name = log_svcs_uri.split('/')[-1] + log_name = log_svcs_uri.rstrip('/').split('/')[-1] logs[log_name] = list_of_log_entries list_of_logs.append(logs) @@ -841,6 +866,7 @@ class RedfishUtils(object): return response data = response['data'] controller_name = 'Controller 1' + storage_id = data['Id'] if 'Controllers' in data: controllers_uri = data['Controllers'][u'@odata.id'] @@ -875,6 +901,7 @@ class RedfishUtils(object): data = response['data'] drive_result = {} + drive_result['RedfishURI'] = data['@odata.id'] for property in properties: if property in data: if data[property] is not None: @@ -886,6 +913,7 @@ class RedfishUtils(object): drive_result[property] = data[property] drive_results.append(drive_result) drives = {'Controller': controller_name, + 'StorageId': storage_id, 'Drives': drive_results} result["entries"].append(drives) @@ -1024,7 +1052,7 @@ class RedfishUtils(object): if 'Drives' in data[u'Links']: for link in data[u'Links'][u'Drives']: drive_id_link = link[u'@odata.id'] - drive_id = drive_id_link.split("/")[-1] + drive_id = drive_id_link.rstrip('/').split('/')[-1] drive_id_list.append({'Id': drive_id}) volume_result['Linked_drives'] = drive_id_list volume_results.append(volume_result) @@ -1083,11 +1111,12 @@ class RedfishUtils(object): return self.manage_power(command, self.systems_uri, '#ComputerSystem.Reset') - def manage_manager_power(self, command): + def manage_manager_power(self, command, wait=False, wait_timeout=120): return self.manage_power(command, self.manager_uri, - '#Manager.Reset') + '#Manager.Reset', wait, wait_timeout) - def manage_power(self, command, resource_uri, action_name): + def manage_power(self, command, resource_uri, action_name, wait=False, + wait_timeout=120): key = "Actions" reset_type_values = ['On', 'ForceOff', 'GracefulShutdown', 'GracefulRestart', 'ForceRestart', 'Nmi', @@ -1147,6 +1176,30 @@ class RedfishUtils(object): response = self.post_request(self.root_uri + action_uri, payload) if response['ret'] is False: return response + + # If requested to wait for the service to be available again, block + # until it's ready + if wait: + elapsed_time = 0 + start_time = time.time() + # Start with a large enough sleep. Some services will process new + # requests while in the middle of shutting down, thus breaking out + # early. + time.sleep(30) + + # Periodically check for the service's availability. 
+ while elapsed_time <= wait_timeout: + status = self.check_service_availability() + if status['available']: + # It's available; we're done + break + time.sleep(5) + elapsed_time = time.time() - start_time + + if elapsed_time > wait_timeout: + # Exhausted the wait timer; error + return {'ret': False, 'changed': True, + 'msg': 'The service did not become available after %d seconds' % wait_timeout} return {'ret': True, 'changed': True} def manager_reset_to_defaults(self, command): @@ -1197,32 +1250,49 @@ class RedfishUtils(object): return response return {'ret': True, 'changed': True} - def _find_account_uri(self, username=None, acct_id=None): + def _find_account_uri(self, username=None, acct_id=None, password_change_uri=None): if not any((username, acct_id)): return {'ret': False, 'msg': 'Must provide either account_id or account_username'} - response = self.get_request(self.root_uri + self.accounts_uri) - if response['ret'] is False: - return response - data = response['data'] - - uris = [a.get('@odata.id') for a in data.get('Members', []) if - a.get('@odata.id')] - for uri in uris: - response = self.get_request(self.root_uri + uri) + if password_change_uri: + # Password change required; go directly to the specified URI + response = self.get_request(self.root_uri + password_change_uri) if response['ret'] is False: - continue + return response data = response['data'] headers = response['headers'] if username: if username == data.get('UserName'): return {'ret': True, 'data': data, - 'headers': headers, 'uri': uri} + 'headers': headers, 'uri': password_change_uri} if acct_id: if acct_id == data.get('Id'): return {'ret': True, 'data': data, - 'headers': headers, 'uri': uri} + 'headers': headers, 'uri': password_change_uri} + else: + # Walk the accounts collection to find the desired user + response = self.get_request(self.root_uri + self.accounts_uri) + if response['ret'] is False: + return response + data = response['data'] + + uris = [a.get('@odata.id') for a in data.get('Members', []) if + a.get('@odata.id')] + for uri in uris: + response = self.get_request(self.root_uri + uri) + if response['ret'] is False: + continue + data = response['data'] + headers = response['headers'] + if username: + if username == data.get('UserName'): + return {'ret': True, 'data': data, + 'headers': headers, 'uri': uri} + if acct_id: + if acct_id == data.get('Id'): + return {'ret': True, 'data': data, + 'headers': headers, 'uri': uri} return {'ret': False, 'no_match': True, 'msg': 'No account with the given account_id or account_username found'} @@ -1443,7 +1513,8 @@ class RedfishUtils(object): 'Must provide account_password for UpdateUserPassword command'} response = self._find_account_uri(username=user.get('account_username'), - acct_id=user.get('account_id')) + acct_id=user.get('account_id'), + password_change_uri=user.get('account_passwordchangerequired')) if not response['ret']: return response @@ -1486,6 +1557,31 @@ class RedfishUtils(object): resp['msg'] = 'Modified account service' return resp + def check_password_change_required(self, return_data): + """ + Checks a response if a user needs to change their password + + :param return_data: The return data for a failed request + :return: None or the URI of the account to update + """ + uri = None + if 'data' in return_data: + # Find the extended messages in the response payload + extended_messages = return_data['data'].get('error', {}).get('@Message.ExtendedInfo', []) + if len(extended_messages) == 0: + extended_messages = 
return_data['data'].get('@Message.ExtendedInfo', []) + # Go through each message and look for Base.1.X.PasswordChangeRequired + for message in extended_messages: + message_id = message.get('MessageId') + if message_id is None: + # While this is invalid, treat the lack of a MessageId as "no message" + continue + if message_id.startswith('Base.1.') and message_id.endswith('.PasswordChangeRequired'): + # Password change required; get the URI of the user account + uri = message['MessageArgs'][0] + break + return uri + def get_sessions(self): result = {} # listing all users has always been slower than other operations, why? @@ -1841,7 +1937,7 @@ class RedfishUtils(object): update_uri = data['MultipartHttpPushUri'] # Assemble the JSON payload portion of the request - payload = {"@Redfish.OperationApplyTime": "Immediate"} + payload = {} if targets: payload["Targets"] = targets if apply_time: @@ -2195,7 +2291,7 @@ class RedfishUtils(object): continue # If already set to requested value, remove it from PATCH payload - if data[u'Attributes'][attr_name] == attributes[attr_name]: + if data[u'Attributes'][attr_name] == attr_value: del attrs_to_patch[attr_name] warning = "" @@ -2215,11 +2311,19 @@ class RedfishUtils(object): # Construct payload and issue PATCH command payload = {"Attributes": attrs_to_patch} + + # WORKAROUND + # Dell systems require manually setting the apply time to "OnReset" + # to spawn a proprietary job to apply the BIOS settings + vendor = self._get_vendor()['Vendor'] + if vendor == 'Dell': + payload.update({"@Redfish.SettingsApplyTime": {"ApplyTime": "OnReset"}}) + response = self.patch_request(self.root_uri + set_bios_attr_uri, payload) if response['ret'] is False: return response return {'ret': True, 'changed': True, - 'msg': "Modified BIOS attributes %s" % (attrs_to_patch), + 'msg': "Modified BIOS attributes %s. 
A reboot is required" % (attrs_to_patch), 'warning': warning} def set_boot_order(self, boot_list): @@ -2733,9 +2837,11 @@ class RedfishUtils(object): def virtual_media_insert_via_patch(self, options, param_map, uri, data, image_only=False): # get AllowableValues - ai = dict((k[:-24], - {'AllowableValues': v}) for k, v in data.items() - if k.endswith('@Redfish.AllowableValues')) + ai = { + k[:-24]: {'AllowableValues': v} + for k, v in data.items() + if k.endswith('@Redfish.AllowableValues') + } # construct payload payload = self._insert_virt_media_payload(options, param_map, data, ai) if 'Inserted' not in payload and not image_only: @@ -3326,7 +3432,7 @@ class RedfishUtils(object): # Capture list of URIs that match a specified HostInterface resource Id if hostinterface_id: - matching_hostinterface_uris = [uri for uri in uris if hostinterface_id in uri.split('/')[-1]] + matching_hostinterface_uris = [uri for uri in uris if hostinterface_id in uri.rstrip('/').split('/')[-1]] if hostinterface_id and matching_hostinterface_uris: hostinterface_uri = list.pop(matching_hostinterface_uris) elif hostinterface_id and not matching_hostinterface_uris: @@ -3445,12 +3551,12 @@ class RedfishUtils(object): result = {} if manager is None: if len(self.manager_uris) == 1: - manager = self.manager_uris[0].split('/')[-1] + manager = self.manager_uris[0].rstrip('/').split('/')[-1] elif len(self.manager_uris) > 1: entries = self.get_multi_manager_inventory()['entries'] managers = [m[0]['manager_uri'] for m in entries if m[1].get('ServiceIdentification')] if len(managers) == 1: - manager = managers[0].split('/')[-1] + manager = managers[0].rstrip('/').split('/')[-1] else: self.module.fail_json(msg=[ "Multiple managers with ServiceIdentification were found: %s" % str(managers), @@ -3482,7 +3588,7 @@ class RedfishUtils(object): def verify_bios_attributes(self, bios_attributes): # This method verifies BIOS attributes against the provided input - server_bios = self.get_multi_bios_attributes() + server_bios = self.get_bios_attributes(self.systems_uri) if server_bios["ret"] is False: return server_bios @@ -3491,8 +3597,8 @@ class RedfishUtils(object): # Verify bios_attributes with BIOS settings available in the server for key, value in bios_attributes.items(): - if key in server_bios["entries"][0][1]: - if server_bios["entries"][0][1][key] != value: + if key in server_bios["entries"]: + if server_bios["entries"][key] != value: bios_dict.update({key: value}) else: wrong_param.update({key: value}) @@ -3608,7 +3714,7 @@ class RedfishUtils(object): # Matching Storage Subsystem ID with user input self.storage_subsystem_uri = "" for storage_subsystem_uri in self.storage_subsystems_uris: - if storage_subsystem_uri.split("/")[-2] == storage_subsystem_id: + if storage_subsystem_uri.rstrip('/').split('/')[-1] == storage_subsystem_id: self.storage_subsystem_uri = storage_subsystem_uri if not self.storage_subsystem_uri: @@ -3636,7 +3742,7 @@ class RedfishUtils(object): # Delete each volume for volume in self.volume_uris: - if volume.split("/")[-1] in volume_ids: + if volume.rstrip('/').split('/')[-1] in volume_ids: response = self.delete_request(self.root_uri + volume) if response['ret'] is False: return response @@ -3644,7 +3750,7 @@ class RedfishUtils(object): return {'ret': True, 'changed': True, 'msg': "The following volumes were deleted: %s" % str(volume_ids)} - def create_volume(self, volume_details, storage_subsystem_id): + def create_volume(self, volume_details, storage_subsystem_id, storage_none_volume_deletion=False): # 
Find the Storage resource from the requested ComputerSystem resource response = self.get_request(self.root_uri + self.systems_uri) if response['ret'] is False: @@ -3670,7 +3776,7 @@ class RedfishUtils(object): # Matching Storage Subsystem ID with user input self.storage_subsystem_uri = "" for storage_subsystem_uri in self.storage_subsystems_uris: - if storage_subsystem_uri.split("/")[-2] == storage_subsystem_id: + if storage_subsystem_uri.rstrip('/').split('/')[-1] == storage_subsystem_id: self.storage_subsystem_uri = storage_subsystem_uri if not self.storage_subsystem_uri: @@ -3679,8 +3785,8 @@ class RedfishUtils(object): 'msg': "Provided Storage Subsystem ID %s does not exist on the server" % storage_subsystem_id} # Validate input parameters - required_parameters = ['RAIDType', 'Drives', 'CapacityBytes'] - allowed_parameters = ['DisplayName', 'InitializeMethod', 'MediaSpanCount', + required_parameters = ['RAIDType', 'Drives'] + allowed_parameters = ['CapacityBytes', 'DisplayName', 'InitializeMethod', 'MediaSpanCount', 'Name', 'ReadCachePolicy', 'StripSizeBytes', 'VolumeUsage', 'WriteCachePolicy'] for parameter in required_parameters: @@ -3696,22 +3802,23 @@ class RedfishUtils(object): data = response['data'] # Deleting any volumes of RAIDType None present on the Storage Subsystem - response = self.get_request(self.root_uri + data['Volumes']['@odata.id']) - if response['ret'] is False: - return response - volume_data = response['data'] + if storage_none_volume_deletion: + response = self.get_request(self.root_uri + data['Volumes']['@odata.id']) + if response['ret'] is False: + return response + volume_data = response['data'] - if "Members" in volume_data: - for member in volume_data["Members"]: - response = self.get_request(self.root_uri + member['@odata.id']) - if response['ret'] is False: - return response - member_data = response['data'] - - if member_data["RAIDType"] == "None": - response = self.delete_request(self.root_uri + member['@odata.id']) + if "Members" in volume_data: + for member in volume_data["Members"]: + response = self.get_request(self.root_uri + member['@odata.id']) if response['ret'] is False: return response + member_data = response['data'] + + if member_data["RAIDType"] == "None": + response = self.delete_request(self.root_uri + member['@odata.id']) + if response['ret'] is False: + return response # Construct payload and issue POST command to create volume volume_details["Links"] = {} @@ -3786,7 +3893,7 @@ class RedfishUtils(object): vendor = self._get_vendor()['Vendor'] rsp_uri = "" for loc in resp_data['Location']: - if loc['Language'] == "en": + if loc['Language'].startswith("en"): rsp_uri = loc['Uri'] if vendor == 'HPE': # WORKAROUND diff --git a/plugins/module_utils/redis.py b/plugins/module_utils/redis.py index c4d87aca51..e823f966dc 100644 --- a/plugins/module_utils/redis.py +++ b/plugins/module_utils/redis.py @@ -57,7 +57,9 @@ def redis_auth_argument_spec(tls_default=True): validate_certs=dict(type='bool', default=True ), - ca_certs=dict(type='str') + ca_certs=dict(type='str'), + client_cert_file=dict(type='str'), + client_key_file=dict(type='str'), ) @@ -71,6 +73,8 @@ def redis_auth_params(module): ca_certs = module.params['ca_certs'] if tls and ca_certs is None: ca_certs = str(certifi.where()) + client_cert_file = module.params['client_cert_file'] + client_key_file = module.params['client_key_file'] if tuple(map(int, redis_version.split('.'))) < (3, 4, 0) and login_user is not None: module.fail_json( msg='The option `username` in only supported with redis 
>= 3.4.0.') @@ -78,6 +82,8 @@ def redis_auth_params(module): 'port': login_port, 'password': login_password, 'ssl_ca_certs': ca_certs, + 'ssl_certfile': client_cert_file, + 'ssl_keyfile': client_key_file, 'ssl_cert_reqs': validate_certs, 'ssl': tls} if login_user is not None: diff --git a/plugins/module_utils/scaleway.py b/plugins/module_utils/scaleway.py index 1310ba5602..4768aafc9c 100644 --- a/plugins/module_utils/scaleway.py +++ b/plugins/module_utils/scaleway.py @@ -51,11 +51,11 @@ def scaleway_waitable_resource_argument_spec(): def payload_from_object(scw_object): - return dict( - (k, v) + return { + k: v for k, v in scw_object.items() if k != 'id' and v is not None - ) + } class ScalewayException(Exception): @@ -117,10 +117,7 @@ class SecretVariables(object): @staticmethod def list_to_dict(source_list, hashed=False): key_value = 'hashed_value' if hashed else 'value' - return dict( - (var['key'], var[key_value]) - for var in source_list - ) + return {var['key']: var[key_value] for var in source_list} @classmethod def decode(cls, secrets_list, values_list): @@ -143,7 +140,7 @@ def resource_attributes_should_be_changed(target, wished, verifiable_mutable_att diff[attr] = wished[attr] if diff: - return dict((attr, wished[attr]) for attr in mutable_attributes) + return {attr: wished[attr] for attr in mutable_attributes} else: return diff diff --git a/plugins/module_utils/vardict.py b/plugins/module_utils/vardict.py index cfcce4d4d2..9bd104ce37 100644 --- a/plugins/module_utils/vardict.py +++ b/plugins/module_utils/vardict.py @@ -100,7 +100,7 @@ class _Variable(object): return def __str__(self): - return "<_Variable: value={0!r}, initial={1!r}, diff={2}, output={3}, change={4}, verbosity={5}>".format( + return "".format( self.value, self.initial_value, self.diff, self.output, self.change, self.verbosity ) @@ -175,18 +175,18 @@ class VarDict(object): self.__vars__[name] = var def output(self, verbosity=0): - return dict((n, v.value) for n, v in self.__vars__.items() if v.output and v.is_visible(verbosity)) + return {n: v.value for n, v in self.__vars__.items() if v.output and v.is_visible(verbosity)} def diff(self, verbosity=0): diff_results = [(n, v.diff_result) for n, v in self.__vars__.items() if v.diff_result and v.is_visible(verbosity)] if diff_results: - before = dict((n, dr['before']) for n, dr in diff_results) - after = dict((n, dr['after']) for n, dr in diff_results) + before = {n: dr['before'] for n, dr in diff_results} + after = {n: dr['after'] for n, dr in diff_results} return {'before': before, 'after': after} return None def facts(self, verbosity=0): - facts_result = dict((n, v.value) for n, v in self.__vars__.items() if v.fact and v.is_visible(verbosity)) + facts_result = {n: v.value for n, v in self.__vars__.items() if v.fact and v.is_visible(verbosity)} return facts_result if facts_result else None @property @@ -194,4 +194,4 @@ class VarDict(object): return any(var.has_changed for var in self.__vars__.values()) def as_dict(self): - return dict((name, var.value) for name, var in self.__vars__.items()) + return {name: var.value for name, var in self.__vars__.items()} diff --git a/plugins/module_utils/wdc_redfish_utils.py b/plugins/module_utils/wdc_redfish_utils.py index bc4b0c2cd0..8c6fd71bf8 100644 --- a/plugins/module_utils/wdc_redfish_utils.py +++ b/plugins/module_utils/wdc_redfish_utils.py @@ -11,6 +11,7 @@ import datetime import re import time import tarfile +import os from ansible.module_utils.urls import fetch_file from 
ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
@@ -79,19 +80,25 @@ class WdcRedfishUtils(RedfishUtils):
             return response
         return self._find_updateservice_additional_uris()
-    def _is_enclosure_multi_tenant(self):
+    def _is_enclosure_multi_tenant_and_fetch_gen(self):
         """Determine if the enclosure is multi-tenant.
         The serial number of a multi-tenant enclosure will end in "-A" or "-B".
+        Also fetches the enclosure generation.
-        :return: True/False if the enclosure is multi-tenant or not; None if unable to determine.
+        :return: True/False if the enclosure is multi-tenant, plus the enclosure generation;
+                 None if unable to determine.
         """
         response = self.get_request(self.root_uri + self.service_root + "Chassis/Enclosure")
         if response['ret'] is False:
             return None
         pattern = r".*-[A,B]"
         data = response['data']
-        return re.match(pattern, data['SerialNumber']) is not None
+        if 'EnclVersion' not in data:
+            enc_version = 'G1'
+        else:
+            enc_version = data['EnclVersion']
+        return re.match(pattern, data['SerialNumber']) is not None, enc_version
     def _find_updateservice_additional_uris(self):
         """Find & set WDC-specific update service URIs"""
@@ -180,15 +187,44 @@ class WdcRedfishUtils(RedfishUtils):
         To determine if the bundle is multi-tenant or not, it looks inside the .bin file within the tarfile,
         and checks the appropriate byte in the file.
+        If it is not a tarfile, byte 2048 of the bundle is checked to determine whether it is a Gen2 bundle.
+        Gen2 is always single tenant at this time.
+
         :param str bundle_uri: HTTP URI of the firmware bundle.
-        :return: Firmware version number contained in the bundle, and whether or not the bundle is multi-tenant.
-            Either value will be None if unable to determine.
+        :return: Firmware version number contained in the bundle, whether or not the bundle is multi-tenant,
+            and the bundle generation. Any of these values will be None if unable to determine.
         :rtype: str or None, bool or None
         """
         bundle_temp_filename = fetch_file(module=self.module, url=bundle_uri)
+        bundle_version = None
+        is_multi_tenant = None
+        gen = None
+
+        # If not a tarfile, then if the file has "MMG2" or "DPG2" at byte 2048,
+        # the bundle is for MM or DP G2
         if not tarfile.is_tarfile(bundle_temp_filename):
-            return None, None
+            cookie1 = None
+            with open(bundle_temp_filename, "rb") as bundle_file:
+                file_size = os.path.getsize(bundle_temp_filename)
+                if file_size >= 2052:
+                    bundle_file.seek(2048)
+                    cookie1 = bundle_file.read(4)
+            # It is anticipated that the DP firmware bundle will have the value "DPG2"
+            # for cookie1 in the header
+            if cookie1 and cookie1.decode("utf8") in ("MMG2", "DPG2"):
+                file_name, ext = os.path.splitext(str(bundle_uri.rsplit('/', 1)[1]))
+                # G2 bundle file name: Ultrastar-Data102_3000_SEP_1010-032_2.1.12
+                parsedFileName = file_name.split('_')
+                if len(parsedFileName) == 5:
+                    bundle_version = parsedFileName[4]
+                # MM G2 is always single tenant
+                is_multi_tenant = False
+                gen = "G2"
+
+            return bundle_version, is_multi_tenant, gen
+
+        # Bundle is for MM or DP G1
         tf = tarfile.open(bundle_temp_filename)
         pattern_pkg = r"oobm-(.+)\.pkg"
         pattern_bin = r"(.*\.bin)"
@@ -205,8 +241,9 @@ class WdcRedfishUtils(RedfishUtils):
             bin_file.seek(11)
             byte_11 = bin_file.read(1)
             is_multi_tenant = byte_11 == b'\x80'
+            gen = "G1"
-        return bundle_version, is_multi_tenant
+        return bundle_version, is_multi_tenant, gen
     @staticmethod
     def uri_is_http(uri):
@@ -267,15 +304,16 @@ class WdcRedfishUtils(RedfishUtils):
         # Check the FW version in the bundle file, and compare it to what is already on the IOMs
         # Bundle version number
-        bundle_firmware_version, is_bundle_multi_tenant = self._get_bundle_version(bundle_uri)
-        if bundle_firmware_version is None or is_bundle_multi_tenant is None:
+        bundle_firmware_version, is_bundle_multi_tenant, bundle_gen = self._get_bundle_version(bundle_uri)
+        if bundle_firmware_version is None or is_bundle_multi_tenant is None or bundle_gen is None:
             return {
                 'ret': False,
-                'msg': 'Unable to extract bundle version or multi-tenant status from update image tarfile'
+                'msg': 'Unable to extract bundle version or multi-tenant status or generation from update image file'
             }
+        is_enclosure_multi_tenant, enclosure_gen = self._is_enclosure_multi_tenant_and_fetch_gen()
+
         # Verify that the bundle is correctly multi-tenant or not
-        is_enclosure_multi_tenant = self._is_enclosure_multi_tenant()
         if is_enclosure_multi_tenant != is_bundle_multi_tenant:
             return {
                 'ret': False,
@@ -285,6 +323,16 @@ class WdcRedfishUtils(RedfishUtils):
                 )
             }
+
+        # Verify that the bundle is compliant with the target enclosure
+        if enclosure_gen != bundle_gen:
+            return {
+                'ret': False,
+                'msg': 'Enclosure generation is {0} but the bundle is for {1}'.format(
+                    enclosure_gen,
+                    bundle_gen,
+                )
+            }
+
         # Version number installed on IOMs
         firmware_inventory = self.get_firmware_inventory()
         if not firmware_inventory["ret"]:
diff --git a/plugins/modules/aix_inittab.py b/plugins/modules/aix_inittab.py
index d4c9aa0b56..79336bab8d 100644
--- a/plugins/modules/aix_inittab.py
+++ b/plugins/modules/aix_inittab.py
@@ -192,6 +192,7 @@ def main():
     rmitab = module.get_bin_path('rmitab')
     chitab = module.get_bin_path('chitab')
     rc = 0
+    err = None
     # check if the new entry exists
     current_entry = check_current_entry(module)
diff --git a/plugins/modules/alternatives.py b/plugins/modules/alternatives.py
index 0d1b1e8cbe..da578276fa 100644
--- a/plugins/modules/alternatives.py
+++ 
b/plugins/modules/alternatives.py @@ -344,7 +344,7 @@ class AlternativesModule(object): subcmd_path_map = dict(subcmd_path_link_regex.findall(display_output)) if not subcmd_path_map and self.subcommands: - subcmd_path_map = dict((s['name'], s['link']) for s in self.subcommands) + subcmd_path_map = {s['name']: s['link'] for s in self.subcommands} for path, prio, subcmd in alternative_regex.findall(display_output): self.current_alternatives[path] = dict( diff --git a/plugins/modules/ansible_galaxy_install.py b/plugins/modules/ansible_galaxy_install.py index 1e2496daed..62de70bb63 100644 --- a/plugins/modules/ansible_galaxy_install.py +++ b/plugins/modules/ansible_galaxy_install.py @@ -9,73 +9,94 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type DOCUMENTATION = """ +--- module: ansible_galaxy_install author: - - "Alexei Znamensky (@russoz)" +- "Alexei Znamensky (@russoz)" short_description: Install Ansible roles or collections using ansible-galaxy version_added: 3.5.0 description: - - This module allows the installation of Ansible collections or roles using C(ansible-galaxy). +- This module allows the installation of Ansible collections or roles using C(ansible-galaxy). notes: - - Support for B(Ansible 2.9/2.10) was removed in community.general 8.0.0. - - > - The module will try and run using the C(C.UTF-8) locale. - If that fails, it will try C(en_US.UTF-8). - If that one also fails, the module will fail. +- Support for B(Ansible 2.9/2.10) was removed in community.general 8.0.0. +- > + The module will try and run using the C(C.UTF-8) locale. + If that fails, it will try C(en_US.UTF-8). + If that one also fails, the module will fail. +seealso: +- name: C(ansible-galaxy) command manual page + description: Manual page for the command. + link: https://docs.ansible.com/ansible/latest/cli/ansible-galaxy.html + requirements: - - ansible-core 2.11 or newer +- ansible-core 2.11 or newer extends_documentation_fragment: - - community.general.attributes +- community.general.attributes attributes: check_mode: support: none diff_mode: support: none options: + state: + description: + - > + If O(state=present) then the collection or role will be installed. + Note that the collections and roles are not updated with this option. + - > + Currently the O(state=latest) is ignored unless O(type=collection), and it will + ensure the collection is installed and updated to the latest available version. + - Please note that O(force=true) can be used to perform upgrade regardless of O(type). + type: str + choices: [present, latest] + default: present + version_added: 9.1.0 type: description: - - The type of installation performed by C(ansible-galaxy). - - If O(type=both), then O(requirements_file) must be passed and it may contain both roles and collections. - - "Note however that the opposite is not true: if using a O(requirements_file), then O(type) can be any of the three choices." + - The type of installation performed by C(ansible-galaxy). + - If O(type=both), then O(requirements_file) must be passed and it may contain both roles and collections. + - "Note however that the opposite is not true: if using a O(requirements_file), then O(type) can be any of the three choices." type: str choices: [collection, role, both] required: true name: description: - - Name of the collection or role being installed. - - > - Versions can be specified with C(ansible-galaxy) usual formats. - For example, the collection V(community.docker:1.6.1) or the role V(ansistrano.deploy,3.8.0). 
- - O(name) and O(requirements_file) are mutually exclusive. + - Name of the collection or role being installed. + - > + Versions can be specified with C(ansible-galaxy) usual formats. + For example, the collection V(community.docker:1.6.1) or the role V(ansistrano.deploy,3.8.0). + - O(name) and O(requirements_file) are mutually exclusive. type: str requirements_file: description: - - Path to a file containing a list of requirements to be installed. - - It works for O(type) equals to V(collection) and V(role). - - O(name) and O(requirements_file) are mutually exclusive. + - Path to a file containing a list of requirements to be installed. + - It works for O(type) equals to V(collection) and V(role). + - O(name) and O(requirements_file) are mutually exclusive. type: path dest: description: - - The path to the directory containing your collections or roles, according to the value of O(type). - - > - Please notice that C(ansible-galaxy) will not install collections with O(type=both), when O(requirements_file) - contains both roles and collections and O(dest) is specified. + - The path to the directory containing your collections or roles, according to the value of O(type). + - > + Please notice that C(ansible-galaxy) will not install collections with O(type=both), when O(requirements_file) + contains both roles and collections and O(dest) is specified. type: path no_deps: description: - - Refrain from installing dependencies. + - Refrain from installing dependencies. version_added: 4.5.0 type: bool default: false force: description: - - Force overwriting an existing role or collection. - - Using O(force=true) is mandatory when downgrading. + - Force overwriting existing roles and/or collections. + - It can be used for upgrading, but the module output will always report C(changed=true). + - Using O(force=true) is mandatory when downgrading. type: bool default: false """ EXAMPLES = """ +--- - name: Install collection community.network community.general.ansible_galaxy_install: type: collection @@ -97,81 +118,81 @@ EXAMPLES = """ type: collection name: community.network:3.0.2 force: true - """ RETURN = """ - type: - description: The value of the O(type) parameter. - type: str - returned: always - name: - description: The value of the O(name) parameter. - type: str - returned: always - dest: - description: The value of the O(dest) parameter. - type: str - returned: always - requirements_file: - description: The value of the O(requirements_file) parameter. - type: str - returned: always - force: - description: The value of the O(force) parameter. - type: bool - returned: always - installed_roles: - description: - - If O(requirements_file) is specified instead, returns dictionary with all the roles installed per path. - - If O(name) is specified, returns that role name and the version installed per path. - type: dict - returned: always when installing roles - contains: - "": - description: Roles and versions for that path. - type: dict - sample: - /home/user42/.ansible/roles: - ansistrano.deploy: 3.9.0 - baztian.xfce: v0.0.3 - /custom/ansible/roles: - ansistrano.deploy: 3.8.0 - installed_collections: - description: - - If O(requirements_file) is specified instead, returns dictionary with all the collections installed per path. - - If O(name) is specified, returns that collection name and the version installed per path. 
- type: dict - returned: always when installing collections - contains: - "": - description: Collections and versions for that path - type: dict - sample: - /home/az/.ansible/collections/ansible_collections: - community.docker: 1.6.0 - community.general: 3.0.2 - /custom/ansible/ansible_collections: - community.general: 3.1.0 - new_collections: - description: New collections installed by this module. - returned: success - type: dict - sample: - community.general: 3.1.0 - community.docker: 1.6.1 - new_roles: - description: New roles installed by this module. - returned: success - type: dict - sample: - ansistrano.deploy: 3.8.0 +--- +type: + description: The value of the O(type) parameter. + type: str + returned: always +name: + description: The value of the O(name) parameter. + type: str + returned: always +dest: + description: The value of the O(dest) parameter. + type: str + returned: always +requirements_file: + description: The value of the O(requirements_file) parameter. + type: str + returned: always +force: + description: The value of the O(force) parameter. + type: bool + returned: always +installed_roles: + description: + - If O(requirements_file) is specified instead, returns dictionary with all the roles installed per path. + - If O(name) is specified, returns that role name and the version installed per path. + type: dict + returned: always when installing roles + contains: + "": + description: Roles and versions for that path. + type: dict + sample: + /home/user42/.ansible/roles: + ansistrano.deploy: 3.9.0 baztian.xfce: v0.0.3 + /custom/ansible/roles: + ansistrano.deploy: 3.8.0 +installed_collections: + description: + - If O(requirements_file) is specified instead, returns dictionary with all the collections installed per path. + - If O(name) is specified, returns that collection name and the version installed per path. + type: dict + returned: always when installing collections + contains: + "": + description: Collections and versions for that path + type: dict + sample: + /home/az/.ansible/collections/ansible_collections: + community.docker: 1.6.0 + community.general: 3.0.2 + /custom/ansible/ansible_collections: + community.general: 3.1.0 +new_collections: + description: New collections installed by this module. + returned: success + type: dict + sample: + community.general: 3.1.0 + community.docker: 1.6.1 +new_roles: + description: New roles installed by this module. 
+ returned: success + type: dict + sample: + ansistrano.deploy: 3.8.0 + baztian.xfce: v0.0.3 """ import re -from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt as fmt +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper, ModuleHelperException @@ -180,12 +201,15 @@ class AnsibleGalaxyInstall(ModuleHelper): _RE_LIST_PATH = re.compile(r'^# (?P.*)$') _RE_LIST_COLL = re.compile(r'^(?P\w+\.\w+)\s+(?P[\d\.]+)\s*$') _RE_LIST_ROLE = re.compile(r'^- (?P\w+\.\w+),\s+(?P[\d\.]+)\s*$') - _RE_INSTALL_OUTPUT = None # Set after determining ansible version, see __init_module__() + _RE_INSTALL_OUTPUT = re.compile( + r'^(?:(?P\w+\.\w+):(?P[\d\.]+)|- (?P\w+\.\w+) \((?P[\d\.]+)\)) was installed successfully$' + ) ansible_version = None output_params = ('type', 'name', 'dest', 'requirements_file', 'force', 'no_deps') module = dict( argument_spec=dict( + state=dict(type='str', choices=['present', 'latest'], default='present'), type=dict(type='str', choices=('collection', 'role', 'both'), required=True), name=dict(type='str'), requirements_file=dict(type='path'), @@ -198,17 +222,19 @@ class AnsibleGalaxyInstall(ModuleHelper): required_if=[('type', 'both', ['requirements_file'])], supports_check_mode=False, ) + use_old_vardict = False command = 'ansible-galaxy' command_args_formats = dict( - type=fmt.as_func(lambda v: [] if v == 'both' else [v]), - galaxy_cmd=fmt.as_list(), - requirements_file=fmt.as_opt_val('-r'), - dest=fmt.as_opt_val('-p'), - force=fmt.as_bool("--force"), - no_deps=fmt.as_bool("--no-deps"), - version=fmt.as_bool("--version"), - name=fmt.as_list(), + type=cmd_runner_fmt.as_func(lambda v: [] if v == 'both' else [v]), + galaxy_cmd=cmd_runner_fmt.as_list(), + upgrade=cmd_runner_fmt.as_bool("--upgrade"), + requirements_file=cmd_runner_fmt.as_opt_val('-r'), + dest=cmd_runner_fmt.as_opt_val('-p'), + force=cmd_runner_fmt.as_bool("--force"), + no_deps=cmd_runner_fmt.as_bool("--no-deps"), + version=cmd_runner_fmt.as_fixed("--version"), + name=cmd_runner_fmt.as_list(), ) def _make_runner(self, lang): @@ -232,25 +258,16 @@ class AnsibleGalaxyInstall(ModuleHelper): try: runner = self._make_runner("C.UTF-8") with runner("version", check_rc=False, output_process=process) as ctx: - return runner, ctx.run(version=True) - except UnsupportedLocale as e: + return runner, ctx.run() + except UnsupportedLocale: runner = self._make_runner("en_US.UTF-8") with runner("version", check_rc=True, output_process=process) as ctx: - return runner, ctx.run(version=True) + return runner, ctx.run() def __init_module__(self): - # self.runner = CmdRunner(self.module, command=self.command, arg_formats=self.command_args_formats, force_lang=self.force_lang) self.runner, self.ansible_version = self._get_ansible_galaxy_version() if self.ansible_version < (2, 11): - self.module.fail_json( - msg="Support for Ansible 2.9 and ansible-base 2.10 has ben removed." - ) - # Collection install output changed: - # ansible-base 2.10: "coll.name (x.y.z)" - # ansible-core 2.11+: "coll.name:x.y.z" - self._RE_INSTALL_OUTPUT = re.compile(r'^(?:(?P\w+\.\w+)(?: \(|:)(?P[\d\.]+)\)?' 
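The new pre-compiled _RE_INSTALL_OUTPUT above lost its angle-bracketed group names in this text; the sketch below restores them as collection, cversion, role and rversion (the last two are confirmed by the match.group() calls further down in the module, the first two are assumed) and runs the pattern over made-up ansible-galaxy output:

import re

install_output = re.compile(
    r'^(?:(?P<collection>\w+\.\w+):(?P<cversion>[\d\.]+)'
    r'|- (?P<role>\w+\.\w+) \((?P<rversion>[\d\.]+)\)) was installed successfully$'
)

sample_lines = [
    "community.docker:3.10.3 was installed successfully",      # collection line
    "- geerlingguy.docker (7.0.2) was installed successfully",  # role line
    "Starting galaxy collection install process",               # not matched
]

new_collections, new_roles = {}, {}
for line in sample_lines:
    match = install_output.match(line)
    if not match:
        continue
    if match.group("collection"):
        new_collections[match.group("collection")] = match.group("cversion")
    elif match.group("role"):
        new_roles[match.group("role")] = match.group("rversion")

print(new_collections)  # {'community.docker': '3.10.3'}
print(new_roles)        # {'geerlingguy.docker': '7.0.2'}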
- r'|- (?P\w+\.\w+) \((?P[\d\.]+)\))' - r' was installed successfully$') + self.module.fail_json(msg="Support for Ansible 2.9 and ansible-base 2.10 has been removed.") self.vars.set("new_collections", {}, change=True) self.vars.set("new_roles", {}, change=True) if self.vars.type != "collection": @@ -303,8 +320,9 @@ class AnsibleGalaxyInstall(ModuleHelper): elif match.group("role"): self.vars.new_roles[match.group("role")] = match.group("rversion") - with self.runner("type galaxy_cmd force no_deps dest requirements_file name", output_process=process) as ctx: - ctx.run(galaxy_cmd="install") + upgrade = (self.vars.type == "collection" and self.vars.state == "latest") + with self.runner("type galaxy_cmd upgrade force no_deps dest requirements_file name", output_process=process) as ctx: + ctx.run(galaxy_cmd="install", upgrade=upgrade) if self.verbosity > 2: self.vars.set("run_info", ctx.run_info) diff --git a/plugins/modules/apache2_mod_proxy.py b/plugins/modules/apache2_mod_proxy.py index 8f561e8ae0..786089d13c 100644 --- a/plugins/modules/apache2_mod_proxy.py +++ b/plugins/modules/apache2_mod_proxy.py @@ -277,7 +277,7 @@ class BalancerMember(object): for valuesset in subsoup[1::1]: if re.search(pattern=self.host, string=str(valuesset)): values = valuesset.findAll('td') - return dict((keys[x].string, values[x].string) for x in range(0, len(keys))) + return {keys[x].string: values[x].string for x in range(0, len(keys))} def get_member_status(self): """ Returns a dictionary of a balancer member's status attributes.""" @@ -286,7 +286,7 @@ class BalancerMember(object): 'hot_standby': 'Stby', 'ignore_errors': 'Ign'} actual_status = str(self.attributes['Status']) - status = dict((mode, patt in actual_status) for mode, patt in iteritems(status_mapping)) + status = {mode: patt in actual_status for mode, patt in iteritems(status_mapping)} return status def set_member_status(self, values): diff --git a/plugins/modules/apk.py b/plugins/modules/apk.py index a6b058b932..7caefd1357 100644 --- a/plugins/modules/apk.py +++ b/plugins/modules/apk.py @@ -74,6 +74,7 @@ options: world: description: - Use a custom world file when checking for explicitly installed packages. + The file is used only when a value is provided for O(name), and O(state) is set to V(present) or V(latest). type: str default: /etc/apk/world version_added: 5.4.0 diff --git a/plugins/modules/bootc_manage.py b/plugins/modules/bootc_manage.py new file mode 100644 index 0000000000..5628ffcca0 --- /dev/null +++ b/plugins/modules/bootc_manage.py @@ -0,0 +1,95 @@ +#!/usr/bin/python + +# Copyright (c) 2024, Ryan Cook +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt +# or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: bootc_manage +version_added: 9.3.0 +author: +- Ryan Cook (@cooktheryan) +short_description: Bootc Switch and Upgrade +description: + - This module manages the switching and upgrading of C(bootc). +options: + state: + description: + - 'Control to apply the latest image or switch the image.' + - 'B(Note:) This will not reboot the system.' + - 'Please use M(ansible.builtin.reboot) to reboot the system.' + required: true + type: str + choices: ['switch', 'latest'] + image: + description: + - 'The image to switch to.' + - 'This is required when O(state=switch).' 
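The get_member_status() rewrite in apache2_mod_proxy above keeps the same semantics: each flag becomes True when its abbreviation appears in the member's Status string. A small sketch using only the two mapping entries visible in the hunk and a made-up status value (plain .items() instead of six.iteritems, for a standalone Python 3 example):

status_mapping = {'hot_standby': 'Stby', 'ignore_errors': 'Ign'}
actual_status = 'Init Ok Stby'

# membership test per flag, exactly as in the rewritten comprehension
status = {mode: patt in actual_status for mode, patt in status_mapping.items()}
print(status)  # {'hot_standby': True, 'ignore_errors': False}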
+ required: false + type: str + +''' + +EXAMPLES = ''' +# Switch to a different image +- name: Provide image to switch to a different image and retain the current running image + community.general.bootc_manage: + state: switch + image: "example.com/image:latest" + +# Apply updates of the current running image +- name: Apply updates of the current running image + community.general.bootc_manage: + state: latest +''' + +RETURN = ''' +''' + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.locale import get_best_parsable_locale + + +def main(): + argument_spec = dict( + state=dict(type='str', required=True, choices=['switch', 'latest']), + image=dict(type='str', required=False), + ) + module = AnsibleModule( + argument_spec=argument_spec, + required_if=[ + ('state', 'switch', ['image']), + ], + ) + + state = module.params['state'] + image = module.params['image'] + + if state == 'switch': + command = ['bootc', 'switch', image, '--retain'] + elif state == 'latest': + command = ['bootc', 'upgrade'] + + locale = get_best_parsable_locale(module) + module.run_command_environ_update = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LC_CTYPE=locale, LANGUAGE=locale) + rc, stdout, err = module.run_command(command, check_rc=True) + + if 'Queued for next boot: ' in stdout: + result = {'changed': True, 'stdout': stdout} + module.exit_json(**result) + elif 'No changes in ' in stdout or 'Image specification is unchanged.' in stdout: + result = {'changed': False, 'stdout': stdout} + module.exit_json(**result) + else: + result = {'changed': False, 'stderr': err} + module.fail_json(msg='ERROR: Command execution failed.', **result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/btrfs_subvolume.py b/plugins/modules/btrfs_subvolume.py index 35327bfe02..0aa38bf0e4 100644 --- a/plugins/modules/btrfs_subvolume.py +++ b/plugins/modules/btrfs_subvolume.py @@ -102,40 +102,40 @@ EXAMPLES = r''' - name: Create a @home subvolume under the root subvolume community.general.btrfs_subvolume: name: /@home - device: /dev/vda2 + filesystem_device: /dev/vda2 - name: Remove the @home subvolume if it exists community.general.btrfs_subvolume: name: /@home state: absent - device: /dev/vda2 + filesystem_device: /dev/vda2 - name: Create a snapshot of the root subvolume named @ community.general.btrfs_subvolume: name: /@ snapshot_source: / - device: /dev/vda2 + filesystem_device: /dev/vda2 - name: Create a snapshot of the root subvolume and make it the new default subvolume community.general.btrfs_subvolume: name: /@ snapshot_source: / default: Yes - device: /dev/vda2 + filesystem_device: /dev/vda2 - name: Create a snapshot of the /@ subvolume and recursively creating intermediate subvolumes as required community.general.btrfs_subvolume: name: /@snapshots/@2022_06_09 snapshot_source: /@ recursive: True - device: /dev/vda2 + filesystem_device: /dev/vda2 - name: Remove the /@ subvolume and recursively delete child subvolumes as required community.general.btrfs_subvolume: name: /@snapshots/@2022_06_09 snapshot_source: /@ recursive: True - device: /dev/vda2 + filesystem_device: /dev/vda2 ''' diff --git a/plugins/modules/cargo.py b/plugins/modules/cargo.py index ba9c05ed7b..2fc729da20 100644 --- a/plugins/modules/cargo.py +++ b/plugins/modules/cargo.py @@ -1,6 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- # Copyright (c) 2021 Radek Sprta +# Copyright (c) 2024 Colin Nolan # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
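As an illustration of the control flow in the new bootc_manage module above, the command construction and stdout interpretation are factored into plain functions below; interpret_bootc_output is a hypothetical name used only for this sketch, and the sample string mimics bootc output:

def build_command(state, image=None):
    # mirrors the branch on state in bootc_manage's main()
    if state == 'switch':
        return ['bootc', 'switch', image, '--retain']
    return ['bootc', 'upgrade']

def interpret_bootc_output(stdout):
    # mirrors how the module decides changed / unchanged; any other output fails
    if 'Queued for next boot: ' in stdout:
        return {'changed': True}
    if 'No changes in ' in stdout or 'Image specification is unchanged.' in stdout:
        return {'changed': False}
    return None

print(build_command('switch', 'example.com/image:latest'))
print(interpret_bootc_output('Queued for next boot: example.com/image:latest'))  # {'changed': True}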
https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -65,6 +66,13 @@ options: type: str default: present choices: [ "present", "absent", "latest" ] + directory: + description: + - Path to the source directory to install the Rust package from. + - This is only used when installing packages. + type: path + required: false + version_added: 9.1.0 requirements: - cargo installed """ @@ -98,8 +106,14 @@ EXAMPLES = r""" community.general.cargo: name: ludusavi state: latest + +- name: Install "ludusavi" Rust package from source directory + community.general.cargo: + name: ludusavi + directory: /path/to/ludusavi/source """ +import json import os import re @@ -115,6 +129,7 @@ class Cargo(object): self.state = kwargs["state"] self.version = kwargs["version"] self.locked = kwargs["locked"] + self.directory = kwargs["directory"] @property def path(self): @@ -143,7 +158,7 @@ class Cargo(object): data, dummy = self._exec(cmd, True, False, False) - package_regex = re.compile(r"^([\w\-]+) v(.+):$") + package_regex = re.compile(r"^([\w\-]+) v(\S+).*:$") installed = {} for line in data.splitlines(): package_info = package_regex.match(line) @@ -163,19 +178,53 @@ class Cargo(object): if self.version: cmd.append("--version") cmd.append(self.version) + if self.directory: + cmd.append("--path") + cmd.append(self.directory) return self._exec(cmd) def is_outdated(self, name): installed_version = self.get_installed().get(name) + latest_version = ( + self.get_latest_published_version(name) + if not self.directory + else self.get_source_directory_version(name) + ) + return installed_version != latest_version + def get_latest_published_version(self, name): cmd = ["search", name, "--limit", "1"] data, dummy = self._exec(cmd, True, False, False) match = re.search(r'"(.+)"', data) - if match: - latest_version = match.group(1) + if not match: + self.module.fail_json( + msg="No published version for package %s found" % name + ) + return match.group(1) - return installed_version != latest_version + def get_source_directory_version(self, name): + cmd = [ + "metadata", + "--format-version", + "1", + "--no-deps", + "--manifest-path", + os.path.join(self.directory, "Cargo.toml"), + ] + data, dummy = self._exec(cmd, True, False, False) + manifest = json.loads(data) + + package = next( + (package for package in manifest["packages"] if package["name"] == name), + None, + ) + if not package: + self.module.fail_json( + msg="Package %s not defined in source, found: %s" + % (name, [x["name"] for x in manifest["packages"]]) + ) + return package["version"] def uninstall(self, packages=None): cmd = ["uninstall"] @@ -191,16 +240,21 @@ def main(): state=dict(default="present", choices=["present", "absent", "latest"]), version=dict(default=None, type="str"), locked=dict(default=False, type="bool"), + directory=dict(default=None, type="path"), ) module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) name = module.params["name"] state = module.params["state"] version = module.params["version"] + directory = module.params["directory"] if not name: module.fail_json(msg="Package name must be specified") + if directory is not None and not os.path.isdir(directory): + module.fail_json(msg="Source directory does not exist") + # Set LANG env since we parse stdout module.run_command_environ_update = dict( LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C" diff --git a/plugins/modules/cloudflare_dns.py b/plugins/modules/cloudflare_dns.py index 1904976440..86550966be 100644 --- 
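The tightened package_regex in cargo.py above captures only the version token, which matters now that packages can also be installed from a source directory: in that case the output of cargo install --list appends the source path after the version (the sample lines below approximate that format), and the old pattern would have swallowed the path into the version group:

import re

package_regex = re.compile(r"^([\w\-]+) v(\S+).*:$")

for line in ["ludusavi v0.25.0:", "ludusavi v0.25.0 (/home/user/src/ludusavi):"]:
    m = package_regex.match(line)
    if m:
        print(m.group(1), m.group(2))  # both lines print: ludusavi 0.25.0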
a/plugins/modules/cloudflare_dns.py +++ b/plugins/modules/cloudflare_dns.py @@ -716,12 +716,14 @@ class CloudflareAPI(object): "port": params['port'], "weight": params['weight'], "priority": params['priority'], - "name": params['record'], - "proto": params['proto'], - "service": params['service'] } - new_record = {"type": params['type'], "ttl": params['ttl'], 'data': srv_data} + new_record = { + "type": params['type'], + "name": params['service'] + '.' + params['proto'] + '.' + params['record'], + "ttl": params['ttl'], + 'data': srv_data, + } search_value = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value'] search_record = params['service'] + '.' + params['proto'] + '.' + params['record'] diff --git a/plugins/modules/consul_acl.py b/plugins/modules/consul_acl.py index 4617090fd3..2d60af0625 100644 --- a/plugins/modules/consul_acl.py +++ b/plugins/modules/consul_acl.py @@ -273,8 +273,8 @@ def set_acl(consul_client, configuration): :return: the output of setting the ACL """ acls_as_json = decode_acls_as_json(consul_client.acl.list()) - existing_acls_mapped_by_name = dict((acl.name, acl) for acl in acls_as_json if acl.name is not None) - existing_acls_mapped_by_token = dict((acl.token, acl) for acl in acls_as_json) + existing_acls_mapped_by_name = {acl.name: acl for acl in acls_as_json if acl.name is not None} + existing_acls_mapped_by_token = {acl.token: acl for acl in acls_as_json} if None in existing_acls_mapped_by_token: raise AssertionError("expecting ACL list to be associated to a token: %s" % existing_acls_mapped_by_token[None]) diff --git a/plugins/modules/consul_agent_check.py b/plugins/modules/consul_agent_check.py new file mode 100644 index 0000000000..3739260049 --- /dev/null +++ b/plugins/modules/consul_agent_check.py @@ -0,0 +1,254 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2024, Michael Ilg +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +module: consul_agent_check +short_description: Add, modify, and delete checks within a consul cluster +version_added: 9.1.0 +description: + - Allows the addition, modification and deletion of checks in a consul + cluster via the agent. For more details on using and configuring Checks, + see U(https://developer.hashicorp.com/consul/api-docs/agent/check). + - Currently, there is no complete way to retrieve the script, interval or TTL + metadata for a registered check. Without this metadata it is not possible to + tell if the data supplied with ansible represents a change to a check. As a + result this does not attempt to determine changes and will always report a + changed occurred. An API method is planned to supply this metadata so at that + stage change management will be added. +author: + - Michael Ilg (@Ilgmi) +extends_documentation_fragment: + - community.general.consul + - community.general.consul.actiongroup_consul + - community.general.consul.token + - community.general.attributes +attributes: + check_mode: + support: full + details: + - The result is the object as it is defined in the module options and not the object structure of the consul API. + For a better overview of what the object structure looks like, + take a look at U(https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks). 
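For the cloudflare_dns change above, the SRV record name submitted to the API is now built from the service, protocol and record values rather than sent as separate fields. A tiny sketch with illustrative parameter values:

params = {'service': '_sip', 'proto': '_tcp', 'record': 'example.com'}

# same concatenation as in the new_record dict of the patched module
name = params['service'] + '.' + params['proto'] + '.' + params['record']
print(name)  # _sip._tcp.example.com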
+ diff_mode: + support: partial + details: + - In check mode the diff will show the object as it is defined in the module options and not the object structure of the consul API. +options: + state: + description: + - Whether the check should be present or absent. + choices: ['present', 'absent'] + default: present + type: str + name: + description: + - Required name for the service check. + type: str + id: + description: + - Specifies a unique ID for this check on the node. This defaults to the O(name) parameter, but it may be necessary to provide + an ID for uniqueness. This value will return in the response as "CheckId". + type: str + interval: + description: + - The interval at which the service check will be run. + This is a number with a V(s) or V(m) suffix to signify the units of seconds or minutes, for example V(15s) or V(1m). + If no suffix is supplied V(s) will be used by default, for example V(10) will be V(10s). + - Required if one of the parameters O(args), O(http), or O(tcp) is specified. + type: str + notes: + description: + - Notes to attach to check when registering it. + type: str + args: + description: + - Specifies command arguments to run to update the status of the check. + - Requires O(interval) to be provided. + - Mutually exclusive with O(ttl), O(tcp) and O(http). + type: list + elements: str + ttl: + description: + - Checks can be registered with a TTL instead of a O(args) and O(interval) + this means that the service will check in with the agent before the + TTL expires. If it doesn't the check will be considered failed. + Required if registering a check and the script an interval are missing + Similar to the interval this is a number with a V(s) or V(m) suffix to + signify the units of seconds or minutes, for example V(15s) or V(1m). + If no suffix is supplied V(s) will be used by default, for example V(10) will be V(10s). + - Mutually exclusive with O(args), O(tcp) and O(http). + type: str + tcp: + description: + - Checks can be registered with a TCP port. This means that consul + will check if the connection attempt to that port is successful (that is, the port is currently accepting connections). + The format is V(host:port), for example V(localhost:80). + - Requires O(interval) to be provided. + - Mutually exclusive with O(args), O(ttl) and O(http). + type: str + version_added: '1.3.0' + http: + description: + - Checks can be registered with an HTTP endpoint. This means that consul + will check that the http endpoint returns a successful HTTP status. + - Requires O(interval) to be provided. + - Mutually exclusive with O(args), O(ttl) and O(tcp). + type: str + timeout: + description: + - A custom HTTP check timeout. The consul default is 10 seconds. + Similar to the interval this is a number with a V(s) or V(m) suffix to + signify the units of seconds or minutes, for example V(15s) or V(1m). + If no suffix is supplied V(s) will be used by default, for example V(10) will be V(10s). + type: str + service_id: + description: + - The ID for the service, must be unique per node. If O(state=absent), + defaults to the service name if supplied. 
+ type: str +''' + +EXAMPLES = ''' +- name: Register tcp check for service 'nginx' + community.general.consul_agent_check: + name: nginx_tcp_check + service_id: nginx + interval: 60s + tcp: localhost:80 + notes: "Nginx Check" + +- name: Register http check for service 'nginx' + community.general.consul_agent_check: + name: nginx_http_check + service_id: nginx + interval: 60s + http: http://localhost:80/status + notes: "Nginx Check" + +- name: Remove check for service 'nginx' + community.general.consul_agent_check: + state: absent + id: nginx_http_check + service_id: "{{ nginx_service.ID }}" +''' + +RETURN = """ +check: + description: The check as returned by the consul HTTP API. + returned: always + type: dict + sample: + CheckID: nginx_check + ServiceID: nginx + Interval: 30s + Type: http + Notes: Nginx Check +operation: + description: The operation performed. + returned: changed + type: str + sample: update +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.consul import ( + AUTH_ARGUMENTS_SPEC, + OPERATION_CREATE, + OPERATION_UPDATE, + OPERATION_DELETE, + OPERATION_READ, + _ConsulModule, + validate_check, +) + +_ARGUMENT_SPEC = { + "state": dict(default="present", choices=["present", "absent"]), + "name": dict(type='str'), + "id": dict(type='str'), + "interval": dict(type='str'), + "notes": dict(type='str'), + "args": dict(type='list', elements='str'), + "http": dict(type='str'), + "tcp": dict(type='str'), + "ttl": dict(type='str'), + "timeout": dict(type='str'), + "service_id": dict(type='str'), +} + +_MUTUALLY_EXCLUSIVE = [ + ('args', 'ttl', 'tcp', 'http'), +] + +_REQUIRED_IF = [ + ('state', 'present', ['name']), + ('state', 'absent', ('id', 'name'), True), +] + +_REQUIRED_BY = { + 'args': 'interval', + 'http': 'interval', + 'tcp': 'interval', +} + +_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC) + + +class ConsulAgentCheckModule(_ConsulModule): + api_endpoint = "agent/check" + result_key = "check" + unique_identifiers = ["id", "name"] + operational_attributes = {"Node", "CheckID", "Output", "ServiceName", "ServiceTags", + "Status", "Type", "ExposedPort", "Definition"} + + def endpoint_url(self, operation, identifier=None): + if operation == OPERATION_READ: + return "agent/checks" + if operation in [OPERATION_CREATE, OPERATION_UPDATE]: + return "/".join([self.api_endpoint, "register"]) + if operation == OPERATION_DELETE: + return "/".join([self.api_endpoint, "deregister", identifier]) + + return super(ConsulAgentCheckModule, self).endpoint_url(operation, identifier) + + def read_object(self): + url = self.endpoint_url(OPERATION_READ) + checks = self.get(url) + identifier = self.id_from_obj(self.params) + if identifier in checks: + return checks[identifier] + return None + + def prepare_object(self, existing, obj): + existing = super(ConsulAgentCheckModule, self).prepare_object(existing, obj) + validate_check(existing) + return existing + + def delete_object(self, obj): + if not self._module.check_mode: + self.put(self.endpoint_url(OPERATION_DELETE, obj.get("CheckID"))) + return {} + + +def main(): + module = AnsibleModule( + _ARGUMENT_SPEC, + mutually_exclusive=_MUTUALLY_EXCLUSIVE, + required_if=_REQUIRED_IF, + required_by=_REQUIRED_BY, + supports_check_mode=True, + ) + + consul_module = ConsulAgentCheckModule(module) + consul_module.execute() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/consul_agent_service.py b/plugins/modules/consul_agent_service.py new file mode 100644 index 
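A simplified sketch of the endpoint routing implemented by ConsulAgentCheckModule.endpoint_url above; the operation names are inlined as plain strings for illustration and need not match the OPERATION_* constants literally:

def check_endpoint(operation, identifier=None):
    base = "agent/check"
    if operation == "read":
        # checks are listed in bulk and then filtered by identifier in read_object()
        return "agent/checks"
    if operation in ("create", "update"):
        return base + "/register"
    if operation == "delete":
        return base + "/deregister/" + identifier
    raise ValueError("unsupported operation: %s" % operation)

print(check_endpoint("create"))                      # agent/check/register
print(check_endpoint("delete", "nginx_http_check"))  # agent/check/deregister/nginx_http_check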
0000000000..a8ef098970 --- /dev/null +++ b/plugins/modules/consul_agent_service.py @@ -0,0 +1,289 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2024, Michael Ilg +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +module: consul_agent_service +short_description: Add, modify and delete services within a consul cluster +version_added: 9.1.0 +description: + - Allows the addition, modification and deletion of services in a consul + cluster via the agent. + - There are currently no plans to create services and checks in one. + This is because the Consul API does not provide checks for a service and + the checks themselves do not match the module parameters. + Therefore, only a service without checks can be created in this module. +author: + - Michael Ilg (@Ilgmi) +extends_documentation_fragment: + - community.general.consul + - community.general.consul.actiongroup_consul + - community.general.consul.token + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: partial + details: + - In check mode the diff will miss operational attributes. +options: + state: + description: + - Whether the service should be present or absent. + choices: ['present', 'absent'] + default: present + type: str + name: + description: + - Unique name for the service on a node, must be unique per node, + required if registering a service. + type: str + id: + description: + - Specifies a unique ID for this service. This must be unique per agent. This defaults to the O(name) parameter if not provided. + If O(state=absent), defaults to the service name if supplied. + type: str + tags: + description: + - Tags that will be attached to the service registration. + type: list + elements: str + address: + description: + - The address to advertise that the service will be listening on. + This value will be passed as the C(address) parameter to Consul's + C(/v1/agent/service/register) API method, so refer to the Consul API + documentation for further details. + type: str + meta: + description: + - Optional meta data used for filtering. + For keys, the characters C(A-Z), C(a-z), C(0-9), C(_), C(-) are allowed. + Not allowed characters are replaced with underscores. + type: dict + service_port: + description: + - The port on which the service is listening. Can optionally be supplied for + registration of a service, that is if O(name) or O(id) is set. + type: int + enable_tag_override: + description: + - Specifies to disable the anti-entropy feature for this service's tags. + If EnableTagOverride is set to true then external agents can update this service in the catalog and modify the tags. + type: bool + default: False + weights: + description: + - Specifies weights for the service + type: dict + suboptions: + passing: + description: + - Weights for passing. + type: int + default: 1 + warning: + description: + - Weights for warning. 
+ type: int + default: 1 + default: {"passing": 1, "warning": 1} +''' + +EXAMPLES = ''' +- name: Register nginx service with the local consul agent + community.general.consul_agent_service: + host: consul1.example.com + token: some_management_acl + name: nginx + service_port: 80 + +- name: Register nginx with a tcp check + community.general.consul_agent_service: + host: consul1.example.com + token: some_management_acl + name: nginx + service_port: 80 + +- name: Register nginx with an http check + community.general.consul_agent_service: + host: consul1.example.com + token: some_management_acl + name: nginx + service_port: 80 + +- name: Register external service nginx available at 10.1.5.23 + community.general.consul_agent_service: + host: consul1.example.com + token: some_management_acl + name: nginx + service_port: 80 + address: 10.1.5.23 + +- name: Register nginx with some service tags + community.general.consul_agent_service: + host: consul1.example.com + token: some_management_acl + name: nginx + service_port: 80 + tags: + - prod + - webservers + +- name: Register nginx with some service meta + community.general.consul_agent_service: + host: consul1.example.com + token: some_management_acl + name: nginx + service_port: 80 + meta: + nginx_version: 1.25.3 + +- name: Remove nginx service + community.general.consul_agent_service: + host: consul1.example.com + token: some_management_acl + service_id: nginx + state: absent + +- name: Register celery worker service + community.general.consul_agent_service: + host: consul1.example.com + token: some_management_acl + name: celery-worker + tags: + - prod + - worker +''' + +RETURN = """ +service: + description: The service as returned by the consul HTTP API. + returned: always + type: dict + sample: + ID: nginx + Service: nginx + Address: localhost + Port: 80 + Tags: + - http + Meta: + - nginx_version: 1.23.3 + Datacenter: dc1 + Weights: + Passing: 1 + Warning: 1 + ContentHash: 61a245cd985261ac + EnableTagOverride: false +operation: + description: The operation performed. 
+ returned: changed + type: str + sample: update +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.consul import ( + AUTH_ARGUMENTS_SPEC, + OPERATION_CREATE, + OPERATION_UPDATE, + OPERATION_DELETE, + _ConsulModule +) + +_CHECK_MUTUALLY_EXCLUSIVE = [('args', 'ttl', 'tcp', 'http')] +_CHECK_REQUIRED_BY = { + 'args': 'interval', + 'http': 'interval', + 'tcp': 'interval', +} + +_ARGUMENT_SPEC = { + "state": dict(default="present", choices=["present", "absent"]), + "name": dict(type='str'), + "id": dict(type='str'), + "tags": dict(type='list', elements='str'), + "address": dict(type='str'), + "meta": dict(type='dict'), + "service_port": dict(type='int'), + "enable_tag_override": dict(type='bool', default=False), + "weights": dict(type='dict', options=dict( + passing=dict(type='int', default=1, no_log=False), + warning=dict(type='int', default=1) + ), default={"passing": 1, "warning": 1}) +} + +_REQUIRED_IF = [ + ('state', 'present', ['name']), + ('state', 'absent', ('id', 'name'), True), +] + +_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC) + + +class ConsulAgentServiceModule(_ConsulModule): + api_endpoint = "agent/service" + result_key = "service" + unique_identifiers = ["id", "name"] + operational_attributes = {"Service", "ContentHash", "Datacenter"} + + def endpoint_url(self, operation, identifier=None): + if operation in [OPERATION_CREATE, OPERATION_UPDATE]: + return "/".join([self.api_endpoint, "register"]) + if operation == OPERATION_DELETE: + return "/".join([self.api_endpoint, "deregister", identifier]) + + return super(ConsulAgentServiceModule, self).endpoint_url(operation, identifier) + + def prepare_object(self, existing, obj): + existing = super(ConsulAgentServiceModule, self).prepare_object(existing, obj) + if "ServicePort" in existing: + existing["Port"] = existing.pop("ServicePort") + + if "ID" not in existing: + existing["ID"] = existing["Name"] + + return existing + + def needs_update(self, api_obj, module_obj): + obj = {} + if "Service" in api_obj: + obj["Service"] = api_obj["Service"] + api_obj = self.prepare_object(api_obj, obj) + + if "Name" in module_obj: + module_obj["Service"] = module_obj.pop("Name") + if "ServicePort" in module_obj: + module_obj["Port"] = module_obj.pop("ServicePort") + + return super(ConsulAgentServiceModule, self).needs_update(api_obj, module_obj) + + def delete_object(self, obj): + if not self._module.check_mode: + url = self.endpoint_url(OPERATION_DELETE, self.id_from_obj(obj, camel_case=True)) + self.put(url) + return {} + + +def main(): + module = AnsibleModule( + _ARGUMENT_SPEC, + required_if=_REQUIRED_IF, + supports_check_mode=True, + ) + + consul_module = ConsulAgentServiceModule(module) + consul_module.execute() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/consul_auth_method.py b/plugins/modules/consul_auth_method.py index afe549f6ef..e28474c313 100644 --- a/plugins/modules/consul_auth_method.py +++ b/plugins/modules/consul_auth_method.py @@ -168,7 +168,7 @@ def normalize_ttl(ttl): class ConsulAuthMethodModule(_ConsulModule): api_endpoint = "acl/auth-method" result_key = "auth_method" - unique_identifier = "name" + unique_identifiers = ["name"] def map_param(self, k, v, is_update): if k == "config" and v: diff --git a/plugins/modules/consul_binding_rule.py b/plugins/modules/consul_binding_rule.py index 88496f8675..6a2882cee2 100644 --- a/plugins/modules/consul_binding_rule.py +++ b/plugins/modules/consul_binding_rule.py @@ -124,7 +124,7 @@ from 
ansible_collections.community.general.plugins.module_utils.consul import ( class ConsulBindingRuleModule(_ConsulModule): api_endpoint = "acl/binding-rule" result_key = "binding_rule" - unique_identifier = "id" + unique_identifiers = ["id"] def read_object(self): url = "acl/binding-rules?authmethod={0}".format(self.params["auth_method"]) diff --git a/plugins/modules/consul_policy.py b/plugins/modules/consul_policy.py index 2ed6021b03..36139ac097 100644 --- a/plugins/modules/consul_policy.py +++ b/plugins/modules/consul_policy.py @@ -145,7 +145,7 @@ _ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC) class ConsulPolicyModule(_ConsulModule): api_endpoint = "acl/policy" result_key = "policy" - unique_identifier = "id" + unique_identifiers = ["id"] def endpoint_url(self, operation, identifier=None): if operation == OPERATION_READ: diff --git a/plugins/modules/consul_role.py b/plugins/modules/consul_role.py index e07e2036fe..d6c4e4dd92 100644 --- a/plugins/modules/consul_role.py +++ b/plugins/modules/consul_role.py @@ -212,7 +212,7 @@ from ansible_collections.community.general.plugins.module_utils.consul import ( class ConsulRoleModule(_ConsulModule): api_endpoint = "acl/role" result_key = "role" - unique_identifier = "id" + unique_identifiers = ["id"] def endpoint_url(self, operation, identifier=None): if operation == OPERATION_READ: diff --git a/plugins/modules/consul_token.py b/plugins/modules/consul_token.py index 02bc544da7..c8bc8bc279 100644 --- a/plugins/modules/consul_token.py +++ b/plugins/modules/consul_token.py @@ -235,13 +235,13 @@ def normalize_link_obj(api_obj, module_obj, key): class ConsulTokenModule(_ConsulModule): api_endpoint = "acl/token" result_key = "token" - unique_identifier = "accessor_id" + unique_identifiers = ["accessor_id"] create_only_fields = {"expiration_ttl"} def read_object(self): # if `accessor_id` is not supplied we can only create objects and are not idempotent - if not self.params.get(self.unique_identifier): + if not self.id_from_obj(self.params): return None return super(ConsulTokenModule, self).read_object() diff --git a/plugins/modules/copr.py b/plugins/modules/copr.py index 157a6c1605..809064114a 100644 --- a/plugins/modules/copr.py +++ b/plugins/modules/copr.py @@ -52,6 +52,18 @@ options: for example V(epel-7-x86_64). Default chroot is determined by the operating system, version of the operating system, and architecture on which the module is run. type: str + includepkgs: + description: List of packages to include. + required: false + type: list + elements: str + version_added: 9.4.0 + excludepkgs: + description: List of packages to exclude. 
+ required: false + type: list + elements: str + version_added: 9.4.0 """ EXAMPLES = r""" @@ -255,6 +267,12 @@ class CoprModule(object): """ if not repo_content: repo_content = self._download_repo_info() + if self.ansible_module.params["includepkgs"]: + includepkgs_value = ','.join(self.ansible_module.params['includepkgs']) + repo_content = repo_content.rstrip('\n') + '\nincludepkgs={0}\n'.format(includepkgs_value) + if self.ansible_module.params["excludepkgs"]: + excludepkgs_value = ','.join(self.ansible_module.params['excludepkgs']) + repo_content = repo_content.rstrip('\n') + '\nexcludepkgs={0}\n'.format(excludepkgs_value) if self._compare_repo_content(repo_filename_path, repo_content): return False if not self.check_mode: @@ -470,6 +488,8 @@ def run_module(): name=dict(type="str", required=True), state=dict(type="str", choices=["enabled", "disabled", "absent"], default="enabled"), chroot=dict(type="str"), + includepkgs=dict(type='list', elements="str", required=False), + excludepkgs=dict(type='list', elements="str", required=False), ) module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) params = module.params diff --git a/plugins/modules/cpanm.py b/plugins/modules/cpanm.py index 302f995932..25489170dd 100644 --- a/plugins/modules/cpanm.py +++ b/plugins/modules/cpanm.py @@ -10,14 +10,14 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: cpanm short_description: Manages Perl library dependencies description: - - Manage Perl library dependencies using cpanminus. +- Manage Perl library dependencies using cpanminus. extends_documentation_fragment: - - community.general.attributes +- community.general.attributes attributes: check_mode: support: none @@ -27,76 +27,82 @@ options: name: type: str description: - - The Perl library to install. Valid values change according to the O(mode), see notes for more details. - - Note that for installing from a local path the parameter O(from_path) should be used. + - The Perl library to install. Valid values change according to the O(mode), see notes for more details. + - Note that for installing from a local path the parameter O(from_path) should be used. aliases: [pkg] from_path: type: path description: - - The local directory or C(tar.gz) file to install from. + - The local directory or C(tar.gz) file to install from. notest: description: - - Do not run unit tests. + - Do not run unit tests. type: bool default: false locallib: description: - - Specify the install base to install modules. + - Specify the install base to install modules. type: path mirror: description: - - Specifies the base URL for the CPAN mirror to use. + - Specifies the base URL for the CPAN mirror to use. type: str mirror_only: description: - - Use the mirror's index file instead of the CPAN Meta DB. + - Use the mirror's index file instead of the CPAN Meta DB. type: bool default: false installdeps: description: - - Only install dependencies. + - Only install dependencies. type: bool default: false version: description: - - Version specification for the perl module. When O(mode) is V(new), C(cpanm) version operators are accepted. + - Version specification for the perl module. When O(mode) is V(new), C(cpanm) version operators are accepted. type: str executable: description: - - Override the path to the cpanm executable. + - Override the path to the cpanm executable. type: path mode: description: - - Controls the module behavior. See notes below for more details. 
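To show what the new includepkgs handling in copr.py above writes into the repo file, here is the same string manipulation applied to a made-up .repo body (excludepkgs works the same way):

repo_content = "[copr:copr.fedorainfracloud.org:user:project]\nenabled=1\n"
includepkgs = ["nginx", "nginx-mod-http-lua"]

# join the package list and append it as a new line, as the patched module does
includepkgs_value = ','.join(includepkgs)
repo_content = repo_content.rstrip('\n') + '\nincludepkgs={0}\n'.format(includepkgs_value)
print(repo_content)
# [copr:copr.fedorainfracloud.org:user:project]
# enabled=1
# includepkgs=nginx,nginx-mod-http-lua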
- - The default changed from V(compatibility) to V(new) in community.general 9.0.0. + - Controls the module behavior. See notes below for more details. + - The default changed from V(compatibility) to V(new) in community.general 9.0.0. type: str choices: [compatibility, new] default: new version_added: 3.0.0 name_check: description: - - When O(mode=new), this parameter can be used to check if there is a module O(name) installed (at O(version), when specified). + - When O(mode=new), this parameter can be used to check if there is a module O(name) installed (at O(version), when specified). type: str version_added: 3.0.0 notes: - - Please note that U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm, cpanm) must be installed on the remote host. - - "This module now comes with a choice of execution O(mode): V(compatibility) or V(new)." - - > - O(mode=compatibility): When using V(compatibility) mode, the module will keep backward compatibility. - This was the default mode before community.general 9.0.0. - O(name) must be either a module name or a distribution file. If the perl module given by O(name) is installed (at the exact O(version) - when specified), then nothing happens. Otherwise, it will be installed using the C(cpanm) executable. O(name) cannot be an URL, or a git URL. - C(cpanm) version specifiers do not work in this mode. - - > - O(mode=new): When using V(new) mode, the module will behave differently. The O(name) parameter may refer to a module name, a distribution file, - a HTTP URL or a git repository URL as described in C(cpanminus) documentation. C(cpanm) version specifiers are recognized. - This is the default mode from community.general 9.0.0 onwards. -author: - - "Franck Cuny (@fcuny)" - - "Alexei Znamensky (@russoz)" -''' +- Please note that U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm, cpanm) must be installed on the remote host. +- "This module now comes with a choice of execution O(mode): V(compatibility) or V(new)." +- > + O(mode=compatibility): When using V(compatibility) mode, the module will keep backward compatibility. + This was the default mode before community.general 9.0.0. + O(name) must be either a module name or a distribution file. If the perl module given by O(name) is installed (at the exact O(version) + when specified), then nothing happens. Otherwise, it will be installed using the C(cpanm) executable. O(name) cannot be an URL, or a git URL. + C(cpanm) version specifiers do not work in this mode. +- > + O(mode=new): When using V(new) mode, the module will behave differently. The O(name) parameter may refer to a module name, a distribution file, + a HTTP URL or a git repository URL as described in C(cpanminus) documentation. C(cpanm) version specifiers are recognized. + This is the default mode from community.general 9.0.0 onwards. -EXAMPLES = ''' +seealso: +- name: C(cpanm) command manual page + description: Manual page for the command. 
+ link: https://metacpan.org/dist/App-cpanminus/view/bin/cpanm +author: +- "Franck Cuny (@fcuny)" +- "Alexei Znamensky (@russoz)" +""" + +EXAMPLES = """ +--- - name: Install Dancer perl package community.general.cpanm: name: Dancer @@ -134,7 +140,7 @@ EXAMPLES = ''' community.general.cpanm: name: Dancer version: '1.0' -''' +""" import os @@ -170,6 +176,7 @@ class CPANMinus(ModuleHelper): installdeps=cmd_runner_fmt.as_bool("--installdeps"), pkg_spec=cmd_runner_fmt.as_list(), ) + use_old_vardict = False def __init_module__(self): v = self.vars diff --git a/plugins/modules/cronvar.py b/plugins/modules/cronvar.py index fdcbc7d24b..66fa175498 100644 --- a/plugins/modules/cronvar.py +++ b/plugins/modules/cronvar.py @@ -183,6 +183,7 @@ class CronVar(object): fileh = open(backup_file, 'w') elif self.cron_file: fileh = open(self.cron_file, 'w') + path = None else: filed, path = tempfile.mkstemp(prefix='crontab') fileh = os.fdopen(filed, 'w') diff --git a/plugins/modules/django_check.py b/plugins/modules/django_check.py new file mode 100644 index 0000000000..1553da7a30 --- /dev/null +++ b/plugins/modules/django_check.py @@ -0,0 +1,113 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2024, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +module: django_check +author: + - Alexei Znamensky (@russoz) +short_description: Wrapper for C(django-admin check) +version_added: 9.1.0 +description: + - This module is a wrapper for the execution of C(django-admin check). +extends_documentation_fragment: + - community.general.attributes + - community.general.django +options: + database: + description: + - Specify databases to run checks against. + - If not specified, Django will not run database tests. + type: list + elements: str + deploy: + description: + - Include additional checks relevant in a deployment setting. + type: bool + default: false + fail_level: + description: + - Message level that will trigger failure. + - Default is the Django default value. Check the documentation for the version being used. + type: str + choices: [CRITICAL, ERROR, WARNING, INFO, DEBUG] + tags: + description: + - Restrict checks to specific tags. + type: list + elements: str + apps: + description: + - Restrict checks to specific applications. + - Default is to check all applications. + type: list + elements: str +notes: + - The outcome of the module is found in the common return values RV(ignore:stdout), RV(ignore:stderr), RV(ignore:rc). + - The module will fail if RV(ignore:rc) is not zero. +attributes: + check_mode: + support: full + diff_mode: + support: none +""" + +EXAMPLES = """ +- name: Check the entire project + community.general.django_check: + settings: myproject.settings + +- name: Create the project using specific databases + community.general.django_check: + database: + - somedb + - myotherdb + settings: fancysite.settings + pythonpath: /home/joedoe/project/fancysite + venv: /home/joedoe/project/fancysite/venv +""" + +RETURN = """ +run_info: + description: Command-line execution information. 
+ type: dict + returned: success and C(verbosity) >= 3 +""" + +from ansible_collections.community.general.plugins.module_utils.django import DjangoModuleHelper +from ansible_collections.community.general.plugins.module_utils.cmd_runner import cmd_runner_fmt + + +class DjangoCheck(DjangoModuleHelper): + module = dict( + argument_spec=dict( + database=dict(type="list", elements="str"), + deploy=dict(type="bool", default=False), + fail_level=dict(type="str", choices=["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"]), + tags=dict(type="list", elements="str"), + apps=dict(type="list", elements="str"), + ), + supports_check_mode=True, + ) + arg_formats = dict( + database=cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_val)("--database"), + deploy=cmd_runner_fmt.as_bool("--deploy"), + fail_level=cmd_runner_fmt.as_opt_val("--fail-level"), + tags=cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_val)("--tag"), + apps=cmd_runner_fmt.as_list(), + ) + django_admin_cmd = "check" + django_admin_arg_order = "database deploy fail_level tags apps" + + +def main(): + DjangoCheck.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/django_command.py b/plugins/modules/django_command.py index 788f4a100e..dcb8d26313 100644 --- a/plugins/modules/django_command.py +++ b/plugins/modules/django_command.py @@ -57,6 +57,8 @@ run_info: returned: success and O(verbosity) >= 3 """ +import shlex + from ansible_collections.community.general.plugins.module_utils.django import DjangoModuleHelper from ansible_collections.community.general.plugins.module_utils.cmd_runner import cmd_runner_fmt @@ -74,6 +76,9 @@ class DjangoCommand(DjangoModuleHelper): ) django_admin_arg_order = "extra_args" + def __init_module__(self): + self.vars.command = shlex.split(self.vars.command) + def main(): DjangoCommand.execute() diff --git a/plugins/modules/django_createcachetable.py b/plugins/modules/django_createcachetable.py new file mode 100644 index 0000000000..b038e0358f --- /dev/null +++ b/plugins/modules/django_createcachetable.py @@ -0,0 +1,67 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2024, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +module: django_createcachetable +author: + - Alexei Znamensky (@russoz) +short_description: Wrapper for C(django-admin createcachetable) +version_added: 9.1.0 +description: + - This module is a wrapper for the execution of C(django-admin createcachetable). +extends_documentation_fragment: + - community.general.attributes + - community.general.django + - community.general.django.database +attributes: + check_mode: + support: full + diff_mode: + support: none +""" + +EXAMPLES = """ +- name: Create cache table in the default database + community.general.django_createcachetable: + settings: myproject.settings + +- name: Create cache table in the other database + community.general.django_createcachetable: + database: myotherdb + settings: fancysite.settings + pythonpath: /home/joedoe/project/fancysite + venv: /home/joedoe/project/fancysite/venv +""" + +RETURN = """ +run_info: + description: Command-line execution information. 
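With the shlex import and the new __init_module__() above, django_command now tokenizes the command option shell-style before handing it to django-admin, so quoted arguments survive as single list items. For example:

import shlex

# a command string as a user might pass it to the module's "command" option
print(shlex.split('check --deploy --fail-level "WARNING"'))
# ['check', '--deploy', '--fail-level', 'WARNING']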
+ type: dict + returned: success and O(verbosity) >= 3 +""" + +from ansible_collections.community.general.plugins.module_utils.django import DjangoModuleHelper + + +class DjangoCreateCacheTable(DjangoModuleHelper): + module = dict( + supports_check_mode=True, + ) + django_admin_cmd = "createcachetable" + django_admin_arg_order = "noinput database dry_run" + _django_args = ["noinput", "database", "dry_run"] + _check_mode_arg = "dry_run" + + +def main(): + DjangoCreateCacheTable.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/dnf_config_manager.py b/plugins/modules/dnf_config_manager.py index 069fd0ddc7..aa2571d9f0 100644 --- a/plugins/modules/dnf_config_manager.py +++ b/plugins/modules/dnf_config_manager.py @@ -153,7 +153,7 @@ def get_repo_states(module): def set_repo_states(module, repo_ids, state): - module.run_command([DNF_BIN, 'config-manager', '--set-{0}'.format(state)] + repo_ids, check_rc=True) + module.run_command([DNF_BIN, 'config-manager', '--assumeyes', '--set-{0}'.format(state)] + repo_ids, check_rc=True) def pack_repo_states_for_return(states): @@ -186,6 +186,7 @@ def main(): argument_spec=module_args, supports_check_mode=True ) + module.run_command_environ_update = dict(LANGUAGE='C', LC_ALL='C') if not os.path.exists(DNF_BIN): module.fail_json(msg="%s was not found" % DNF_BIN) diff --git a/plugins/modules/etcd3.py b/plugins/modules/etcd3.py index 2fdc3f2f83..b1bb181cf4 100644 --- a/plugins/modules/etcd3.py +++ b/plugins/modules/etcd3.py @@ -193,13 +193,8 @@ def run_module(): allowed_keys = ['host', 'port', 'ca_cert', 'cert_cert', 'cert_key', 'timeout', 'user', 'password'] - # TODO(evrardjp): Move this back to a dict comprehension when python 2.7 is - # the minimum supported version - # client_params = {key: value for key, value in module.params.items() if key in allowed_keys} - client_params = dict() - for key, value in module.params.items(): - if key in allowed_keys: - client_params[key] = value + + client_params = {key: value for key, value in module.params.items() if key in allowed_keys} try: etcd = etcd3.client(**client_params) except Exception as exp: diff --git a/plugins/modules/flatpak.py b/plugins/modules/flatpak.py index 15e404d45b..84e4ea8374 100644 --- a/plugins/modules/flatpak.py +++ b/plugins/modules/flatpak.py @@ -329,13 +329,39 @@ def _match_flat_using_flatpak_column_feature(module, binary, parsed_name, method return row.split()[0] +def _is_flatpak_id(part): + # For guidelines on application IDs, refer to the following resources: + # Flatpak: + # https://docs.flatpak.org/en/latest/conventions.html#application-ids + # Flathub: + # https://docs.flathub.org/docs/for-app-authors/requirements#application-id + if '.' not in part: + return False + sections = part.split('.') + if len(sections) < 2: + return False + domain = sections[0] + if not domain.islower(): + return False + for section in sections[1:]: + if not section.isalnum(): + return False + return True + + def _parse_flatpak_name(name): if name.startswith('http://') or name.startswith('https://'): file_name = urlparse(name).path.split('/')[-1] file_name_without_extension = file_name.split('.')[0:-1] common_name = ".".join(file_name_without_extension) else: - common_name = name + parts = name.split('/') + for part in parts: + if _is_flatpak_id(part): + common_name = part + break + else: + common_name = name return common_name @@ -393,6 +419,8 @@ def main(): if not binary: module.fail_json(msg="Executable '%s' was not found on the system." 
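A condensed, standalone copy of the new _is_flatpak_id() heuristic from flatpak.py above, run over an illustrative ref string to show how _parse_flatpak_name() now picks the application ID out of a name such as app/org.gnome.gedit/x86_64/stable:

def is_flatpak_id(part):
    # an ID needs at least two dot-separated sections, a lowercase first section,
    # and alphanumeric remaining sections, per the checks added in the patch
    if '.' not in part:
        return False
    sections = part.split('.')
    if len(sections) < 2:
        return False
    if not sections[0].islower():
        return False
    return all(section.isalnum() for section in sections[1:])

for candidate in "app/org.gnome.gedit/x86_64/stable".split('/'):
    if is_flatpak_id(candidate):
        print(candidate)  # org.gnome.gedit
        break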
% executable, **result) + module.run_command_environ_update = dict(LANGUAGE='C', LC_ALL='C') + installed, not_installed = flatpak_exists(module, binary, name, method) if state == 'absent' and installed: uninstall_flat(module, binary, installed, method) diff --git a/plugins/modules/gandi_livedns.py b/plugins/modules/gandi_livedns.py index ad2e96fd15..e90483a49d 100644 --- a/plugins/modules/gandi_livedns.py +++ b/plugins/modules/gandi_livedns.py @@ -8,15 +8,14 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: gandi_livedns author: - Gregory Thiemonge (@gthiemonge) version_added: "2.3.0" short_description: Manage Gandi LiveDNS records description: - - "Manages DNS records by the Gandi LiveDNS API, see the docs: U(https://doc.livedns.gandi.net/)." + - 'Manages DNS records by the Gandi LiveDNS API, see the docs: U(https://doc.livedns.gandi.net/).' extends_documentation_fragment: - community.general.attributes attributes: @@ -27,32 +26,31 @@ attributes: options: personal_access_token: description: - - Scoped API token. - - One of O(personal_access_token) and O(api_key) must be specified. + - Scoped API token. + - One of O(personal_access_token) and O(api_key) must be specified. type: str version_added: 9.0.0 api_key: description: - - Account API token. - - Note that these type of keys are deprecated and might stop working at some point. - Use personal access tokens instead. - - One of O(personal_access_token) and O(api_key) must be specified. + - Account API token. + - Note that these type of keys are deprecated and might stop working at some point. Use personal access tokens instead. + - One of O(personal_access_token) and O(api_key) must be specified. type: str record: description: - - Record to add. + - Record to add. type: str required: true state: description: - - Whether the record(s) should exist or not. + - Whether the record(s) should exist or not. type: str - choices: [ absent, present ] + choices: [absent, present] default: present ttl: description: - - The TTL to give the new record. - - Required when O(state=present). + - The TTL to give the new record. + - Required when O(state=present). type: int type: description: @@ -61,25 +59,25 @@ options: required: true values: description: - - The record values. - - Required when O(state=present). + - The record values. + - Required when O(state=present). type: list elements: str domain: description: - - The name of the Domain to work with (for example, "example.com"). + - The name of the Domain to work with (for example, V(example.com)). required: true type: str -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a test A record to point to 127.0.0.1 in the my.com domain community.general.gandi_livedns: domain: my.com record: test type: A values: - - 127.0.0.1 + - 127.0.0.1 ttl: 7200 personal_access_token: dummytoken register: record @@ -90,7 +88,7 @@ EXAMPLES = r''' type: CNAME record: mail values: - - www + - www ttl: 7200 personal_access_token: dummytoken state: present @@ -101,7 +99,7 @@ EXAMPLES = r''' type: CNAME record: mail values: - - www + - www ttl: 10800 personal_access_token: dummytoken state: present @@ -120,46 +118,46 @@ EXAMPLES = r''' record: test type: A values: - - 127.0.0.1 + - 127.0.0.1 ttl: 7200 api_key: dummyapikey -''' +""" -RETURN = r''' +RETURN = r""" record: - description: A dictionary containing the record data. 
- returned: success, except on record deletion - type: dict - contains: - values: - description: The record content (details depend on record type). - returned: success - type: list - elements: str - sample: - - 192.0.2.91 - - 192.0.2.92 - record: - description: The record name. - returned: success - type: str - sample: www - ttl: - description: The time-to-live for the record. - returned: success - type: int - sample: 300 - type: - description: The record type. - returned: success - type: str - sample: A - domain: - description: The domain associated with the record. - returned: success - type: str - sample: my.com -''' + description: A dictionary containing the record data. + returned: success, except on record deletion + type: dict + contains: + values: + description: The record content (details depend on record type). + returned: success + type: list + elements: str + sample: + - 192.0.2.91 + - 192.0.2.92 + record: + description: The record name. + returned: success + type: str + sample: www + ttl: + description: The time-to-live for the record. + returned: success + type: int + sample: 300 + type: + description: The record type. + returned: success + type: str + sample: A + domain: + description: The domain associated with the record. + returned: success + type: str + sample: my.com +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/gconftool2.py b/plugins/modules/gconftool2.py index db7c6dc883..2f66448520 100644 --- a/plugins/modules/gconftool2.py +++ b/plugins/modules/gconftool2.py @@ -9,14 +9,19 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: gconftool2 author: - Kenneth D. Evensen (@kevensen) short_description: Edit GNOME Configurations description: - - This module allows for the manipulation of GNOME 2 Configuration via - gconftool-2. Please see the gconftool-2(1) man pages for more details. + - This module allows for the manipulation of GNOME 2 Configuration using C(gconftool-2). Please see the gconftool-2(1) man pages + for more details. +seealso: + - name: C(gconftool-2) command manual page + description: Manual page for the command. + link: https://help.gnome.org/admin//system-admin-guide/2.32/gconf-6.html.en + extends_documentation_fragment: - community.general.attributes attributes: @@ -28,42 +33,37 @@ options: key: type: str description: - - A GConf preference key is an element in the GConf repository - that corresponds to an application preference. See man gconftool-2(1). + - A GConf preference key is an element in the GConf repository that corresponds to an application preference. required: true value: type: str description: - - Preference keys typically have simple values such as strings, - integers, or lists of strings and integers. - This is ignored unless O(state=present). See man gconftool-2(1). + - Preference keys typically have simple values such as strings, integers, or lists of strings and integers. This is + ignored unless O(state=present). value_type: type: str description: - - The type of value being set. - This is ignored unless O(state=present). See man gconftool-2(1). - choices: [ bool, float, int, string ] + - The type of value being set. This is ignored unless O(state=present). + choices: [bool, float, int, string] state: type: str description: - - The action to take upon the key/value. + - The action to take upon the key/value. 
required: true - choices: [ absent, present ] + choices: [absent, present] config_source: type: str description: - - Specify a configuration source to use rather than the default path. - See man gconftool-2(1). + - Specify a configuration source to use rather than the default path. direct: description: - - Access the config database directly, bypassing server. If O(direct) is - specified then the O(config_source) must be specified as well. - See man gconftool-2(1). + - Access the config database directly, bypassing server. If O(direct) is specified then the O(config_source) must be + specified as well. type: bool default: false -''' +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Change the widget font to "Serif 12" community.general.gconftool2: key: "/desktop/gnome/interface/font_name" @@ -71,33 +71,32 @@ EXAMPLES = """ value: "Serif 12" """ -RETURN = ''' - key: - description: The key specified in the module parameters. - returned: success - type: str - sample: /desktop/gnome/interface/font_name - value_type: - description: The type of the value that was changed. - returned: success - type: str - sample: string - value: - description: - - The value of the preference key after executing the module or V(null) if key is removed. - - From community.general 7.0.0 onwards it returns V(null) for a non-existent O(key), and returned V("") before that. - returned: success - type: str - sample: "Serif 12" - previous_value: - description: - - The value of the preference key before executing the module. - - From community.general 7.0.0 onwards it returns V(null) for a non-existent O(key), and returned V("") before that. - returned: success - type: str - sample: "Serif 12" -... -''' +RETURN = r""" +key: + description: The key specified in the module parameters. + returned: success + type: str + sample: /desktop/gnome/interface/font_name +value_type: + description: The type of the value that was changed. + returned: success + type: str + sample: string +value: + description: + - The value of the preference key after executing the module or V(null) if key is removed. + - From community.general 7.0.0 onwards it returns V(null) for a non-existent O(key), and returned V("") before that. + returned: success + type: str + sample: "Serif 12" +previous_value: + description: + - The value of the preference key before executing the module. + - From community.general 7.0.0 onwards it returns V(null) for a non-existent O(key), and returned V("") before that. 
+ returned: success + type: str + sample: "Serif 12" +""" from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper from ansible_collections.community.general.plugins.module_utils.gconftool2 import gconftool2_runner @@ -127,9 +126,8 @@ class GConftool(StateModuleHelper): def __init_module__(self): self.runner = gconftool2_runner(self.module, check_rc=True) - if self.vars.state != "get": - if not self.vars.direct and self.vars.config_source is not None: - self.module.fail_json(msg='If the "config_source" is specified then "direct" must be "true"') + if not self.vars.direct and self.vars.config_source is not None: + self.do_raise('If the "config_source" is specified then "direct" must be "true"') self.vars.set('previous_value', self._get(), fact=True) self.vars.set('value_type', self.vars.value_type) @@ -140,7 +138,7 @@ class GConftool(StateModuleHelper): def _make_process(self, fail_on_err): def process(rc, out, err): if err and fail_on_err: - self.ansible.fail_json(msg='gconftool-2 failed with error: %s' % (str(err))) + self.do_raise('gconftool-2 failed with error:\n%s' % err.strip()) out = out.rstrip() self.vars.value = None if out == "" else out return self.vars.value @@ -152,16 +150,14 @@ class GConftool(StateModuleHelper): def state_absent(self): with self.runner("state key", output_process=self._make_process(False)) as ctx: ctx.run() - if self.verbosity >= 4: - self.vars.run_info = ctx.run_info + self.vars.set('run_info', ctx.run_info, verbosity=4) self.vars.set('new_value', None, fact=True) self.vars._value = None def state_present(self): with self.runner("direct config_source value_type state key value", output_process=self._make_process(True)) as ctx: ctx.run() - if self.verbosity >= 4: - self.vars.run_info = ctx.run_info + self.vars.set('run_info', ctx.run_info, verbosity=4) self.vars.set('new_value', self._get(), fact=True) self.vars._value = self.vars.new_value diff --git a/plugins/modules/gconftool2_info.py b/plugins/modules/gconftool2_info.py index 282065b95e..3c1baf9a65 100644 --- a/plugins/modules/gconftool2_info.py +++ b/plugins/modules/gconftool2_info.py @@ -7,10 +7,10 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: gconftool2_info author: - - "Alexei Znamensky (@russoz)" + - "Alexei Znamensky (@russoz)" short_description: Retrieve GConf configurations version_added: 5.1.0 description: @@ -21,32 +21,33 @@ extends_documentation_fragment: options: key: description: - - The key name for an element in the GConf database. + - The key name for an element in the GConf database. type: str required: true -notes: - - See man gconftool-2(1) for more details. seealso: + - name: C(gconftool-2) command manual page + description: Manual page for the command. + link: https://help.gnome.org/admin//system-admin-guide/2.32/gconf-6.html.en - name: gconf repository (archived) description: Git repository for the project. It is an archived project, so the repository is read-only. link: https://gitlab.gnome.org/Archive/gconf -''' +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Get value for a certain key in the database. community.general.gconftool2_info: key: /desktop/gnome/background/picture_filename register: result """ -RETURN = ''' - value: - description: +RETURN = r""" +value: + description: - The value of the property. 
- returned: success - type: str - sample: Monospace 10 -''' + returned: success + type: str + sample: Monospace 10 +""" from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper from ansible_collections.community.general.plugins.module_utils.gconftool2 import gconftool2_runner @@ -60,6 +61,7 @@ class GConftoolInfo(ModuleHelper): ), supports_check_mode=True, ) + use_old_vardict = False def __init_module__(self): self.runner = gconftool2_runner(self.module, check_rc=True) diff --git a/plugins/modules/gem.py b/plugins/modules/gem.py index f51e3350da..c01433cb90 100644 --- a/plugins/modules/gem.py +++ b/plugins/modules/gem.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: gem short_description: Manage Ruby gems description: @@ -49,38 +48,37 @@ options: repository: type: str description: - - The repository from which the gem will be installed + - The repository from which the gem will be installed. required: false aliases: [source] user_install: description: - - Install gem in user's local gems cache or for all users + - Install gem in user's local gems cache or for all users. required: false type: bool default: true executable: type: path description: - - Override the path to the gem executable + - Override the path to the gem executable. required: false install_dir: type: path description: - - Install the gems into a specific directory. - These gems will be independent from the global installed ones. - Specifying this requires user_install to be false. + - Install the gems into a specific directory. These gems will be independent from the global installed ones. Specifying + this requires user_install to be false. required: false bindir: type: path description: - - Install executables into a specific directory. + - Install executables into a specific directory. version_added: 3.3.0 norc: type: bool default: true description: - - Avoid loading any C(.gemrc) file. Ignored for RubyGems prior to 2.5.2. - - The default changed from V(false) to V(true) in community.general 6.0.0. + - Avoid loading any C(.gemrc) file. Ignored for RubyGems prior to 2.5.2. + - The default changed from V(false) to V(true) in community.general 6.0.0. version_added: 3.3.0 env_shebang: description: @@ -108,7 +106,7 @@ options: build_flags: type: str description: - - Allow adding build flags for gem compilation + - Allow adding build flags for gem compilation. 
required: false force: description: @@ -117,11 +115,11 @@ options: default: false type: bool author: - - "Ansible Core Team" - - "Johan Wiren (@johanwiren)" -''' + - "Ansible Core Team" + - "Johan Wiren (@johanwiren)" +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install version 1.0 of vagrant community.general.gem: name: vagrant @@ -138,7 +136,7 @@ EXAMPLES = ''' name: rake gem_source: /path/to/gems/rake-1.0.gem state: present -''' +""" import re diff --git a/plugins/modules/gio_mime.py b/plugins/modules/gio_mime.py index 27f90581ef..8ac4b34838 100644 --- a/plugins/modules/gio_mime.py +++ b/plugins/modules/gio_mime.py @@ -7,14 +7,15 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: gio_mime author: - "Alexei Znamensky (@russoz)" short_description: Set default handler for MIME type, for applications using Gnome GIO version_added: 7.5.0 description: - - This module allows configuring the default handler for a specific MIME type, to be used by applications built with th Gnome GIO API. + - This module allows configuring the default handler for a specific MIME type, to be used by applications built with the + Gnome GIO API. extends_documentation_fragment: - community.general.attributes attributes: @@ -37,12 +38,15 @@ notes: - This module is a thin wrapper around the C(gio mime) command (and subcommand). - See man gio(1) for more details. seealso: + - name: C(gio) command manual page + description: Manual page for the command. + link: https://man.archlinux.org/man/gio.1 - name: GIO Documentation description: Reference documentation for the GIO API.. link: https://docs.gtk.org/gio/ -''' +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Set chrome as the default handler for https community.general.gio_mime: mime_type: x-scheme-handler/https @@ -50,26 +54,26 @@ EXAMPLES = """ register: result """ -RETURN = ''' - handler: - description: +RETURN = r""" +handler: + description: - The handler set as default. - returned: success - type: str - sample: google-chrome.desktop - stdout: - description: + returned: success + type: str + sample: google-chrome.desktop +stdout: + description: - The output of the C(gio) command. - returned: success - type: str - sample: Set google-chrome.desktop as the default for x-scheme-handler/https - stderr: - description: + returned: success + type: str + sample: Set google-chrome.desktop as the default for x-scheme-handler/https +stderr: + description: - The error output of the C(gio) command. 
- returned: failure - type: str - sample: 'gio: Failed to load info for handler "never-existed.desktop"' -''' + returned: failure + type: str + sample: 'gio: Failed to load info for handler "never-existed.desktop"' +""" from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper from ansible_collections.community.general.plugins.module_utils.gio_mime import gio_mime_runner, gio_mime_get @@ -84,6 +88,7 @@ class GioMime(ModuleHelper): ), supports_check_mode=True, ) + use_old_vardict = False def __init_module__(self): self.runner = gio_mime_runner(self.module, check_rc=True) @@ -91,7 +96,7 @@ class GioMime(ModuleHelper): def __run__(self): check_mode_return = (0, 'Module executed in check mode', '') - if self.vars.has_changed("handler"): + if self.vars.has_changed: with self.runner.context(args_order=["mime_type", "handler"], check_mode_skip=True, check_mode_return=check_mode_return) as ctx: rc, out, err = ctx.run() self.vars.stdout = out diff --git a/plugins/modules/git_config.py b/plugins/modules/git_config.py index a8d2ebe979..6a6eff0be2 100644 --- a/plugins/modules/git_config.py +++ b/plugins/modules/git_config.py @@ -11,20 +11,18 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: git_config author: - Matthew Gamble (@djmattyg007) - Marius Gedminas (@mgedmin) requirements: ['git'] -short_description: Read and write git configuration +short_description: Update git configuration description: - - The M(community.general.git_config) module changes git configuration by invoking C(git config). - This is needed if you do not want to use M(ansible.builtin.template) for the entire git - config file (for example because you need to change just C(user.email) in - /etc/.git/config). Solutions involving M(ansible.builtin.command) are cumbersome or - do not work correctly in check mode. + - The M(community.general.git_config) module changes git configuration by invoking C(git config). This is needed if you + do not want to use M(ansible.builtin.template) for the entire git config file (for example because you need to change + just C(user.email) in C(/etc/.git/config)). Solutions involving M(ansible.builtin.command) are cumbersome or do not work + correctly in check mode. extends_documentation_fragment: - community.general.attributes attributes: @@ -36,17 +34,17 @@ options: list_all: description: - List all settings (optionally limited to a given O(scope)). + - This option is B(deprecated) and will be removed from community.general 11.0.0. Please use M(community.general.git_config_info) + instead. type: bool default: false name: description: - - The name of the setting. If no value is supplied, the value will - be read from the config if it has been set. + - The name of the setting. If no value is supplied, the value will be read from the config if it has been set. type: str repo: description: - - Path to a git repository for reading and writing values from a - specific repo. + - Path to a git repository for reading and writing values from a specific repo. type: path file: description: @@ -60,34 +58,34 @@ options: - If this is set to V(local), you must also specify the O(repo) parameter. - If this is set to V(file), you must also specify the O(file) parameter. - It defaults to system only when not using O(list_all=true). 
- choices: [ "file", "local", "global", "system" ] + choices: ["file", "local", "global", "system"] type: str state: description: - - "Indicates the setting should be set/unset. - This parameter has higher precedence than O(value) parameter: - when O(state=absent) and O(value) is defined, O(value) is discarded." - choices: [ 'present', 'absent' ] + - 'Indicates the setting should be set/unset. This parameter has higher precedence than O(value) parameter: when O(state=absent) + and O(value) is defined, O(value) is discarded.' + choices: ['present', 'absent'] default: 'present' type: str value: description: - - When specifying the name of a single setting, supply a value to - set that setting to the given value. + - When specifying the name of a single setting, supply a value to set that setting to the given value. + - From community.general 11.0.0 on, O(value) will be required if O(state=present). To read values, use the M(community.general.git_config_info) + module instead. type: str add_mode: description: - - Specify if a value should replace the existing value(s) or if the new - value should be added alongside other values with the same name. - - This option is only relevant when adding/replacing values. If O(state=absent) or - values are just read out, this option is not considered. - choices: [ "add", "replace-all" ] + - Specify if a value should replace the existing value(s) or if the new value should be added alongside other values + with the same name. + - This option is only relevant when adding/replacing values. If O(state=absent) or values are just read out, this option + is not considered. + choices: ["add", "replace-all"] type: str default: "replace-all" version_added: 8.1.0 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Add a setting to ~/.gitconfig community.general.git_config: name: alias.ci @@ -143,41 +141,17 @@ EXAMPLES = ''' repo: /etc scope: local value: 'root@{{ ansible_fqdn }}' +""" -- name: Read individual values from git config - community.general.git_config: - name: alias.ci - scope: global - -- name: Scope system is also assumed when reading values, unless list_all=true - community.general.git_config: - name: alias.diffc - -- name: Read all values from git config - community.general.git_config: - list_all: true - scope: global - -- name: When list_all is yes and no scope is specified, you get configuration from all scopes - community.general.git_config: - list_all: true - -- name: Specify a repository to include local settings - community.general.git_config: - list_all: true - repo: /path/to/repo.git -''' - -RETURN = ''' ---- +RETURN = r""" config_value: - description: When O(list_all=false) and value is not set, a string containing the value of the setting in name + description: When O(list_all=false) and value is not set, a string containing the value of the setting in name. returned: success type: str sample: "vim" config_values: - description: When O(list_all=true), a dict containing key/value pairs of multiple configuration settings + description: When O(list_all=true), a dict containing key/value pairs of multiple configuration settings. 
returned: success type: dict sample: @@ -185,7 +159,7 @@ config_values: color.ui: "auto" alias.diffc: "diff --cached" alias.remotev: "remote -v" -''' +""" from ansible.module_utils.basic import AnsibleModule @@ -193,7 +167,7 @@ from ansible.module_utils.basic import AnsibleModule def main(): module = AnsibleModule( argument_spec=dict( - list_all=dict(required=False, type='bool', default=False), + list_all=dict(required=False, type='bool', default=False, removed_in_version='11.0.0', removed_from_collection='community.general'), name=dict(type='str'), repo=dict(type='path'), file=dict(type='path'), @@ -222,6 +196,14 @@ def main(): new_value = params['value'] or '' add_mode = params['add_mode'] + if not unset and not new_value and not params['list_all']: + module.deprecate( + 'If state=present, a value must be specified from community.general 11.0.0 on.' + ' To read a config value, use the community.general.git_config_info module instead.', + version='11.0.0', + collection_name='community.general', + ) + scope = determine_scope(params) cwd = determine_cwd(scope, params) @@ -263,7 +245,7 @@ def main(): module.exit_json(changed=False, msg='', config_value=old_values[0] if old_values else '') elif unset and not out: module.exit_json(changed=False, msg='no setting to unset') - elif new_value in old_values and (len(old_values) == 1 or add_mode == "add"): + elif new_value in old_values and (len(old_values) == 1 or add_mode == "add") and not unset: module.exit_json(changed=False, msg="") # Until this point, the git config was just read and in case no change is needed, the module has already exited. diff --git a/plugins/modules/git_config_info.py b/plugins/modules/git_config_info.py index 147201fff3..c8152cfa42 100644 --- a/plugins/modules/git_config_info.py +++ b/plugins/modules/git_config_info.py @@ -10,8 +10,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: git_config_info author: - Guenther Grill (@guenhter) @@ -19,8 +18,7 @@ version_added: 8.1.0 requirements: ['git'] short_description: Read git configuration description: - - The M(community.general.git_config_info) module reads the git configuration - by invoking C(git config). + - The M(community.general.git_config_info) module reads the git configuration by invoking C(git config). extends_documentation_fragment: - community.general.attributes - community.general.attributes.info_module @@ -44,12 +42,12 @@ options: - If set to V(system), the system git config is used. O(path) is ignored. - If set to V(local), O(path) must be set to the repo to read from. - If set to V(file), O(path) must be set to the config file to read from. - choices: [ "global", "system", "local", "file" ] + choices: ["global", "system", "local", "file"] default: "system" type: str -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Read a system wide config community.general.git_config_info: name: core.editor @@ -81,14 +79,13 @@ EXAMPLES = ''' community.general.git_config_info: scope: file path: /etc/gitconfig -''' +""" -RETURN = ''' ---- +RETURN = r""" config_value: - description: > - When O(name) is set, a string containing the value of the setting in name. If O(name) is not set, empty. - If a config key such as V(push.pushoption) has more then one entry, just the first one is returned here. + description: >- + When O(name) is set, a string containing the value of the setting in name. If O(name) is not set, empty. 
If a config key
+    such as V(push.pushoption) has more than one entry, just the first one is returned here.
   returned: success if O(name) is set
   type: str
   sample: "vim"
 config_values:
   description:
     - This is a dictionary mapping a git configuration setting to a list of its values.
     - When O(name) is not set, all configuration settings are returned here.
     - When O(name) is set, only the setting specified in O(name) is returned here. If that setting is not set, the key will
+      still be present, and its value will be an empty list.
   returned: success
   type: dict
   sample:
@@ -106,7 +103,7 @@ config_values:
     color.ui: ["auto"]
     push.pushoption: ["merge_request.create", "merge_request.draft"]
     alias.remotev: ["remote -v"]
-'''
+"""
 from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/github_deploy_key.py b/plugins/modules/github_deploy_key.py
index ae90e04c91..509a67c491 100644
--- a/plugins/modules/github_deploy_key.py
+++ b/plugins/modules/github_deploy_key.py
@@ -9,15 +9,14 @@ from __future__ import absolute_import, division, print_function
 __metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
 module: github_deploy_key
 author: "Ali (@bincyber)"
 short_description: Manages deploy keys for GitHub repositories
 description:
-  - "Adds or removes deploy keys for GitHub repositories. Supports authentication using username and password,
-    username and password and 2-factor authentication code (OTP), OAuth2 token, or personal access token. Admin
-    rights on the repository are required."
+  - Adds or removes deploy keys for GitHub repositories. Supports authentication using username and password, username and
+    password and 2-factor authentication code (OTP), OAuth2 token, or personal access token. Admin rights on the repository
+    are required.
 extends_documentation_fragment:
   - community.general.attributes
 attributes:
@@ -28,7 +27,7 @@ options:
   github_url:
     description:
-      - The base URL of the GitHub API
+      - The base URL of the GitHub API.
     required: false
     type: str
     version_added: '0.2.0'
@@ -37,19 +36,19 @@ options:
     description:
       - The name of the individual account or organization that owns the GitHub repository.
     required: true
-    aliases: [ 'account', 'organization' ]
+    aliases: ['account', 'organization']
     type: str
   repo:
     description:
       - The name of the GitHub repository.
     required: true
-    aliases: [ 'repository' ]
+    aliases: ['repository']
     type: str
   name:
     description:
       - The name for the deploy key.
     required: true
-    aliases: [ 'title', 'label' ]
+    aliases: ['title', 'label']
     type: str
   key:
     description:
@@ -58,14 +57,15 @@ options:
     type: str
   read_only:
     description:
-      - If V(true), the deploy key will only be able to read repository contents. Otherwise, the deploy key will be able to read and write.
+      - If V(true), the deploy key will only be able to read repository contents. Otherwise, the deploy key will be able to
+        read and write.
     type: bool
     default: true
   state:
     description:
       - The state of the deploy key.
     default: "present"
-    choices: [ "present", "absent" ]
+    choices: ["present", "absent"]
     type: str
   force:
     description:
@@ -74,11 +74,12 @@ options:
     default: false
   username:
     description:
-      - The username to authenticate with. Should not be set when using personal access token.
type: str password: description: - - The password to authenticate with. Alternatively, a personal access token can be used instead of O(username) and O(password) combination. + - The password to authenticate with. Alternatively, a personal access token can be used instead of O(username) and O(password) + combination. type: str token: description: @@ -89,10 +90,10 @@ options: - The 6 digit One Time Password for 2-Factor Authentication. Required together with O(username) and O(password). type: int notes: - - "Refer to GitHub's API documentation here: https://developer.github.com/v3/repos/keys/." -''' + - "Refer to GitHub's API documentation here: https://developer.github.com/v3/repos/keys/." +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Add a new read-only deploy key to a GitHub repository using basic authentication community.general.github_deploy_key: owner: "johndoe" @@ -152,33 +153,33 @@ EXAMPLES = ''' read_only: true username: "janedoe" password: "supersecretpassword" -''' +""" -RETURN = ''' +RETURN = r""" msg: - description: the status message describing what occurred - returned: always - type: str - sample: "Deploy key added successfully" + description: The status message describing what occurred. + returned: always + type: str + sample: "Deploy key added successfully" http_status_code: - description: the HTTP status code returned by the GitHub API - returned: failed - type: int - sample: 400 + description: The HTTP status code returned by the GitHub API. + returned: failed + type: int + sample: 400 error: - description: the error message returned by the GitHub API - returned: failed - type: str - sample: "key is already in use" + description: The error message returned by the GitHub API. + returned: failed + type: str + sample: "key is already in use" id: - description: the key identifier assigned by GitHub for the deploy key - returned: changed - type: int - sample: 24381901 -''' + description: The key identifier assigned by GitHub for the deploy key. + returned: changed + type: int + sample: 24381901 +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url diff --git a/plugins/modules/github_issue.py b/plugins/modules/github_issue.py index 4e10e9f925..86e81d38ef 100644 --- a/plugins/modules/github_issue.py +++ b/plugins/modules/github_issue.py @@ -10,7 +10,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: github_issue short_description: View GitHub issue description: @@ -40,24 +40,24 @@ options: type: int action: description: - - Get various details about issue depending upon action specified. + - Get various details about issue depending upon action specified. default: 'get_status' choices: - - 'get_status' + - get_status type: str author: - - Abhijeet Kasurde (@Akasurde) -''' + - Abhijeet Kasurde (@Akasurde) +""" -RETURN = ''' +RETURN = r""" issue_status: - description: State of the GitHub issue - type: str - returned: success - sample: open, closed -''' + description: State of the GitHub issue. 
+ type: str + returned: success + sample: open, closed +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Check if GitHub issue is closed or not community.general.github_issue: organization: ansible @@ -70,7 +70,7 @@ EXAMPLES = ''' ansible.builtin.debug: msg: Do something when issue 23642 is open when: r.issue_status == 'open' -''' +""" import json diff --git a/plugins/modules/github_key.py b/plugins/modules/github_key.py index a74ead9848..f3d5863d54 100644 --- a/plugins/modules/github_key.py +++ b/plugins/modules/github_key.py @@ -9,7 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: github_key short_description: Manage GitHub access keys description: @@ -29,7 +29,7 @@ options: type: str name: description: - - SSH key name + - SSH key name. required: true type: str pubkey: @@ -44,34 +44,36 @@ options: type: str force: description: - - The default is V(true), which will replace the existing remote key - if it is different than O(pubkey). If V(false), the key will only be - set if no key with the given O(name) exists. + - The default is V(true), which will replace the existing remote key if it is different than O(pubkey). If V(false), + the key will only be set if no key with the given O(name) exists. type: bool default: true author: Robert Estelle (@erydo) -''' +""" -RETURN = ''' +RETURN = r""" deleted_keys: - description: An array of key objects that were deleted. Only present on state=absent - type: list - returned: When state=absent - sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': false}] + description: An array of key objects that were deleted. Only present on state=absent. + type: list + returned: When state=absent + sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', + 'read_only': false}] matching_keys: - description: An array of keys matching the specified name. Only present on state=present - type: list - returned: When state=present - sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': false}] + description: An array of keys matching the specified name. Only present on state=present. + type: list + returned: When state=present + sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', + 'read_only': false}] key: - description: Metadata about the key just created. Only present on state=present - type: dict - returned: success - sample: {'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': false} -''' + description: Metadata about the key just created. Only present on state=present. 
+ type: dict + returned: success + sample: {'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', + 'read_only': false} +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Read SSH public key to authorize ansible.builtin.shell: cat /home/foo/.ssh/id_rsa.pub register: ssh_pub_key @@ -89,7 +91,7 @@ EXAMPLES = ''' name: Access Key for Some Machine token: '{{ github_access_token }}' pubkey: "{{ lookup('ansible.builtin.file', '/home/foo/.ssh/id_rsa.pub') }}" -''' +""" import datetime import json @@ -162,7 +164,7 @@ def create_key(session, name, pubkey, check_mode): 'key': pubkey, 'title': name, 'url': 'http://example.com/CHECK_MODE_GITHUB_KEY', - 'created_at': datetime.strftime(now_t, '%Y-%m-%dT%H:%M:%SZ'), + 'created_at': datetime.datetime.strftime(now_t, '%Y-%m-%dT%H:%M:%SZ'), 'read_only': False, 'verified': False } diff --git a/plugins/modules/github_release.py b/plugins/modules/github_release.py index d8ee155b81..1376bf4f3d 100644 --- a/plugins/modules/github_release.py +++ b/plugins/modules/github_release.py @@ -9,78 +9,77 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: github_release short_description: Interact with GitHub Releases description: - - Fetch metadata about GitHub Releases + - Fetch metadata about GitHub Releases. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - token: - description: - - GitHub Personal Access Token for authenticating. Mutually exclusive with O(password). - type: str - user: - description: - - The GitHub account that owns the repository - type: str - required: true - password: - description: - - The GitHub account password for the user. Mutually exclusive with O(token). - type: str - repo: - description: - - Repository name - type: str - required: true - action: - description: - - Action to perform - type: str - required: true - choices: [ 'latest_release', 'create_release' ] - tag: - description: - - Tag name when creating a release. Required when using O(action=create_release). - type: str - target: - description: - - Target of release when creating a release - type: str - name: - description: - - Name of release when creating a release - type: str - body: - description: - - Description of the release when creating a release - type: str - draft: - description: - - Sets if the release is a draft or not. (boolean) - type: bool - default: false - prerelease: - description: - - Sets if the release is a prerelease or not. (boolean) - type: bool - default: false + token: + description: + - GitHub Personal Access Token for authenticating. Mutually exclusive with O(password). + type: str + user: + description: + - The GitHub account that owns the repository. + type: str + required: true + password: + description: + - The GitHub account password for the user. Mutually exclusive with O(token). + type: str + repo: + description: + - Repository name. + type: str + required: true + action: + description: + - Action to perform. + type: str + required: true + choices: ['latest_release', 'create_release'] + tag: + description: + - Tag name when creating a release. Required when using O(action=create_release). + type: str + target: + description: + - Target of release when creating a release. 
+ type: str + name: + description: + - Name of release when creating a release. + type: str + body: + description: + - Description of the release when creating a release. + type: str + draft: + description: + - Sets if the release is a draft or not. (boolean). + type: bool + default: false + prerelease: + description: + - Sets if the release is a prerelease or not. (boolean). + type: bool + default: false author: - - "Adrian Moisey (@adrianmoisey)" + - "Adrian Moisey (@adrianmoisey)" requirements: - - "github3.py >= 1.0.0a3" -''' + - "github3.py >= 1.0.0a3" +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Get latest release of a public repository community.general.github_release: user: ansible @@ -111,16 +110,15 @@ EXAMPLES = ''' target: master name: My Release body: Some description +""" -''' - -RETURN = ''' +RETURN = r""" tag: - description: Version of the created/latest release. - type: str - returned: success - sample: 1.1.0 -''' + description: Version of the created/latest release. + type: str + returned: success + sample: 1.1.0 +""" import traceback diff --git a/plugins/modules/github_repo.py b/plugins/modules/github_repo.py index f02ad30ac3..2d2c6f8588 100644 --- a/plugins/modules/github_repo.py +++ b/plugins/modules/github_repo.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: github_repo short_description: Manage your repositories on Github version_added: 2.2.0 @@ -26,81 +25,82 @@ attributes: options: username: description: - - Username used for authentication. - - This is only needed when not using O(access_token). + - Username used for authentication. + - This is only needed when not using O(access_token). type: str required: false password: description: - - Password used for authentication. - - This is only needed when not using O(access_token). + - Password used for authentication. + - This is only needed when not using O(access_token). type: str required: false access_token: description: - - Token parameter for authentication. - - This is only needed when not using O(username) and O(password). + - Token parameter for authentication. + - This is only needed when not using O(username) and O(password). type: str required: false name: description: - - Repository name. + - Repository name. type: str required: true description: description: - - Description for the repository. - - Defaults to empty if O(force_defaults=true), which is the default in this module. - - Defaults to empty if O(force_defaults=false) when creating a new repository. - - This is only used when O(state) is V(present). + - Description for the repository. + - Defaults to empty if O(force_defaults=true), which is the default in this module. + - Defaults to empty if O(force_defaults=false) when creating a new repository. + - This is only used when O(state) is V(present). type: str required: false private: description: - - Whether the repository should be private or not. - - Defaults to V(false) if O(force_defaults=true), which is the default in this module. - - Defaults to V(false) if O(force_defaults=false) when creating a new repository. - - This is only used when O(state=present). + - Whether the repository should be private or not. + - Defaults to V(false) if O(force_defaults=true), which is the default in this module. + - Defaults to V(false) if O(force_defaults=false) when creating a new repository. + - This is only used when O(state=present). 
     type: bool
     required: false
   state:
     description:
-    - Whether the repository should exist or not.
+      - Whether the repository should exist or not.
     type: str
     default: present
-    choices: [ absent, present ]
+    choices: [absent, present]
     required: false
   organization:
     description:
-    - Organization for the repository.
-    - When O(state=present), the repository will be created in the current user profile.
+      - Organization for the repository.
+      - When O(state=present), the repository will be created in the current user profile.
     type: str
     required: false
   api_url:
     description:
-    - URL to the GitHub API if not using github.com but you own instance.
+      - URL to the GitHub API if not using github.com but your own instance.
     type: str
     default: 'https://api.github.com'
     version_added: "3.5.0"
   force_defaults:
     description:
-    - Overwrite current O(description) and O(private) attributes with defaults if set to V(true), which currently is the default.
-    - The default for this option will be deprecated in a future version of this collection, and eventually change to V(false).
+      - Overwrite current O(description) and O(private) attributes with defaults if set to V(true), which currently is the
+        default.
+      - The default for this option will be deprecated in a future version of this collection, and eventually change to V(false).
     type: bool
     default: true
     required: false
     version_added: 4.1.0
 requirements:
-- PyGithub>=1.54
+  - PyGithub>=1.54
 notes:
-- For Python 3, PyGithub>=1.54 should be used.
-- "For Python 3.5, PyGithub==1.54 should be used. More information: U(https://pygithub.readthedocs.io/en/latest/changes.html#version-1-54-november-30-2020)."
-- "For Python 2.7, PyGithub==1.45 should be used. More information: U(https://pygithub.readthedocs.io/en/latest/changes.html#version-1-45-december-29-2019)."
+  - For Python 3, PyGithub>=1.54 should be used.
+  - 'For Python 3.5, PyGithub==1.54 should be used. More information: U(https://pygithub.readthedocs.io/en/latest/changes.html#version-1-54-november-30-2020).'
+  - 'For Python 2.7, PyGithub==1.45 should be used. More information: U(https://pygithub.readthedocs.io/en/latest/changes.html#version-1-45-december-29-2019).'
 author:
-- Álvaro Torres Cogollo (@atorrescogollo)
-'''
+  - Álvaro Torres Cogollo (@atorrescogollo)
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
 - name: Create a Github repository
   community.general.github_repo:
     access_token: mytoken
@@ -120,14 +120,14 @@ EXAMPLES = '''
     name: myrepo
     state: absent
   register: result
-'''
+"""
-RETURN = '''
+RETURN = r"""
 repo:
   description: Repository information as JSON. See U(https://docs.github.com/en/rest/reference/repos#get-a-repository).
   returned: success and O(state=present)
   type: dict
-'''
+"""
 import traceback
 from ansible.module_utils.basic import AnsibleModule, missing_required_lib
diff --git a/plugins/modules/github_webhook.py b/plugins/modules/github_webhook.py
index 11b115750b..8608c90bc9 100644
--- a/plugins/modules/github_webhook.py
+++ b/plugins/modules/github_webhook.py
@@ -8,12 +8,11 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
 module: github_webhook
 short_description: Manage GitHub webhooks
 description:
-  - "Create and delete GitHub webhooks"
+  - Create and delete GitHub webhooks.
 requirements:
   - "PyGithub >= 1.3.5"
 extends_documentation_fragment:
   - community.general.attributes
 attributes:
@@ -26,22 +25,22 @@ options:
   repository:
     description:
-      - Full name of the repository to configure a hook for
+      - Full name of the repository to configure a hook for.
type: str required: true aliases: - repo url: description: - - URL to which payloads will be delivered + - URL to which payloads will be delivered. type: str required: true content_type: description: - - The media type used to serialize the payloads + - The media type used to serialize the payloads. type: str required: false - choices: [ form, json ] + choices: [form, json] default: form secret: description: @@ -50,61 +49,57 @@ options: required: false insecure_ssl: description: - - > - Flag to indicate that GitHub should skip SSL verification when calling - the hook. + - Flag to indicate that GitHub should skip SSL verification when calling the hook. required: false type: bool default: false events: description: - - > - A list of GitHub events the hook is triggered for. Events are listed at - U(https://developer.github.com/v3/activity/events/types/). Required - unless O(state=absent) + - A list of GitHub events the hook is triggered for. Events are listed at U(https://developer.github.com/v3/activity/events/types/). + Required unless O(state=absent). required: false type: list elements: str active: description: - - Whether or not the hook is active + - Whether or not the hook is active. required: false type: bool default: true state: description: - - Whether the hook should be present or absent + - Whether the hook should be present or absent. type: str required: false - choices: [ absent, present ] + choices: [absent, present] default: present user: description: - - User to authenticate to GitHub as + - User to authenticate to GitHub as. type: str required: true password: description: - - Password to authenticate to GitHub with + - Password to authenticate to GitHub with. type: str required: false token: description: - - Token to authenticate to GitHub with + - Token to authenticate to GitHub with. type: str required: false github_url: description: - - Base URL of the GitHub API + - Base URL of the GitHub API. type: str required: false default: https://api.github.com author: - "Chris St. Pierre (@stpierre)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a new webhook that triggers on push (password auth) community.general.github_webhook: repository: ansible/ansible @@ -135,16 +130,15 @@ EXAMPLES = ''' state: absent user: "{{ github_user }}" password: "{{ github_password }}" -''' +""" -RETURN = ''' ---- +RETURN = r""" hook_id: - description: The GitHub ID of the hook created/updated + description: The GitHub ID of the hook created/updated. returned: when state is 'present' type: int sample: 6206 -''' +""" import traceback diff --git a/plugins/modules/github_webhook_info.py b/plugins/modules/github_webhook_info.py index dcad02a369..440a373f1d 100644 --- a/plugins/modules/github_webhook_info.py +++ b/plugins/modules/github_webhook_info.py @@ -8,12 +8,11 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: github_webhook_info short_description: Query information about GitHub webhooks description: - - "Query information about GitHub webhooks" + - Query information about GitHub webhooks. requirements: - "PyGithub >= 1.3.5" extends_documentation_fragment: @@ -22,38 +21,38 @@ extends_documentation_fragment: options: repository: description: - - Full name of the repository to configure a hook for + - Full name of the repository to configure a hook for. type: str required: true aliases: - repo user: description: - - User to authenticate to GitHub as + - User to authenticate to GitHub as. 
type: str required: true password: description: - - Password to authenticate to GitHub with + - Password to authenticate to GitHub with. type: str required: false token: description: - - Token to authenticate to GitHub with + - Token to authenticate to GitHub with. type: str required: false github_url: description: - - Base URL of the github api + - Base URL of the GitHub API. type: str required: false default: https://api.github.com author: - "Chris St. Pierre (@stpierre)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: List hooks for a repository (password auth) community.general.github_webhook_info: repository: ansible/ansible @@ -68,12 +67,11 @@ EXAMPLES = ''' token: "{{ github_user_api_token }}" github_url: https://github.example.com/api/v3/ register: myrepo_webhooks -''' +""" -RETURN = ''' ---- +RETURN = r""" hooks: - description: A list of hooks that exist for the repo + description: A list of hooks that exist for the repo. returned: always type: list elements: dict @@ -88,7 +86,7 @@ hooks: "id": 6206, "last_response": {"status": "active", "message": "OK", "code": 200} } -''' +""" import traceback diff --git a/plugins/modules/gitlab_branch.py b/plugins/modules/gitlab_branch.py index 623c25644e..b32169ef5a 100644 --- a/plugins/modules/gitlab_branch.py +++ b/plugins/modules/gitlab_branch.py @@ -7,7 +7,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: gitlab_branch short_description: Create or delete a branch version_added: 4.2.0 @@ -50,10 +50,10 @@ options: - Reference branch to create from. - This must be specified if O(state=present). type: str -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create branch branch2 from main community.general.gitlab_branch: api_url: https://gitlab.com @@ -70,11 +70,10 @@ EXAMPLES = ''' project: "group1/project1" branch: branch2 state: absent +""" -''' - -RETURN = ''' -''' +RETURN = r""" +""" import traceback diff --git a/plugins/modules/gitlab_deploy_key.py b/plugins/modules/gitlab_deploy_key.py index 7c0ff06b7b..5a2f582357 100644 --- a/plugins/modules/gitlab_deploy_key.py +++ b/plugins/modules/gitlab_deploy_key.py @@ -11,11 +11,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: gitlab_deploy_key short_description: Manages GitLab project deploy keys description: - - Adds, updates and removes project deploy keys + - Adds, updates and removes project deploy keys. author: - Marcus Watkins (@marwatk) - Guillaume Martinez (@Lunik) @@ -45,7 +45,7 @@ options: type: str key: description: - - Deploy key + - Deploy key. required: true type: str can_push: @@ -55,14 +55,14 @@ options: default: false state: description: - - When V(present) the deploy key added to the project if it doesn't exist. + - When V(present) the deploy key added to the project if it does not exist. - When V(absent) it will be removed from the project if it exists. default: present type: str - choices: [ "present", "absent" ] -''' + choices: ["present", "absent"] +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: "Adding a project deploy key" community.general.gitlab_deploy_key: api_url: https://gitlab.example.com/ @@ -88,32 +88,31 @@ EXAMPLES = ''' project: "my_group/my_project" state: absent key: "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9w..." 
+""" -''' - -RETURN = ''' +RETURN = r""" msg: - description: Success or failure message + description: Success or failure message. returned: always type: str sample: "Success" result: - description: json parsed response from the server + description: JSON-parsed response from the server. returned: always type: dict error: - description: the error message returned by the GitLab API + description: The error message returned by the GitLab API. returned: failed type: str sample: "400: key is already in use" deploy_key: - description: API object + description: API object. returned: always type: dict -''' +""" from ansible.module_utils.api import basic_auth_argument_spec from ansible.module_utils.basic import AnsibleModule @@ -196,9 +195,9 @@ class GitLabDeployKey(object): changed = False for arg_key, arg_value in arguments.items(): - if arguments[arg_key] is not None: - if getattr(deploy_key, arg_key) != arguments[arg_key]: - setattr(deploy_key, arg_key, arguments[arg_key]) + if arg_value is not None: + if getattr(deploy_key, arg_key) != arg_value: + setattr(deploy_key, arg_key, arg_value) changed = True return (changed, deploy_key) diff --git a/plugins/modules/gitlab_group.py b/plugins/modules/gitlab_group.py index 3d57b18528..711318c6d4 100644 --- a/plugins/modules/gitlab_group.py +++ b/plugins/modules/gitlab_group.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: gitlab_group short_description: Creates/updates/deletes GitLab Groups description: @@ -33,66 +32,35 @@ attributes: support: none options: - name: - description: - - Name of the group you want to create. - required: true - type: str - path: - description: - - The path of the group you want to create, this will be api_url/group_path - - If not supplied, the group_name will be used. - type: str - description: - description: - - A description for the group. - type: str - state: - description: - - create or delete group. - - Possible values are present and absent. - default: present - type: str - choices: ["present", "absent"] - parent: - description: - - Allow to create subgroups - - Id or Full path of parent group in the form of group/name - type: str - visibility: - description: - - Default visibility of the group - choices: ["private", "internal", "public"] - default: private - type: str - project_creation_level: - description: - - Determine if developers can create projects in the group. - choices: ["developer", "maintainer", "noone"] - type: str - version_added: 3.7.0 auto_devops_enabled: description: - Default to Auto DevOps pipeline for all projects within this group. type: bool version_added: 3.7.0 - subgroup_creation_level: - description: - - Allowed to create subgroups. - choices: ["maintainer", "owner"] - type: str - version_added: 3.7.0 - require_two_factor_authentication: - description: - - Require all users in this group to setup two-factor authentication. - type: bool - version_added: 3.7.0 avatar_path: description: - Absolute path image to configure avatar. File size should not exceed 200 kb. - This option is only used on creation, not for updates. type: path version_added: 4.2.0 + default_branch: + description: + - All merge requests and commits are made against this branch unless you specify a different one. + type: str + version_added: 9.5.0 + description: + description: + - A description for the group. + type: str + enabled_git_access_protocol: + description: + - V(all) means SSH and HTTP(S) is enabled. 
+ - V(ssh) means only SSH is enabled. + - V(http) means only HTTP(S) is enabled. + - Only available for top level groups. + choices: ["all", "ssh", "http"] + type: str + version_added: 9.5.0 force_delete: description: - Force delete group even if projects in it. @@ -100,9 +68,116 @@ options: type: bool default: false version_added: 7.5.0 -''' + lfs_enabled: + description: + - Projects in this group can use Git LFS. + type: bool + version_added: 9.5.0 + lock_duo_features_enabled: + description: + - Enforce GitLab Duo features for all subgroups. + - Only available for top level groups. + type: bool + version_added: 9.5.0 + membership_lock: + description: + - Users cannot be added to projects in this group. + type: bool + version_added: 9.5.0 + mentions_disabled: + description: + - Group mentions are disabled. + type: bool + version_added: 9.5.0 + name: + description: + - Name of the group you want to create. + required: true + type: str + parent: + description: + - Allow to create subgroups. + - Id or Full path of parent group in the form of group/name. + type: str + path: + description: + - The path of the group you want to create, this will be api_url/group_path. + - If not supplied, the group_name will be used. + type: str + prevent_forking_outside_group: + description: + - Prevent forking outside of the group. + type: bool + version_added: 9.5.0 + prevent_sharing_groups_outside_hierarchy: + description: + - Members cannot invite groups outside of this group and its subgroups. + - Only available for top level groups. + type: bool + version_added: 9.5.0 + project_creation_level: + description: + - Determine if developers can create projects in the group. + choices: ["developer", "maintainer", "noone"] + type: str + version_added: 3.7.0 + request_access_enabled: + description: + - Users can request access (if visibility is public or internal). + type: bool + version_added: 9.5.0 + service_access_tokens_expiration_enforced: + description: + - Service account token expiration. + - Changes will not affect existing token expiration dates. + - Only available for top level groups. + type: bool + version_added: 9.5.0 + share_with_group_lock: + description: + - Projects cannot be shared with other groups. + type: bool + version_added: 9.5.0 + require_two_factor_authentication: + description: + - Require all users in this group to setup two-factor authentication. + type: bool + version_added: 3.7.0 + state: + description: + - Create or delete group. + - Possible values are present and absent. + default: present + type: str + choices: ["present", "absent"] + subgroup_creation_level: + description: + - Allowed to create subgroups. + choices: ["maintainer", "owner"] + type: str + version_added: 3.7.0 + two_factor_grace_period: + description: + - Delay 2FA enforcement (hours). + type: str + version_added: 9.5.0 + visibility: + description: + - Default visibility of the group. + choices: ["private", "internal", "public"] + default: private + type: str + wiki_access_level: + description: + - V(enabled) means everyone can access the wiki. + - V(private) means only members of this group can access the wiki. + - V(disabled) means group-level wiki is disabled. 
+ choices: ["enabled", "private", "disabled"] + type: str + version_added: 9.5.0 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: "Delete GitLab Group" community.general.gitlab_group: api_url: https://gitlab.example.com/ @@ -145,31 +220,31 @@ EXAMPLES = ''' project_creation_level: noone auto_devops_enabled: false subgroup_creation_level: maintainer -''' +""" -RETURN = ''' +RETURN = r""" msg: - description: Success or failure message + description: Success or failure message. returned: always type: str sample: "Success" result: - description: json parsed response from the server + description: JSON-parsed response from the server. returned: always type: dict error: - description: the error message returned by the GitLab API + description: The error message returned by the GitLab API. returned: failed type: str sample: "400: path is already in use" group: - description: API object + description: API object. returned: always type: dict -''' +""" from ansible.module_utils.api import basic_auth_argument_spec from ansible.module_utils.basic import AnsibleModule @@ -202,23 +277,38 @@ class GitLabGroup(object): def create_or_update_group(self, name, parent, options): changed = False + payload = { + 'auto_devops_enabled': options['auto_devops_enabled'], + 'default_branch': options['default_branch'], + 'description': options['description'], + 'lfs_enabled': options['lfs_enabled'], + 'membership_lock': options['membership_lock'], + 'mentions_disabled': options['mentions_disabled'], + 'name': name, + 'path': options['path'], + 'prevent_forking_outside_group': options['prevent_forking_outside_group'], + 'project_creation_level': options['project_creation_level'], + 'request_access_enabled': options['request_access_enabled'], + 'require_two_factor_authentication': options['require_two_factor_authentication'], + 'share_with_group_lock': options['share_with_group_lock'], + 'subgroup_creation_level': options['subgroup_creation_level'], + 'visibility': options['visibility'], + 'wiki_access_level': options['wiki_access_level'], + } + if options.get('enabled_git_access_protocol') and parent is None: + payload['enabled_git_access_protocol'] = options['enabled_git_access_protocol'] + if options.get('lock_duo_features_enabled') and parent is None: + payload['lock_duo_features_enabled'] = options['lock_duo_features_enabled'] + if options.get('prevent_sharing_groups_outside_hierarchy') and parent is None: + payload['prevent_sharing_groups_outside_hierarchy'] = options['prevent_sharing_groups_outside_hierarchy'] + if options.get('service_access_tokens_expiration_enforced') and parent is None: + payload['service_access_tokens_expiration_enforced'] = options['service_access_tokens_expiration_enforced'] + if options.get('two_factor_grace_period'): + payload['two_factor_grace_period'] = int(options['two_factor_grace_period']) + # Because we have already call userExists in main() if self.group_object is None: - parent_id = self.get_group_id(parent) - - payload = { - 'name': name, - 'path': options['path'], - 'parent_id': parent_id, - 'visibility': options['visibility'], - 'project_creation_level': options['project_creation_level'], - 'auto_devops_enabled': options['auto_devops_enabled'], - 'subgroup_creation_level': options['subgroup_creation_level'], - } - if options.get('description'): - payload['description'] = options['description'] - if options.get('require_two_factor_authentication'): - payload['require_two_factor_authentication'] = options['require_two_factor_authentication'] + payload['parent_id'] = 
self.get_group_id(parent) group = self.create_group(payload) # add avatar to group @@ -229,15 +319,7 @@ class GitLabGroup(object): self._module.fail_json(msg='Cannot open {0}: {1}'.format(options['avatar_path'], e)) changed = True else: - changed, group = self.update_group(self.group_object, { - 'name': name, - 'description': options['description'], - 'visibility': options['visibility'], - 'project_creation_level': options['project_creation_level'], - 'auto_devops_enabled': options['auto_devops_enabled'], - 'subgroup_creation_level': options['subgroup_creation_level'], - 'require_two_factor_authentication': options['require_two_factor_authentication'], - }) + changed, group = self.update_group(self.group_object, payload) self.group_object = group if changed: @@ -261,7 +343,7 @@ class GitLabGroup(object): try: # Filter out None values - filtered = dict((arg_key, arg_value) for arg_key, arg_value in arguments.items() if arg_value is not None) + filtered = {arg_key: arg_value for arg_key, arg_value in arguments.items() if arg_value is not None} group = self._gitlab.groups.create(filtered) except (gitlab.exceptions.GitlabCreateError) as e: @@ -277,9 +359,9 @@ class GitLabGroup(object): changed = False for arg_key, arg_value in arguments.items(): - if arguments[arg_key] is not None: - if getattr(group, arg_key) != arguments[arg_key]: - setattr(group, arg_key, arguments[arg_key]) + if arg_value is not None: + if getattr(group, arg_key) != arg_value: + setattr(group, arg_key, arg_value) changed = True return (changed, group) @@ -322,28 +404,41 @@ def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(auth_argument_spec()) argument_spec.update(dict( - name=dict(type='str', required=True), - path=dict(type='str'), - description=dict(type='str'), - state=dict(type='str', default="present", choices=["absent", "present"]), - parent=dict(type='str'), - visibility=dict(type='str', default="private", choices=["internal", "private", "public"]), - project_creation_level=dict(type='str', choices=['developer', 'maintainer', 'noone']), auto_devops_enabled=dict(type='bool'), - subgroup_creation_level=dict(type='str', choices=['maintainer', 'owner']), - require_two_factor_authentication=dict(type='bool'), avatar_path=dict(type='path'), + default_branch=dict(type='str'), + description=dict(type='str'), + enabled_git_access_protocol=dict(type='str', choices=['all', 'ssh', 'http']), force_delete=dict(type='bool', default=False), + lfs_enabled=dict(type='bool'), + lock_duo_features_enabled=dict(type='bool'), + membership_lock=dict(type='bool'), + mentions_disabled=dict(type='bool'), + name=dict(type='str', required=True), + parent=dict(type='str'), + path=dict(type='str'), + prevent_forking_outside_group=dict(type='bool'), + prevent_sharing_groups_outside_hierarchy=dict(type='bool'), + project_creation_level=dict(type='str', choices=['developer', 'maintainer', 'noone']), + request_access_enabled=dict(type='bool'), + require_two_factor_authentication=dict(type='bool'), + service_access_tokens_expiration_enforced=dict(type='bool'), + share_with_group_lock=dict(type='bool'), + state=dict(type='str', default="present", choices=["absent", "present"]), + subgroup_creation_level=dict(type='str', choices=['maintainer', 'owner']), + two_factor_grace_period=dict(type='str'), + visibility=dict(type='str', default="private", choices=["internal", "private", "public"]), + wiki_access_level=dict(type='str', choices=['enabled', 'private', 'disabled']), )) module = AnsibleModule( argument_spec=argument_spec, 
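# Editor's note: hedged sketch of the compare-and-set pattern used by update_group()
# above (the same loop appears in gitlab_deploy_key and gitlab_project): only
# attributes whose desired value is set and differs from the current API object are
# written, and a single `changed` flag is tracked for Ansible. `obj` is a stand-in
# for a python-gitlab resource.
def apply_changes(obj, desired):
    changed = False
    for key, value in desired.items():
        if value is not None and getattr(obj, key) != value:
            setattr(obj, key, value)
            changed = True
    return changed, obj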
mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], ['api_token', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_token'], ], required_together=[ ['api_username', 'api_password'], @@ -357,18 +452,31 @@ def main(): # check prerequisites and connect to gitlab server gitlab_instance = gitlab_authentication(module) + auto_devops_enabled = module.params['auto_devops_enabled'] + avatar_path = module.params['avatar_path'] + default_branch = module.params['default_branch'] + description = module.params['description'] + enabled_git_access_protocol = module.params['enabled_git_access_protocol'] + force_delete = module.params['force_delete'] group_name = module.params['name'] group_path = module.params['path'] - description = module.params['description'] - state = module.params['state'] - parent_identifier = module.params['parent'] group_visibility = module.params['visibility'] + lfs_enabled = module.params['lfs_enabled'] + lock_duo_features_enabled = module.params['lock_duo_features_enabled'] + membership_lock = module.params['membership_lock'] + mentions_disabled = module.params['mentions_disabled'] + parent_identifier = module.params['parent'] + prevent_forking_outside_group = module.params['prevent_forking_outside_group'] + prevent_sharing_groups_outside_hierarchy = module.params['prevent_sharing_groups_outside_hierarchy'] project_creation_level = module.params['project_creation_level'] - auto_devops_enabled = module.params['auto_devops_enabled'] - subgroup_creation_level = module.params['subgroup_creation_level'] + request_access_enabled = module.params['request_access_enabled'] require_two_factor_authentication = module.params['require_two_factor_authentication'] - avatar_path = module.params['avatar_path'] - force_delete = module.params['force_delete'] + service_access_tokens_expiration_enforced = module.params['service_access_tokens_expiration_enforced'] + share_with_group_lock = module.params['share_with_group_lock'] + state = module.params['state'] + subgroup_creation_level = module.params['subgroup_creation_level'] + two_factor_grace_period = module.params['two_factor_grace_period'] + wiki_access_level = module.params['wiki_access_level'] # Define default group_path based on group_name if group_path is None: @@ -380,7 +488,7 @@ def main(): if parent_identifier: parent_group = find_group(gitlab_instance, parent_identifier) if not parent_group: - module.fail_json(msg="Failed create GitLab group: Parent group doesn't exists") + module.fail_json(msg="Failed to create GitLab group: Parent group doesn't exist") group_exists = gitlab_group.exists_group(parent_group.full_path + '/' + group_path) else: @@ -391,18 +499,31 @@ def main(): gitlab_group.delete_group(force=force_delete) module.exit_json(changed=True, msg="Successfully deleted group %s" % group_name) else: - module.exit_json(changed=False, msg="Group deleted or does not exists") + module.exit_json(changed=False, msg="Group deleted or does not exist") if state == 'present': if gitlab_group.create_or_update_group(group_name, parent_group, { - "path": group_path, - "description": description, - "visibility": group_visibility, - "project_creation_level": project_creation_level, "auto_devops_enabled": auto_devops_enabled, - "subgroup_creation_level": subgroup_creation_level, - "require_two_factor_authentication": 
require_two_factor_authentication, "avatar_path": avatar_path, + "default_branch": default_branch, + "description": description, + "enabled_git_access_protocol": enabled_git_access_protocol, + "lfs_enabled": lfs_enabled, + "lock_duo_features_enabled": lock_duo_features_enabled, + "membership_lock": membership_lock, + "mentions_disabled": mentions_disabled, + "path": group_path, + "prevent_forking_outside_group": prevent_forking_outside_group, + "prevent_sharing_groups_outside_hierarchy": prevent_sharing_groups_outside_hierarchy, + "project_creation_level": project_creation_level, + "request_access_enabled": request_access_enabled, + "require_two_factor_authentication": require_two_factor_authentication, + "service_access_tokens_expiration_enforced": service_access_tokens_expiration_enforced, + "share_with_group_lock": share_with_group_lock, + "subgroup_creation_level": subgroup_creation_level, + "two_factor_grace_period": two_factor_grace_period, + "visibility": group_visibility, + "wiki_access_level": wiki_access_level, }): module.exit_json(changed=True, msg="Successfully created or updated the group %s" % group_name, group=gitlab_group.group_object._attrs) else: diff --git a/plugins/modules/gitlab_group_access_token.py b/plugins/modules/gitlab_group_access_token.py index 85bba205db..bcf75e056b 100644 --- a/plugins/modules/gitlab_group_access_token.py +++ b/plugins/modules/gitlab_group_access_token.py @@ -12,7 +12,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: gitlab_group_access_token short_description: Manages GitLab group access tokens version_added: 8.4.0 @@ -27,11 +27,10 @@ extends_documentation_fragment: - community.general.gitlab - community.general.attributes notes: - - Access tokens can not be changed. If a parameter needs to be changed, an acceess token has to be recreated. - Whether tokens will be recreated is controlled by the O(recreate) option, which defaults to V(never). + - Access tokens can not be changed. If a parameter needs to be changed, an acceess token has to be recreated. Whether tokens + will be recreated is controlled by the O(recreate) option, which defaults to V(never). - Token string is contained in the result only when access token is created or recreated. It can not be fetched afterwards. - Token matching is done by comparing O(name) option. - attributes: check_mode: support: full @@ -56,7 +55,8 @@ options: type: list elements: str aliases: ["scope"] - choices: ["api", "read_api", "read_registry", "write_registry", "read_repository", "write_repository", "create_runner", "ai_features", "k8s_proxy"] + choices: ["api", "read_api", "read_registry", "write_registry", "read_repository", "write_repository", "create_runner", + "ai_features", "k8s_proxy"] access_level: description: - Access level of the access token. @@ -84,10 +84,10 @@ options: - When V(absent) it will be removed from the group if it exists. default: present type: str - choices: [ "present", "absent" ] -''' + choices: ["present", "absent"] +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: "Creating a group access token" community.general.gitlab_group_access_token: api_url: https://gitlab.example.com/ @@ -131,16 +131,16 @@ EXAMPLES = r''' - write_repository recreate: state_change state: present -''' +""" -RETURN = r''' +RETURN = r""" access_token: description: - API object. - Only contains the value of the token if the token was created or recreated. 
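# Editor's note: hedged illustration of the RETURN semantics documented above: the
# token string is only present in the create/recreate response and cannot be read
# back later, so it has to be captured immediately. Assumes a recent python-gitlab;
# treat the exact manager and attribute names as an approximation, not a reference.
import gitlab

gl = gitlab.Gitlab('https://gitlab.example.com', private_token='REDACTED')
group = gl.groups.get(42)  # group ID (or full path)
token = group.access_tokens.create({
    'name': 'ci-token',
    'scopes': ['read_api'],
    'access_level': 30,          # developer
    'expires_at': '2025-01-01',
})
print(token.token)  # the only time the secret is visible; listing it later omits it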
returned: success and O(state=present) type: dict -''' +""" from datetime import datetime @@ -313,7 +313,10 @@ def main(): module.exit_json(changed=True, msg="Successfully recreated access token", access_token=gitlab_access_token.access_token_object._attrs) else: gitlab_access_token.create_access_token(group, {'name': name, 'scopes': scopes, 'access_level': access_level, 'expires_at': expires_at}) - module.exit_json(changed=True, msg="Successfully created access token", access_token=gitlab_access_token.access_token_object._attrs) + if module.check_mode: + module.exit_json(changed=True, msg="Successfully created access token", access_token={}) + else: + module.exit_json(changed=True, msg="Successfully created access token", access_token=gitlab_access_token.access_token_object._attrs) if __name__ == '__main__': diff --git a/plugins/modules/gitlab_group_members.py b/plugins/modules/gitlab_group_members.py index ca82891e30..86e9e6474a 100644 --- a/plugins/modules/gitlab_group_members.py +++ b/plugins/modules/gitlab_group_members.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: gitlab_group_members short_description: Manage group members on GitLab Server description: @@ -81,16 +80,16 @@ options: type: str purge_users: description: - - Adds/remove users of the given access_level to match the given O(gitlab_user)/O(gitlab_users_access) list. - If omitted do not purge orphaned members. + - Adds/remove users of the given access_level to match the given O(gitlab_user)/O(gitlab_users_access) list. If omitted + do not purge orphaned members. - Is only used when O(state=present). type: list elements: str choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner'] version_added: 3.6.0 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Add a user to a GitLab Group community.general.gitlab_group_members: api_url: 'https://gitlab.example.com' @@ -152,9 +151,9 @@ EXAMPLES = r''' - name: user2 access_level: maintainer state: absent -''' +""" -RETURN = r''' # ''' +RETURN = r""" # """ from ansible.module_utils.api import basic_auth_argument_spec from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/gitlab_group_variable.py b/plugins/modules/gitlab_group_variable.py index 32e5aaa904..926f4fe20a 100644 --- a/plugins/modules/gitlab_group_variable.py +++ b/plugins/modules/gitlab_group_variable.py @@ -9,15 +9,15 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: gitlab_group_variable short_description: Creates, updates, or deletes GitLab groups variables version_added: 1.2.0 description: - Creates a group variable if it does not exist. - When a group variable does exist, its value will be updated when the values are different. - - Variables which are untouched in the playbook, but are not untouched in the GitLab group, - they stay untouched (O(purge=false)) or will be deleted (O(purge=true)). + - Variables which are untouched in the playbook, but are not untouched in the GitLab group, they stay untouched (O(purge=false)) + or will be deleted (O(purge=true)). author: - Florent Madiot (@scodeman) requirements: @@ -53,8 +53,8 @@ options: vars: description: - When the list element is a simple key-value pair, masked, raw and protected will be set to false. 
- - When the list element is a dict with the keys C(value), C(masked), C(raw) and C(protected), the user can - have full control about whether a value should be masked, raw, protected or both. + - When the list element is a dict with the keys C(value), C(masked), C(raw) and C(protected), the user can have full + control about whether a value should be masked, raw, protected or both. - Support for group variables requires GitLab >= 9.5. - Support for environment_scope requires GitLab Premium >= 13.11. - Support for protected values requires GitLab >= 9.3. @@ -62,8 +62,8 @@ options: - Support for raw values requires GitLab >= 15.7. - A C(value) must be a string or a number. - Field C(variable_type) must be a string with either V(env_var), which is the default, or V(file). - - When a value is masked, it must be in Base64 and have a length of at least 8 characters. - See GitLab documentation on acceptable values for a masked variable (U(https://docs.gitlab.com/ce/ci/variables/#masked-variables)). + - When a value is masked, it must be in Base64 and have a length of at least 8 characters. See GitLab documentation + on acceptable values for a masked variable (U(https://docs.gitlab.com/ce/ci/variables/#masked-variables)). default: {} type: dict variables: @@ -106,17 +106,17 @@ options: description: - Whether a variable is an environment variable (V(env_var)) or a file (V(file)). type: str - choices: [ "env_var", "file" ] + choices: ["env_var", "file"] default: env_var environment_scope: description: - The scope for the variable. type: str default: '*' -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Set or update some CI/CD variables community.general.gitlab_group_variable: api_url: https://gitlab.com @@ -173,9 +173,9 @@ EXAMPLES = r''' state: absent vars: ACCESS_KEY_ID: abc123 -''' +""" -RETURN = r''' +RETURN = r""" group_variable: description: Four lists of the variablenames which were added, updated, removed or exist. returned: always @@ -201,7 +201,7 @@ group_variable: returned: always type: list sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY'] -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.api import basic_auth_argument_spec diff --git a/plugins/modules/gitlab_hook.py b/plugins/modules/gitlab_hook.py index 58781d182b..ccc3310ce5 100644 --- a/plugins/modules/gitlab_hook.py +++ b/plugins/modules/gitlab_hook.py @@ -11,12 +11,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: gitlab_hook short_description: Manages GitLab project hooks description: - - Adds, updates and removes project hook + - Adds, updates and removes project hook. author: - Marcus Watkins (@marwatk) - Guillaume Martinez (@Lunik) @@ -46,11 +45,11 @@ options: type: str state: description: - - When V(present) the hook will be updated to match the input or created if it doesn't exist. + - When V(present) the hook will be updated to match the input or created if it does not exist. - When V(absent) hook will be deleted if it exists. default: present type: str - choices: [ "present", "absent" ] + choices: ["present", "absent"] push_events: description: - Trigger hook on push events. @@ -58,7 +57,7 @@ options: default: true push_events_branch_filter: description: - - Branch name of wildcard to trigger hook on push events + - Branch name of wildcard to trigger hook on push events. 
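# Editor's note: hedged sketch of the two `vars` forms documented in the
# gitlab_group_variable hunk above: a bare value becomes an unmasked, unprotected,
# non-raw env_var with the default '*' scope, while the dict form keeps whatever
# flags the user set. The helper name and the 'key' field are illustrative only.
def normalize_variable(name, spec):
    defaults = {'masked': False, 'protected': False, 'raw': False,
                'variable_type': 'env_var', 'environment_scope': '*'}
    if isinstance(spec, dict):
        normalized = dict(defaults, **spec)
    else:
        normalized = dict(defaults, value=str(spec))
    normalized['key'] = name
    return normalized

# e.g. normalize_variable('ACCESS_KEY_ID', 'abc123') and
#      normalize_variable('SECRET', {'value': 'c2VjcmV0MTIz', 'masked': True})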
type: str version_added: '0.2.0' default: '' @@ -107,7 +106,7 @@ options: - Whether GitLab will do SSL verification when triggering the hook. type: bool default: false - aliases: [ enable_ssl_verification ] + aliases: [enable_ssl_verification] token: description: - Secret token to validate hook messages at the receiver. @@ -115,9 +114,9 @@ options: - Will show up in the X-GitLab-Token HTTP request header. required: false type: str -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: "Adding a project hook" community.general.gitlab_hook: api_url: https://gitlab.example.com/ @@ -144,31 +143,31 @@ EXAMPLES = ''' project: 10 hook_url: "https://my-ci-server.example.com/gitlab-hook" state: absent -''' +""" -RETURN = ''' +RETURN = r""" msg: - description: Success or failure message + description: Success or failure message. returned: always type: str sample: "Success" result: - description: json parsed response from the server + description: Json parsed response from the server. returned: always type: dict error: - description: the error message returned by the GitLab API + description: The error message returned by the GitLab API. returned: failed type: str sample: "400: path is already in use" hook: - description: API object + description: API object. returned: always type: dict -''' +""" from ansible.module_utils.api import basic_auth_argument_spec from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/gitlab_instance_variable.py b/plugins/modules/gitlab_instance_variable.py index cc2d812ca3..be89238eb4 100644 --- a/plugins/modules/gitlab_instance_variable.py +++ b/plugins/modules/gitlab_instance_variable.py @@ -10,7 +10,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: gitlab_instance_variable short_description: Creates, updates, or deletes GitLab instance variables version_added: 7.1.0 @@ -18,8 +18,8 @@ description: - Creates a instance variable if it does not exist. - When a instance variable does exist, its value will be updated if the values are different. - Support for instance variables requires GitLab >= 13.0. - - Variables which are not mentioned in the modules options, but are present on the GitLab instance, - will either stay (O(purge=false)) or will be deleted (O(purge=true)). + - Variables which are not mentioned in the modules options, but are present on the GitLab instance, will either stay (O(purge=false)) + or will be deleted (O(purge=true)). author: - Benedikt Braunger (@benibr) requirements: @@ -78,12 +78,12 @@ options: description: - Whether a variable is an environment variable (V(env_var)) or a file (V(file)). type: str - choices: [ "env_var", "file" ] + choices: ["env_var", "file"] default: env_var -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Set or update some CI/CD variables community.general.gitlab_instance_variable: api_url: https://gitlab.com @@ -105,9 +105,9 @@ EXAMPLES = r''' state: absent variables: - name: ACCESS_KEY_ID -''' +""" -RETURN = r''' +RETURN = r""" instance_variable: description: Four lists of the variablenames which were added, updated, removed or exist. 
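# Editor's note: hedged illustration of the `token` option documented in the
# gitlab_hook hunk above: GitLab sends the configured secret in the X-Gitlab-Token
# header of every hook delivery, so a receiver can reject requests that do not carry
# it. Flask is used purely as an example receiver; endpoint and token are made up.
from flask import Flask, abort, request

app = Flask(__name__)
EXPECTED_TOKEN = 'change-me'

@app.route('/gitlab-hook', methods=['POST'])
def gitlab_hook():
    if request.headers.get('X-Gitlab-Token') != EXPECTED_TOKEN:
        abort(403)
    return '', 204

if __name__ == '__main__':
    app.run()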
returned: always @@ -133,7 +133,7 @@ instance_variable: returned: always type: list sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY'] -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.api import basic_auth_argument_spec diff --git a/plugins/modules/gitlab_issue.py b/plugins/modules/gitlab_issue.py index 3277c4f1aa..47b6f072e8 100644 --- a/plugins/modules/gitlab_issue.py +++ b/plugins/modules/gitlab_issue.py @@ -12,7 +12,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: gitlab_issue short_description: Create, update, or delete GitLab issues version_added: '8.1.0' @@ -97,10 +97,10 @@ options: - A title for the issue. The title is used as a unique identifier to ensure idempotency. type: str required: true -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create Issue community.general.gitlab_issue: api_url: https://gitlab.com @@ -109,10 +109,10 @@ EXAMPLES = ''' title: "Ansible demo Issue" description: "Demo Issue description" labels: - - Ansible - - Demo + - Ansible + - Demo assignee_ids: - - testassignee + - testassignee state_filter: "opened" state: present @@ -124,9 +124,9 @@ EXAMPLES = ''' title: "Ansible demo Issue" state_filter: "opened" state: absent -''' +""" -RETURN = r''' +RETURN = r""" msg: description: Success or failure message. returned: always @@ -137,7 +137,7 @@ issue: description: API object. returned: success type: dict -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.api import basic_auth_argument_spec @@ -264,14 +264,14 @@ class GitlabIssue(object): if key == 'milestone_id': old_milestone = getattr(issue, 'milestone')['id'] if getattr(issue, 'milestone') else "" - if options[key] != old_milestone: + if value != old_milestone: return True elif key == 'assignee_ids': - if options[key] != sorted([user["id"] for user in getattr(issue, 'assignees')]): + if value != sorted([user["id"] for user in getattr(issue, 'assignees')]): return True elif key == 'labels': - if options[key] != sorted(getattr(issue, key)): + if value != sorted(getattr(issue, key)): return True elif getattr(issue, key) != value: diff --git a/plugins/modules/gitlab_label.py b/plugins/modules/gitlab_label.py index 635033ab6c..44fbf1ae02 100644 --- a/plugins/modules/gitlab_label.py +++ b/plugins/modules/gitlab_label.py @@ -7,9 +7,9 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: gitlab_label -short_description: Creates/updates/deletes GitLab Labels belonging to project or group. +short_description: Creates/updates/deletes GitLab Labels belonging to project or group version_added: 8.3.0 description: - When a label does not exist, it will be created. @@ -45,12 +45,12 @@ options: required: false project: description: - - The path and name of the project. Either this or O(group) is required. + - The path and name of the project. Either this or O(group) is required. required: false type: str group: description: - - The path of the group. Either this or O(project) is required. + - The path of the group. Either this or O(project) is required. required: false type: str labels: @@ -76,21 +76,21 @@ options: - Integer value to give priority to the label. type: int required: false - default: null + default: description: description: - Label's description. type: str - default: null + default: new_name: description: - Optional field to change label's name. 
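# Editor's note: hedged sketch of the comparison change in the gitlab_issue hunk
# above: iterating over arguments.items() lets the loop use `value` directly instead
# of re-indexing options[key], and list-valued fields (labels, assignee IDs) are
# compared order-insensitively by sorting both sides. Helper names are illustrative.
def labels_differ(current_labels, desired_labels):
    return sorted(current_labels) != sorted(desired_labels)

def assignees_differ(current_assignees, desired_ids):
    return sorted(user['id'] for user in current_assignees) != sorted(desired_ids)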
type: str - default: null -''' + default: +""" -EXAMPLES = ''' +EXAMPLES = r""" # same project's task can be executed for group - name: Create one Label community.general.gitlab_label: @@ -185,9 +185,9 @@ EXAMPLES = ''' labels: - name: label-abc123 - name: label-two -''' +""" -RETURN = ''' +RETURN = r""" labels: description: Four lists of the labels which were added, updated, removed or exist. returned: success @@ -217,7 +217,7 @@ labels_obj: description: API object. returned: success type: dict -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.api import basic_auth_argument_spec @@ -275,6 +275,8 @@ class GitlabLabels(object): _label.description = var_obj.get('description') if var_obj.get('priority') is not None: _label.priority = var_obj.get('priority') + if var_obj.get('color') is not None: + _label.color = var_obj.get('color') # save returns None _label.save() diff --git a/plugins/modules/gitlab_merge_request.py b/plugins/modules/gitlab_merge_request.py index 5bb9cb9c7d..fd6068980a 100644 --- a/plugins/modules/gitlab_merge_request.py +++ b/plugins/modules/gitlab_merge_request.py @@ -12,7 +12,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: gitlab_merge_request short_description: Create, update, or delete GitLab merge requests version_added: 7.1.0 @@ -21,8 +21,7 @@ description: - When a single merge request does exist, it will be updated if the provided parameters are different. - When a single merge request does exist and O(state=absent), the merge request will be deleted. - When multiple merge requests are detected, the task fails. - - Existing merge requests are matched based on O(title), O(source_branch), O(target_branch), - and O(state_filter) filters. + - Existing merge requests are matched based on O(title), O(source_branch), O(target_branch), and O(state_filter) filters. author: - zvaraondrej (@zvaraondrej) requirements: @@ -102,10 +101,10 @@ options: - Comma separated list of reviewers usernames omitting V(@) character. - Set to empty string to unassign all reviewers. type: str -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create Merge Request from branch1 to branch2 community.general.gitlab_merge_request: api_url: https://gitlab.com @@ -117,7 +116,7 @@ EXAMPLES = ''' description: "Demo MR description" labels: "Ansible,Demo" state_filter: "opened" - remove_source_branch: True + remove_source_branch: true state: present - name: Delete Merge Request from branch1 to branch2 @@ -130,9 +129,9 @@ EXAMPLES = ''' title: "Ansible demo MR" state_filter: "opened" state: absent -''' +""" -RETURN = r''' +RETURN = r""" msg: description: Success or failure message. returned: always @@ -143,7 +142,7 @@ mr: description: API object. 
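# Editor's note: hedged sketch of the gitlab_label update step above, including the
# newly handled `color` field: only the attributes that were actually provided are
# copied onto the existing label object before save() persists them. `label` stands
# in for a python-gitlab label resource.
def update_label(label, desired):
    for field in ('new_name', 'description', 'priority', 'color'):
        if desired.get(field) is not None:
            setattr(label, field, desired[field])
    label.save()  # sends only the modified attributes to the API
    return label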
returned: success type: dict -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.api import basic_auth_argument_spec @@ -263,15 +262,15 @@ class GitlabMergeRequest(object): key = 'force_remove_source_branch' if key == 'assignee_ids': - if options[key] != sorted([user["id"] for user in getattr(mr, 'assignees')]): + if value != sorted([user["id"] for user in getattr(mr, 'assignees')]): return True elif key == 'reviewer_ids': - if options[key] != sorted([user["id"] for user in getattr(mr, 'reviewers')]): + if value != sorted([user["id"] for user in getattr(mr, 'reviewers')]): return True elif key == 'labels': - if options[key] != sorted(getattr(mr, key)): + if value != sorted(getattr(mr, key)): return True elif getattr(mr, key) != value: diff --git a/plugins/modules/gitlab_milestone.py b/plugins/modules/gitlab_milestone.py index 4b8b933cc0..99b922c4dd 100644 --- a/plugins/modules/gitlab_milestone.py +++ b/plugins/modules/gitlab_milestone.py @@ -7,7 +7,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: gitlab_milestone short_description: Creates/updates/deletes GitLab Milestones belonging to project or group version_added: 8.3.0 @@ -83,10 +83,10 @@ options: - Milestone's description. type: str default: null -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # same project's task can be executed for group - name: Create one milestone community.general.gitlab_milestone: @@ -169,9 +169,9 @@ EXAMPLES = ''' milestones: - title: milestone-abc123 - title: milestone-two -''' +""" -RETURN = ''' +RETURN = r""" milestones: description: Four lists of the milestones which were added, updated, removed or exist. returned: success @@ -201,7 +201,7 @@ milestones_obj: description: API object. returned: success type: dict -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.api import basic_auth_argument_spec diff --git a/plugins/modules/gitlab_project.py b/plugins/modules/gitlab_project.py index f1b96bfac5..942e1d9816 100644 --- a/plugins/modules/gitlab_project.py +++ b/plugins/modules/gitlab_project.py @@ -9,13 +9,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: gitlab_project short_description: Creates/updates/deletes GitLab Projects description: - When the project does not exist in GitLab, it will be created. - - When the project does exists and O(state=absent), the project will be deleted. + - When the project does exist and O(state=absent), the project will be deleted. - When changes are made to the project, the project will be updated. author: - Werner Dijkerman (@dj-wasabi) @@ -34,152 +33,17 @@ attributes: support: none options: - group: - description: - - Id or the full path of the group of which this projects belongs to. - type: str - name: - description: - - The name of the project. - required: true - type: str - path: - description: - - The path of the project you want to create, this will be server_url//path. - - If not supplied, name will be used. - type: str - description: - description: - - An description for the project. - type: str - initialize_with_readme: - description: - - Will initialize the project with a default C(README.md). - - Is only used when the project is created, and ignored otherwise. - type: bool - default: false - version_added: "4.0.0" - issues_enabled: - description: - - Whether you want to create issues or not. 
- - Possible values are true and false. - type: bool - default: true - merge_requests_enabled: - description: - - If merge requests can be made or not. - - Possible values are true and false. - type: bool - default: true - wiki_enabled: - description: - - If an wiki for this project should be available or not. - type: bool - default: true - snippets_enabled: - description: - - If creating snippets should be available or not. - type: bool - default: true - visibility: - description: - - V(private) Project access must be granted explicitly for each user. - - V(internal) The project can be cloned by any logged in user. - - V(public) The project can be cloned without any authentication. - default: private - type: str - choices: ["private", "internal", "public"] - aliases: - - visibility_level - import_url: - description: - - Git repository which will be imported into gitlab. - - GitLab server needs read access to this git repository. - required: false - type: str - state: - description: - - Create or delete project. - - Possible values are present and absent. - default: present - type: str - choices: ["present", "absent"] - merge_method: - description: - - What requirements are placed upon merges. - - Possible values are V(merge), V(rebase_merge) merge commit with semi-linear history, V(ff) fast-forward merges only. - type: str - choices: ["ff", "merge", "rebase_merge"] - default: merge - version_added: "1.0.0" - lfs_enabled: - description: - - Enable Git large file systems to manages large files such - as audio, video, and graphics files. - type: bool - required: false - default: false - version_added: "2.0.0" - username: - description: - - Used to create a personal project under a user's name. - type: str - version_added: "3.3.0" allow_merge_on_skipped_pipeline: description: - Allow merge when skipped pipelines exist. type: bool version_added: "3.4.0" - only_allow_merge_if_all_discussions_are_resolved: - description: - - All discussions on a merge request (MR) have to be resolved. - type: bool - version_added: "3.4.0" - only_allow_merge_if_pipeline_succeeds: - description: - - Only allow merges if pipeline succeeded. - type: bool - version_added: "3.4.0" - packages_enabled: - description: - - Enable GitLab package repository. - type: bool - version_added: "3.4.0" - remove_source_branch_after_merge: - description: - - Remove the source branch after merge. - type: bool - version_added: "3.4.0" - squash_option: - description: - - Squash commits when merging. - type: str - choices: ["never", "always", "default_off", "default_on"] - version_added: "3.4.0" - ci_config_path: - description: - - Custom path to the CI configuration file for this project. - type: str - version_added: "3.7.0" - shared_runners_enabled: - description: - - Enable shared runners for this project. - type: bool - version_added: "3.7.0" avatar_path: description: - Absolute path image to configure avatar. File size should not exceed 200 kb. - This option is only used on creation, not for updates. type: path version_added: "4.2.0" - default_branch: - description: - - The default branch name for this project. - - For project creation, this option requires O(initialize_with_readme=true). - - For project update, the branch must exist. - - Supports project's default branch update since community.general 8.0.0. - type: str - version_added: "4.2.0" builds_access_level: description: - V(private) means that repository CI/CD is allowed only to project members. 
@@ -188,14 +52,46 @@ options: type: str choices: ["private", "disabled", "enabled"] version_added: "6.2.0" - forking_access_level: + ci_config_path: description: - - V(private) means that repository forks is allowed only to project members. - - V(disabled) means that repository forks are disabled. - - V(enabled) means that repository forks are enabled. + - Custom path to the CI configuration file for this project. type: str - choices: ["private", "disabled", "enabled"] - version_added: "6.2.0" + version_added: "3.7.0" + container_expiration_policy: + description: + - Project cleanup policy for its container registry. + type: dict + suboptions: + cadence: + description: + - How often cleanup should be run. + type: str + choices: ["1d", "7d", "14d", "1month", "3month"] + enabled: + description: + - Enable the cleanup policy. + type: bool + keep_n: + description: + - Number of tags kept per image name. + - V(0) clears the field. + type: int + choices: [0, 1, 5, 10, 25, 50, 100] + older_than: + description: + - Destroy tags older than this. + - V(0d) clears the field. + type: str + choices: ["0d", "7d", "14d", "30d", "90d"] + name_regex: + description: + - Destroy tags matching this regular expression. + type: str + name_regex_keep: + description: + - Keep tags matching this regular expression. + type: str + version_added: "9.3.0" container_registry_access_level: description: - V(private) means that container registry is allowed only to project members. @@ -204,14 +100,18 @@ options: type: str choices: ["private", "disabled", "enabled"] version_added: "6.2.0" - releases_access_level: + default_branch: description: - - V(private) means that accessing release is allowed only to project members. - - V(disabled) means that accessing release is disabled. - - V(enabled) means that accessing release is enabled. + - The default branch name for this project. + - For project creation, this option requires O(initialize_with_readme=true). + - For project update, the branch must exist. + - Supports project's default branch update since community.general 8.0.0. + type: str + version_added: "4.2.0" + description: + description: + - An description for the project. type: str - choices: ["private", "disabled", "enabled"] - version_added: "6.4.0" environments_access_level: description: - V(private) means that deployment to environment is allowed only to project members. @@ -228,6 +128,24 @@ options: type: str choices: ["private", "disabled", "enabled"] version_added: "6.4.0" + forking_access_level: + description: + - V(private) means that repository forks is allowed only to project members. + - V(disabled) means that repository forks are disabled. + - V(enabled) means that repository forks are enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.2.0" + group: + description: + - Id or the full path of the group of which this projects belongs to. + type: str + import_url: + description: + - Git repository which will be imported into gitlab. + - GitLab server needs read access to this git repository. + required: false + type: str infrastructure_access_level: description: - V(private) means that configuring infrastructure is allowed only to project members. @@ -236,6 +154,56 @@ options: type: str choices: ["private", "disabled", "enabled"] version_added: "6.4.0" + initialize_with_readme: + description: + - Will initialize the project with a default C(README.md). + - Is only used when the project is created, and ignored otherwise. 
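# Editor's note: hedged sketch for the new container_expiration_policy option
# documented above. On project creation the GitLab API takes the cleanup policy under
# the `container_expiration_policy_attributes` key (the same rename create_project()
# performs further down); dropping unset suboptions first is the editor's
# simplification, and the helper name is illustrative.
def expiration_policy_create_args(policy):
    if not policy:
        return {}
    attrs = {key: value for key, value in policy.items() if value is not None}
    return {'container_expiration_policy_attributes': attrs} if attrs else {}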
+ type: bool + default: false + version_added: "4.0.0" + issues_access_level: + description: + - V(private) means that accessing issues tab is allowed only to project members. + - V(disabled) means that accessing issues tab is disabled. + - V(enabled) means that accessing issues tab is enabled. + - O(issues_access_level) and O(issues_enabled) are mutually exclusive. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "9.4.0" + issues_enabled: + description: + - Whether you want to create issues or not. + - O(issues_access_level) and O(issues_enabled) are mutually exclusive. + type: bool + default: true + lfs_enabled: + description: + - Enable Git large file systems to manages large files such as audio, video, and graphics files. + type: bool + required: false + default: false + version_added: "2.0.0" + merge_method: + description: + - What requirements are placed upon merges. + - Possible values are V(merge), V(rebase_merge) merge commit with semi-linear history, V(ff) fast-forward merges only. + type: str + choices: ["ff", "merge", "rebase_merge"] + default: merge + version_added: "1.0.0" + merge_requests_enabled: + description: + - If merge requests can be made or not. + type: bool + default: true + model_registry_access_level: + description: + - V(private) means that accessing model registry tab is allowed only to project members. + - V(disabled) means that accessing model registry tab is disabled. + - V(enabled) means that accessing model registry tab is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "9.3.0" monitor_access_level: description: - V(private) means that monitoring health is allowed only to project members. @@ -244,6 +212,60 @@ options: type: str choices: ["private", "disabled", "enabled"] version_added: "6.4.0" + name: + description: + - The name of the project. + required: true + type: str + only_allow_merge_if_all_discussions_are_resolved: + description: + - All discussions on a merge request (MR) have to be resolved. + type: bool + version_added: "3.4.0" + only_allow_merge_if_pipeline_succeeds: + description: + - Only allow merges if pipeline succeeded. + type: bool + version_added: "3.4.0" + packages_enabled: + description: + - Enable GitLab package repository. + type: bool + version_added: "3.4.0" + pages_access_level: + description: + - V(private) means that accessing pages tab is allowed only to project members. + - V(disabled) means that accessing pages tab is disabled. + - V(enabled) means that accessing pages tab is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "9.3.0" + path: + description: + - The path of the project you want to create, this will be server_url//path. + - If not supplied, name will be used. + type: str + releases_access_level: + description: + - V(private) means that accessing release is allowed only to project members. + - V(disabled) means that accessing release is disabled. + - V(enabled) means that accessing release is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.4.0" + remove_source_branch_after_merge: + description: + - Remove the source branch after merge. + type: bool + version_added: "3.4.0" + repository_access_level: + description: + - V(private) means that accessing repository is allowed only to project members. + - V(disabled) means that accessing repository is disabled. + - V(enabled) means that accessing repository is enabled. 
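# Editor's note: illustrative helper (not part of the module) showing one way a
# playbook author might translate the older boolean `issues_enabled` into the new
# `issues_access_level` option documented above; the two options are mutually
# exclusive, so a task should pass only one of them.
def issues_access_level_from_bool(issues_enabled):
    if issues_enabled is None:
        return None
    return 'enabled' if issues_enabled else 'disabled'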
+ type: str + choices: ["private", "disabled", "enabled"] + version_added: "9.3.0" security_and_compliance_access_level: description: - V(private) means that accessing security and complicance tab is allowed only to project members. @@ -252,6 +274,34 @@ options: type: str choices: ["private", "disabled", "enabled"] version_added: "6.4.0" + service_desk_enabled: + description: + - Enable Service Desk. + type: bool + version_added: "9.3.0" + shared_runners_enabled: + description: + - Enable shared runners for this project. + type: bool + version_added: "3.7.0" + snippets_enabled: + description: + - If creating snippets should be available or not. + type: bool + default: true + squash_option: + description: + - Squash commits when merging. + type: str + choices: ["never", "always", "default_off", "default_on"] + version_added: "3.4.0" + state: + description: + - Create or delete project. + - Possible values are present and absent. + default: present + type: str + choices: ["present", "absent"] topics: description: - A topic or list of topics to be assigned to a project. @@ -259,9 +309,29 @@ options: type: list elements: str version_added: "6.6.0" -''' + username: + description: + - Used to create a personal project under a user's name. + type: str + version_added: "3.3.0" + visibility: + description: + - V(private) Project access must be granted explicitly for each user. + - V(internal) The project can be cloned by any logged in user. + - V(public) The project can be cloned without any authentication. + default: private + type: str + choices: ["private", "internal", "public"] + aliases: + - visibility_level + wiki_enabled: + description: + - If an wiki for this project should be available or not. + type: bool + default: true +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create GitLab Project community.general.gitlab_project: api_url: https://gitlab.example.com/ @@ -306,9 +376,9 @@ EXAMPLES = r''' api_password: "{{ initial_root_password }}" name: my_second_project group: "10481470" -''' +""" -RETURN = r''' +RETURN = r""" msg: description: Success or failure message. returned: always @@ -316,12 +386,12 @@ msg: sample: "Success" result: - description: json parsed response from the server. + description: JSON-parsed response from the server. returned: always type: dict error: - description: the error message returned by the GitLab API. + description: The error message returned by the GitLab API. returned: failed type: str sample: "400: path is already in use" @@ -330,7 +400,7 @@ project: description: API object. 
returned: always type: dict -''' +""" from ansible.module_utils.api import basic_auth_argument_spec @@ -358,32 +428,38 @@ class GitLabProject(object): def create_or_update_project(self, module, project_name, namespace, options): changed = False project_options = { - 'name': project_name, - 'description': options['description'], - 'issues_enabled': options['issues_enabled'], - 'merge_requests_enabled': options['merge_requests_enabled'], - 'merge_method': options['merge_method'], - 'wiki_enabled': options['wiki_enabled'], - 'snippets_enabled': options['snippets_enabled'], - 'visibility': options['visibility'], - 'lfs_enabled': options['lfs_enabled'], 'allow_merge_on_skipped_pipeline': options['allow_merge_on_skipped_pipeline'], + 'builds_access_level': options['builds_access_level'], + 'ci_config_path': options['ci_config_path'], + 'container_expiration_policy': options['container_expiration_policy'], + 'container_registry_access_level': options['container_registry_access_level'], + 'description': options['description'], + 'environments_access_level': options['environments_access_level'], + 'feature_flags_access_level': options['feature_flags_access_level'], + 'forking_access_level': options['forking_access_level'], + 'infrastructure_access_level': options['infrastructure_access_level'], + 'issues_access_level': options['issues_access_level'], + 'issues_enabled': options['issues_enabled'], + 'lfs_enabled': options['lfs_enabled'], + 'merge_method': options['merge_method'], + 'merge_requests_enabled': options['merge_requests_enabled'], + 'model_registry_access_level': options['model_registry_access_level'], + 'monitor_access_level': options['monitor_access_level'], + 'name': project_name, 'only_allow_merge_if_all_discussions_are_resolved': options['only_allow_merge_if_all_discussions_are_resolved'], 'only_allow_merge_if_pipeline_succeeds': options['only_allow_merge_if_pipeline_succeeds'], 'packages_enabled': options['packages_enabled'], - 'remove_source_branch_after_merge': options['remove_source_branch_after_merge'], - 'squash_option': options['squash_option'], - 'ci_config_path': options['ci_config_path'], - 'shared_runners_enabled': options['shared_runners_enabled'], - 'builds_access_level': options['builds_access_level'], - 'forking_access_level': options['forking_access_level'], - 'container_registry_access_level': options['container_registry_access_level'], + 'pages_access_level': options['pages_access_level'], 'releases_access_level': options['releases_access_level'], - 'environments_access_level': options['environments_access_level'], - 'feature_flags_access_level': options['feature_flags_access_level'], - 'infrastructure_access_level': options['infrastructure_access_level'], - 'monitor_access_level': options['monitor_access_level'], + 'remove_source_branch_after_merge': options['remove_source_branch_after_merge'], + 'repository_access_level': options['repository_access_level'], 'security_and_compliance_access_level': options['security_and_compliance_access_level'], + 'service_desk_enabled': options['service_desk_enabled'], + 'shared_runners_enabled': options['shared_runners_enabled'], + 'snippets_enabled': options['snippets_enabled'], + 'squash_option': options['squash_option'], + 'visibility': options['visibility'], + 'wiki_enabled': options['wiki_enabled'], } # topics was introduced on gitlab >=14 and replace tag_list. 
We get current gitlab version @@ -396,7 +472,7 @@ class GitLabProject(object): # Because we have already call userExists in main() if self.project_object is None: if options['default_branch'] and not options['initialize_with_readme']: - module.fail_json(msg="Param default_branch need param initialize_with_readme set to true") + module.fail_json(msg="Param default_branch needs param initialize_with_readme set to true") project_options.update({ 'path': options['path'], 'import_url': options['import_url'], @@ -430,7 +506,7 @@ class GitLabProject(object): try: project.save() except Exception as e: - self._module.fail_json(msg="Failed update project: %s " % e) + self._module.fail_json(msg="Failed to update project: %s " % e) return True return False @@ -443,6 +519,8 @@ class GitLabProject(object): return True arguments['namespace_id'] = namespace.id + if 'container_expiration_policy' in arguments: + arguments['container_expiration_policy_attributes'] = arguments['container_expiration_policy'] try: project = self._gitlab.projects.create(arguments) except (gitlab.exceptions.GitlabCreateError) as e: @@ -454,11 +532,7 @@ class GitLabProject(object): @param arguments Attributes of the project ''' def get_options_with_value(self, arguments): - ret_arguments = dict() - for arg_key, arg_value in arguments.items(): - if arguments[arg_key] is not None: - ret_arguments[arg_key] = arg_value - + ret_arguments = {k: v for k, v in arguments.items() if v is not None} return ret_arguments ''' @@ -469,9 +543,22 @@ class GitLabProject(object): changed = False for arg_key, arg_value in arguments.items(): - if arguments[arg_key] is not None: - if getattr(project, arg_key) != arguments[arg_key]: - setattr(project, arg_key, arguments[arg_key]) + if arg_value is not None: + if getattr(project, arg_key, None) != arg_value: + if arg_key == 'container_expiration_policy': + old_val = getattr(project, arg_key, {}) + final_val = {key: value for key, value in arg_value.items() if value is not None} + + if final_val.get('older_than') == '0d': + final_val['older_than'] = None + if final_val.get('keep_n') == 0: + final_val['keep_n'] = None + + if all(old_val.get(key) == value for key, value in final_val.items()): + continue + setattr(project, 'container_expiration_policy_attributes', final_val) + else: + setattr(project, arg_key, arg_value) changed = True return (changed, project) @@ -501,41 +588,54 @@ def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(auth_argument_spec()) argument_spec.update(dict( - group=dict(type='str'), - name=dict(type='str', required=True), - path=dict(type='str'), - description=dict(type='str'), - initialize_with_readme=dict(type='bool', default=False), - default_branch=dict(type='str'), - issues_enabled=dict(type='bool', default=True), - merge_requests_enabled=dict(type='bool', default=True), - merge_method=dict(type='str', default='merge', choices=["merge", "rebase_merge", "ff"]), - wiki_enabled=dict(type='bool', default=True), - snippets_enabled=dict(default=True, type='bool'), - visibility=dict(type='str', default="private", choices=["internal", "private", "public"], aliases=["visibility_level"]), - import_url=dict(type='str'), - state=dict(type='str', default="present", choices=["absent", "present"]), - lfs_enabled=dict(default=False, type='bool'), - username=dict(type='str'), allow_merge_on_skipped_pipeline=dict(type='bool'), + avatar_path=dict(type='path'), + builds_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + 
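# Editor's note: hedged sketch of the update-side normalization added in the hunk
# above: '0d' for older_than and 0 for keep_n mean "clear the field", and the update
# is skipped entirely when every provided sub-value already matches the current
# policy, keeping the module idempotent. `current` is the project's existing policy
# dict; the function name is illustrative.
def normalized_policy_update(current, desired):
    final = {key: value for key, value in desired.items() if value is not None}
    if final.get('older_than') == '0d':
        final['older_than'] = None
    if final.get('keep_n') == 0:
        final['keep_n'] = None
    if all(current.get(key) == value for key, value in final.items()):
        return None   # nothing to change
    return final      # to be sent as container_expiration_policy_attributes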
ci_config_path=dict(type='str'), + container_expiration_policy=dict(type='dict', default=None, options=dict( + cadence=dict(type='str', choices=["1d", "7d", "14d", "1month", "3month"]), + enabled=dict(type='bool'), + keep_n=dict(type='int', choices=[0, 1, 5, 10, 25, 50, 100]), + older_than=dict(type='str', choices=["0d", "7d", "14d", "30d", "90d"]), + name_regex=dict(type='str'), + name_regex_keep=dict(type='str'), + )), + container_registry_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + default_branch=dict(type='str'), + description=dict(type='str'), + environments_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + feature_flags_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + forking_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + group=dict(type='str'), + import_url=dict(type='str'), + infrastructure_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + initialize_with_readme=dict(type='bool', default=False), + issues_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + issues_enabled=dict(type='bool', default=True), + lfs_enabled=dict(default=False, type='bool'), + merge_method=dict(type='str', default='merge', choices=["merge", "rebase_merge", "ff"]), + merge_requests_enabled=dict(type='bool', default=True), + model_registry_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + monitor_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + name=dict(type='str', required=True), only_allow_merge_if_all_discussions_are_resolved=dict(type='bool'), only_allow_merge_if_pipeline_succeeds=dict(type='bool'), packages_enabled=dict(type='bool'), - remove_source_branch_after_merge=dict(type='bool'), - squash_option=dict(type='str', choices=['never', 'always', 'default_off', 'default_on']), - ci_config_path=dict(type='str'), - shared_runners_enabled=dict(type='bool'), - avatar_path=dict(type='path'), - builds_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), - forking_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), - container_registry_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + pages_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + path=dict(type='str'), releases_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), - environments_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), - feature_flags_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), - infrastructure_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), - monitor_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + remove_source_branch_after_merge=dict(type='bool'), + repository_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), security_and_compliance_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + service_desk_enabled=dict(type='bool'), + shared_runners_enabled=dict(type='bool'), + snippets_enabled=dict(default=True, type='bool'), + squash_option=dict(type='str', choices=['never', 'always', 'default_off', 'default_on']), + state=dict(type='str', default="present", choices=["absent", "present"]), topics=dict(type='list', elements='str'), + username=dict(type='str'), + visibility=dict(type='str', default="private", choices=["internal", "private", "public"], 
aliases=["visibility_level"]), + wiki_enabled=dict(type='bool', default=True), )) module = AnsibleModule( @@ -547,6 +647,7 @@ def main(): ['api_token', 'api_oauth_token'], ['api_token', 'api_job_token'], ['group', 'username'], + ['issues_access_level', 'issues_enabled'], ], required_together=[ ['api_username', 'api_password'], @@ -560,41 +661,47 @@ def main(): # check prerequisites and connect to gitlab server gitlab_instance = gitlab_authentication(module) - group_identifier = module.params['group'] - project_name = module.params['name'] - project_path = module.params['path'] - project_description = module.params['description'] - initialize_with_readme = module.params['initialize_with_readme'] - issues_enabled = module.params['issues_enabled'] - merge_requests_enabled = module.params['merge_requests_enabled'] - merge_method = module.params['merge_method'] - wiki_enabled = module.params['wiki_enabled'] - snippets_enabled = module.params['snippets_enabled'] - visibility = module.params['visibility'] - import_url = module.params['import_url'] - state = module.params['state'] - lfs_enabled = module.params['lfs_enabled'] - username = module.params['username'] allow_merge_on_skipped_pipeline = module.params['allow_merge_on_skipped_pipeline'] + avatar_path = module.params['avatar_path'] + builds_access_level = module.params['builds_access_level'] + ci_config_path = module.params['ci_config_path'] + container_expiration_policy = module.params['container_expiration_policy'] + container_registry_access_level = module.params['container_registry_access_level'] + default_branch = module.params['default_branch'] + environments_access_level = module.params['environments_access_level'] + feature_flags_access_level = module.params['feature_flags_access_level'] + forking_access_level = module.params['forking_access_level'] + group_identifier = module.params['group'] + import_url = module.params['import_url'] + infrastructure_access_level = module.params['infrastructure_access_level'] + initialize_with_readme = module.params['initialize_with_readme'] + issues_access_level = module.params['issues_access_level'] + issues_enabled = module.params['issues_enabled'] + lfs_enabled = module.params['lfs_enabled'] + merge_method = module.params['merge_method'] + merge_requests_enabled = module.params['merge_requests_enabled'] + model_registry_access_level = module.params['model_registry_access_level'] + monitor_access_level = module.params['monitor_access_level'] only_allow_merge_if_all_discussions_are_resolved = module.params['only_allow_merge_if_all_discussions_are_resolved'] only_allow_merge_if_pipeline_succeeds = module.params['only_allow_merge_if_pipeline_succeeds'] packages_enabled = module.params['packages_enabled'] - remove_source_branch_after_merge = module.params['remove_source_branch_after_merge'] - squash_option = module.params['squash_option'] - ci_config_path = module.params['ci_config_path'] - shared_runners_enabled = module.params['shared_runners_enabled'] - avatar_path = module.params['avatar_path'] - default_branch = module.params['default_branch'] - builds_access_level = module.params['builds_access_level'] - forking_access_level = module.params['forking_access_level'] - container_registry_access_level = module.params['container_registry_access_level'] + pages_access_level = module.params['pages_access_level'] + project_description = module.params['description'] + project_name = module.params['name'] + project_path = module.params['path'] releases_access_level = 
module.params['releases_access_level'] - environments_access_level = module.params['environments_access_level'] - feature_flags_access_level = module.params['feature_flags_access_level'] - infrastructure_access_level = module.params['infrastructure_access_level'] - monitor_access_level = module.params['monitor_access_level'] + remove_source_branch_after_merge = module.params['remove_source_branch_after_merge'] + repository_access_level = module.params['repository_access_level'] security_and_compliance_access_level = module.params['security_and_compliance_access_level'] + service_desk_enabled = module.params['service_desk_enabled'] + shared_runners_enabled = module.params['shared_runners_enabled'] + snippets_enabled = module.params['snippets_enabled'] + squash_option = module.params['squash_option'] + state = module.params['state'] topics = module.params['topics'] + username = module.params['username'] + visibility = module.params['visibility'] + wiki_enabled = module.params['wiki_enabled'] # Set project_path to project_name if it is empty. if project_path is None: @@ -607,7 +714,7 @@ def main(): if group_identifier: group = find_group(gitlab_instance, group_identifier) if group is None: - module.fail_json(msg="Failed to create project: group %s doesn't exists" % group_identifier) + module.fail_json(msg="Failed to create project: group %s doesn't exist" % group_identifier) namespace_id = group.id else: @@ -633,42 +740,48 @@ def main(): if project_exists: gitlab_project.delete_project() module.exit_json(changed=True, msg="Successfully deleted project %s" % project_name) - module.exit_json(changed=False, msg="Project deleted or does not exists") + module.exit_json(changed=False, msg="Project deleted or does not exist") if state == 'present': if gitlab_project.create_or_update_project(module, project_name, namespace, { - "path": project_path, - "description": project_description, - "initialize_with_readme": initialize_with_readme, - "default_branch": default_branch, - "issues_enabled": issues_enabled, - "merge_requests_enabled": merge_requests_enabled, - "merge_method": merge_method, - "wiki_enabled": wiki_enabled, - "snippets_enabled": snippets_enabled, - "visibility": visibility, - "import_url": import_url, - "lfs_enabled": lfs_enabled, "allow_merge_on_skipped_pipeline": allow_merge_on_skipped_pipeline, + "avatar_path": avatar_path, + "builds_access_level": builds_access_level, + "ci_config_path": ci_config_path, + "container_expiration_policy": container_expiration_policy, + "container_registry_access_level": container_registry_access_level, + "default_branch": default_branch, + "description": project_description, + "environments_access_level": environments_access_level, + "feature_flags_access_level": feature_flags_access_level, + "forking_access_level": forking_access_level, + "import_url": import_url, + "infrastructure_access_level": infrastructure_access_level, + "initialize_with_readme": initialize_with_readme, + "issues_access_level": issues_access_level, + "issues_enabled": issues_enabled, + "lfs_enabled": lfs_enabled, + "merge_method": merge_method, + "merge_requests_enabled": merge_requests_enabled, + "model_registry_access_level": model_registry_access_level, + "monitor_access_level": monitor_access_level, "only_allow_merge_if_all_discussions_are_resolved": only_allow_merge_if_all_discussions_are_resolved, "only_allow_merge_if_pipeline_succeeds": only_allow_merge_if_pipeline_succeeds, "packages_enabled": packages_enabled, - "remove_source_branch_after_merge": 
remove_source_branch_after_merge, - "squash_option": squash_option, - "ci_config_path": ci_config_path, - "shared_runners_enabled": shared_runners_enabled, - "avatar_path": avatar_path, - "builds_access_level": builds_access_level, - "forking_access_level": forking_access_level, - "container_registry_access_level": container_registry_access_level, + "pages_access_level": pages_access_level, + "path": project_path, "releases_access_level": releases_access_level, - "environments_access_level": environments_access_level, - "feature_flags_access_level": feature_flags_access_level, - "infrastructure_access_level": infrastructure_access_level, - "monitor_access_level": monitor_access_level, + "remove_source_branch_after_merge": remove_source_branch_after_merge, + "repository_access_level": repository_access_level, "security_and_compliance_access_level": security_and_compliance_access_level, + "service_desk_enabled": service_desk_enabled, + "shared_runners_enabled": shared_runners_enabled, + "snippets_enabled": snippets_enabled, + "squash_option": squash_option, "topics": topics, + "visibility": visibility, + "wiki_enabled": wiki_enabled, }): module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name, project=gitlab_project.project_object._attrs) diff --git a/plugins/modules/gitlab_project_access_token.py b/plugins/modules/gitlab_project_access_token.py index e692a30577..a93d5531bf 100644 --- a/plugins/modules/gitlab_project_access_token.py +++ b/plugins/modules/gitlab_project_access_token.py @@ -12,7 +12,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: gitlab_project_access_token short_description: Manages GitLab project access tokens version_added: 8.4.0 @@ -27,11 +27,10 @@ extends_documentation_fragment: - community.general.gitlab - community.general.attributes notes: - - Access tokens can not be changed. If a parameter needs to be changed, an acceess token has to be recreated. - Whether tokens will be recreated is controlled by the O(recreate) option, which defaults to V(never). + - Access tokens can not be changed. If a parameter needs to be changed, an access token has to be recreated. Whether tokens + will be recreated is controlled by the O(recreate) option, which defaults to V(never). - Token string is contained in the result only when access token is created or recreated. It can not be fetched afterwards. - Token matching is done by comparing O(name) option. - attributes: check_mode: support: full @@ -56,7 +55,8 @@ options: type: list elements: str aliases: ["scope"] - choices: ["api", "read_api", "read_registry", "write_registry", "read_repository", "write_repository", "create_runner", "ai_features", "k8s_proxy"] + choices: ["api", "read_api", "read_registry", "write_registry", "read_repository", "write_repository", "create_runner", + "ai_features", "k8s_proxy"] access_level: description: - Access level of the access token. @@ -84,10 +84,10 @@ options: - When V(absent) it will be removed from the project if it exists. default: present type: str - choices: [ "present", "absent" ] -''' + choices: ["present", "absent"] +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: "Creating a project access token" community.general.gitlab_project_access_token: api_url: https://gitlab.example.com/ @@ -131,16 +131,16 @@ EXAMPLES = r''' - write_repository recreate: state_change state: present -''' +""" -RETURN = r''' +RETURN = r""" access_token: description: - API object.
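Illustrative sketch of the options newly added to the gitlab_project argument spec above; the URL, credential, group and project names are placeholders, and the suboption values are taken from the choices lists in the spec.

- name: Create a project with a container registry cleanup policy
  community.general.gitlab_project:
    api_url: https://gitlab.example.com/
    api_token: "{{ api_token }}"   # placeholder credential
    group: my-group                # placeholder namespace
    name: my-project
    repository_access_level: enabled
    container_expiration_policy:
      enabled: true
      cadence: 7d
      keep_n: 10
      older_than: 30d
      name_regex: '.*-snapshot'    # illustrative regex for tags to expire
    state: present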
- Only contains the value of the token if the token was created or recreated. returned: success and O(state=present) type: dict -''' +""" from datetime import datetime @@ -311,7 +311,10 @@ def main(): module.exit_json(changed=True, msg="Successfully recreated access token", access_token=gitlab_access_token.access_token_object._attrs) else: gitlab_access_token.create_access_token(project, {'name': name, 'scopes': scopes, 'access_level': access_level, 'expires_at': expires_at}) - module.exit_json(changed=True, msg="Successfully created access token", access_token=gitlab_access_token.access_token_object._attrs) + if module.check_mode: + module.exit_json(changed=True, msg="Successfully created access token", access_token={}) + else: + module.exit_json(changed=True, msg="Successfully created access token", access_token=gitlab_access_token.access_token_object._attrs) if __name__ == '__main__': diff --git a/plugins/modules/gitlab_project_badge.py b/plugins/modules/gitlab_project_badge.py index fee9389492..b62d651c7c 100644 --- a/plugins/modules/gitlab_project_badge.py +++ b/plugins/modules/gitlab_project_badge.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: gitlab_project_badge short_description: Manage project badges on GitLab Server version_added: 6.1.0 @@ -57,9 +56,9 @@ options: - A badge is identified by this URL. required: true type: str -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Add a badge to a GitLab Project community.general.gitlab_project_badge: api_url: 'https://example.gitlab.com' @@ -77,9 +76,9 @@ EXAMPLES = r''' state: absent link_url: 'https://example.gitlab.com/%{project_path}' image_url: 'https://example.gitlab.com/%{project_path}/badges/%{default_branch}/pipeline.svg' -''' +""" -RETURN = ''' +RETURN = r""" badge: description: The badge information. returned: when O(state=present) @@ -91,7 +90,7 @@ badge: rendered_link_url: 'http://example.com/ci_status.svg?project=example-org/example-project&ref=master' rendered_image_url: 'https://shields.io/my/badge' kind: project -''' +""" from ansible.module_utils.api import basic_auth_argument_spec from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/gitlab_project_members.py b/plugins/modules/gitlab_project_members.py index 2ce277f688..228af9a062 100644 --- a/plugins/modules/gitlab_project_members.py +++ b/plugins/modules/gitlab_project_members.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: gitlab_project_members short_description: Manage project members on GitLab Server version_added: 2.2.0 @@ -82,16 +81,16 @@ options: type: str purge_users: description: - - Adds/remove users of the given access_level to match the given O(gitlab_user)/O(gitlab_users_access) list. - If omitted do not purge orphaned members. + - Adds/removes users of the given access_level to match the given O(gitlab_user)/O(gitlab_users_access) list. If omitted, + do not purge orphaned members. - Is only used when O(state=present).
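A hedged sketch of the gitlab_project_access_token behaviour changed above: in check mode the module now exits with an empty access_token dict instead of reading back a token object. Option names follow the create_access_token() call in the hunk; the URL, credential, project path and expiry are placeholders.

- name: Create a read-only project access token (also safe to run with --check)
  community.general.gitlab_project_access_token:
    api_url: https://gitlab.example.com/
    api_token: "{{ api_token }}"   # placeholder credential
    project: my-group/my-project   # placeholder project path
    name: ci-read-token
    scopes:
      - read_repository
    expires_at: "2025-01-01"       # placeholder expiry date
    state: present
  register: token_result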
type: list elements: str choices: ['guest', 'reporter', 'developer', 'maintainer'] version_added: 3.7.0 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Add a user to a GitLab Project community.general.gitlab_project_members: api_url: 'https://gitlab.example.com' @@ -154,9 +153,9 @@ EXAMPLES = r''' - name: user2 access_level: maintainer state: absent -''' +""" -RETURN = r''' # ''' +RETURN = r""" # """ from ansible.module_utils.api import basic_auth_argument_spec from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/gitlab_project_variable.py b/plugins/modules/gitlab_project_variable.py index 329e7a414b..5903c9b5c4 100644 --- a/plugins/modules/gitlab_project_variable.py +++ b/plugins/modules/gitlab_project_variable.py @@ -7,14 +7,14 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: gitlab_project_variable short_description: Creates/updates/deletes GitLab Projects Variables description: - When a project variable does not exist, it will be created. - When a project variable does exist, its value will be updated when the values are different. - - Variables which are untouched in the playbook, but are not untouched in the GitLab project, - they stay untouched (O(purge=false)) or will be deleted (O(purge=true)). + - Variables which are not mentioned in the playbook, but exist in the GitLab project, either stay untouched (O(purge=false)) + or are deleted (O(purge=true)). author: - "Markus Bergholz (@markuman)" requirements: @@ -51,8 +51,8 @@ options: vars: description: - When the list element is a simple key-value pair, masked, raw and protected will be set to false. - - When the list element is a dict with the keys C(value), C(masked), C(raw) and C(protected), the user can - have full control about whether a value should be masked, raw, protected or both. + - When the list element is a dict with the keys C(value), C(masked), C(raw) and C(protected), the user can have full + control about whether a value should be masked, raw, protected or both. - Support for protected values requires GitLab >= 9.3. - Support for masked values requires GitLab >= 11.10. - Support for raw values requires GitLab >= 15.7. @@ -61,8 +61,8 @@ options: - A C(value) must be a string or a number. - Field C(variable_type) must be a string with either V(env_var), which is the default, or V(file). - Field C(environment_scope) must be a string defined by scope environment. - - When a value is masked, it must be in Base64 and have a length of at least 8 characters. - See GitLab documentation on acceptable values for a masked variable (https://docs.gitlab.com/ce/ci/variables/#masked-variables). + - When a value is masked, it must be in Base64 and have a length of at least 8 characters. See GitLab documentation + on acceptable values for a masked variable (https://docs.gitlab.com/ce/ci/variables/#masked-variables). default: {} type: dict variables: @@ -116,10 +116,10 @@ options: - Support for O(variables[].environment_scope) requires GitLab Premium >= 13.11. type: str default: '*' -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Set or update some CI/CD variables community.general.gitlab_project_variable: api_url: https://gitlab.com @@ -190,9 +190,9 @@ EXAMPLES = ''' state: absent vars: ACCESS_KEY_ID: abc123 -''' +""" -RETURN = ''' +RETURN = r""" project_variable: description: Four lists of the variablenames which were added, updated, removed or exist.
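A short illustrative snippet for the dict-valued vars form described above; the credential and project path are placeholders, and the value is Base64 with at least 8 characters as the masked-variable rule requires.

- name: Set a masked, protected CI/CD variable using the dict form of vars
  community.general.gitlab_project_variable:
    api_url: https://gitlab.com
    api_token: "{{ api_token }}"   # placeholder credential
    project: my-group/my-project   # placeholder project path
    vars:
      SECRET_ACCESS_KEY:
        value: c2VjcmV0dmFsdWUxMjM=   # Base64, at least 8 characters, per the masked-variable rule
        masked: true
        protected: true
        variable_type: env_var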
returned: always @@ -218,7 +218,7 @@ project_variable: returned: always type: list sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY'] -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.api import basic_auth_argument_spec diff --git a/plugins/modules/gitlab_protected_branch.py b/plugins/modules/gitlab_protected_branch.py index 8d2d75736b..4a3b7177ee 100644 --- a/plugins/modules/gitlab_protected_branch.py +++ b/plugins/modules/gitlab_protected_branch.py @@ -7,7 +7,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: gitlab_protected_branch short_description: Manage protection of existing branches version_added: 3.4.0 @@ -58,10 +58,10 @@ options: default: maintainer type: str choices: ["maintainer", "developer", "nobody"] -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create protected branch on main community.general.gitlab_protected_branch: api_url: https://gitlab.com @@ -70,11 +70,10 @@ EXAMPLES = ''' name: main merge_access_levels: maintainer push_access_level: nobody +""" -''' - -RETURN = ''' -''' +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.api import basic_auth_argument_spec diff --git a/plugins/modules/gitlab_runner.py b/plugins/modules/gitlab_runner.py index 96b3eb3fa4..62875c552a 100644 --- a/plugins/modules/gitlab_runner.py +++ b/plugins/modules/gitlab_runner.py @@ -10,30 +10,31 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: gitlab_runner short_description: Create, modify and delete GitLab Runners description: - Register, update and delete runners on GitLab Server side with the GitLab API. - All operations are performed using the GitLab API v4. - - For details, consult the full API documentation at U(https://docs.gitlab.com/ee/api/runners.html) - and U(https://docs.gitlab.com/ee/api/users.html#create-a-runner-linked-to-a-user). - - A valid private API token is required for all operations. You can create as many tokens as you like using the GitLab web interface at - U(https://$GITLAB_URL/profile/personal_access_tokens). - - A valid registration token is required for registering a new runner. - To create shared runners, you need to ask your administrator to give you this token. - It can be found at U(https://$GITLAB_URL/admin/runners/). - - This module does not handle the C(gitlab-runner) process part, but only manages the runner on GitLab Server side through its API. - Once the module has created the runner, you may use the generated token to run C(gitlab-runner register) command + - For details, consult the full API documentation at U(https://docs.gitlab.com/ee/api/runners.html) and + U(https://docs.gitlab.com/ee/api/users.html#create-a-runner-linked-to-a-user). + - A valid private API token is required for all operations. You can create as many tokens as you like using the GitLab web + interface at U(https://$GITLAB_URL/profile/personal_access_tokens). + - A valid registration token is required for registering a new runner. To create shared runners, you need to ask your administrator + to give you this token. It can be found at U(https://$GITLAB_URL/admin/runners/). + - This module does not handle the C(gitlab-runner) process part, but only manages the runner on GitLab Server side through + its API. 
Once the module has created the runner, you may use the generated token to run C(gitlab-runner register) command. notes: - To create a new runner at least the O(api_token), O(description) and O(api_url) options are required. - - Runners need to have unique descriptions, since this attribute is used as key for idempotency + - Runners need to have unique descriptions, since this attribute is used as key for idempotency. author: - Samy Coenen (@SamyCoenen) - Guillaume Martinez (@Lunik) requirements: - - python-gitlab >= 1.5.0 + - python-gitlab >= 1.5.0 for legacy runner registration workflow (runner registration token - + U(https://docs.gitlab.com/runner/register/#register-with-a-runner-registration-token-deprecated)) + - python-gitlab >= 4.0.0 for new runner registration workflow (runner authentication token - + U(https://docs.gitlab.com/runner/register/#register-with-a-runner-authentication-token)) extends_documentation_fragment: - community.general.auth_basic - community.general.gitlab @@ -70,7 +71,8 @@ options: - name state: description: - - Make sure that the runner with the same name exists with the same configuration or delete the runner with the same name. + - Make sure that the runner with the same name exists with the same configuration or delete the runner with the same + name. required: false default: present choices: ["present", "absent"] @@ -115,12 +117,12 @@ options: access_level: description: - Determines if a runner can pick up jobs only from protected branches. - - If O(access_level_on_creation) is not explicitly set to V(true), this option is ignored on registration and - is only applied on updates. + - If O(access_level_on_creation) is not explicitly set to V(true), this option is ignored on registration and is only + applied on updates. - If set to V(not_protected), runner can pick up jobs from both protected and unprotected branches. - If set to V(ref_protected), runner can pick up jobs only from protected branches. - - Before community.general 8.0.0 the default was V(ref_protected). This was changed to no default in community.general 8.0.0. - If this option is not specified explicitly, GitLab will use V(not_protected) on creation, and the value set + - Before community.general 8.0.0 the default was V(ref_protected). This was changed to no default in community.general + 8.0.0. If this option is not specified explicitly, GitLab will use V(not_protected) on creation, and the value set will not be changed on any updates. required: false choices: ["not_protected", "ref_protected"] @@ -153,9 +155,9 @@ options: default: [] type: list elements: str -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create an instance-level runner community.general.gitlab_runner: api_url: https://gitlab.example.com/ @@ -231,31 +233,31 @@ EXAMPLES = ''' state: present project: mygroup/mysubgroup/myproject register: runner # Register module output to run C(gitlab-runner register) command in another task -''' +""" -RETURN = ''' +RETURN = r""" msg: - description: Success or failure message + description: Success or failure message. returned: always type: str sample: "Success" result: - description: json parsed response from the server + description: JSON-parsed response from the server. returned: always type: dict error: - description: the error message returned by the GitLab API + description: The error message returned by the GitLab API. returned: failed type: str sample: "400: path is already in use" runner: - description: API object + description: API object. 
returned: always type: dict -''' +""" from ansible.module_utils.api import basic_auth_argument_spec from ansible.module_utils.basic import AnsibleModule @@ -365,18 +367,18 @@ class GitLabRunner(object): changed = False for arg_key, arg_value in arguments.items(): - if arguments[arg_key] is not None: - if isinstance(arguments[arg_key], list): + if arg_value is not None: + if isinstance(arg_value, list): list1 = getattr(runner, arg_key) list1.sort() - list2 = arguments[arg_key] + list2 = arg_value list2.sort() if list1 != list2: - setattr(runner, arg_key, arguments[arg_key]) + setattr(runner, arg_key, arg_value) changed = True else: - if getattr(runner, arg_key) != arguments[arg_key]: - setattr(runner, arg_key, arguments[arg_key]) + if getattr(runner, arg_key) != arg_value: + setattr(runner, arg_key, arg_value) changed = True return (changed, runner) @@ -466,6 +468,7 @@ def main(): state = module.params['state'] runner_description = module.params['description'] runner_active = module.params['active'] + runner_paused = module.params['paused'] tag_list = module.params['tag_list'] run_untagged = module.params['run_untagged'] runner_locked = module.params['locked'] @@ -500,7 +503,7 @@ def main(): module.exit_json(changed=False, msg="Runner deleted or does not exists") if state == 'present': - if gitlab_runner.create_or_update_runner(runner_description, { + runner_values = { "active": runner_active, "tag_list": tag_list, "run_untagged": run_untagged, @@ -510,7 +513,11 @@ def main(): "registration_token": registration_token, "group": group, "project": project, - }): + } + if LooseVersion(gitlab_runner._gitlab.version()[0]) >= LooseVersion("14.8.0"): + # the paused attribute for runners is available since 14.8 + runner_values["paused"] = runner_paused + if gitlab_runner.create_or_update_runner(runner_description, runner_values): module.exit_json(changed=True, runner=gitlab_runner.runner_object._attrs, msg="Successfully created or updated the runner %s" % runner_description) else: diff --git a/plugins/modules/gitlab_user.py b/plugins/modules/gitlab_user.py index 6e5ab4ece0..dd8685312d 100644 --- a/plugins/modules/gitlab_user.py +++ b/plugins/modules/gitlab_user.py @@ -10,8 +10,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: gitlab_user short_description: Creates/updates/deletes/blocks/unblocks GitLab Users description: @@ -88,13 +87,8 @@ options: type: str access_level: description: - - The access level to the group. One of the following can be used. - - guest - - reporter - - developer - - master (alias for maintainer) - - maintainer - - owner + - The access level to the group. + - The value V(master) is an alias for V(maintainer). default: guest type: str choices: ["guest", "reporter", "developer", "master", "maintainer", "owner"] @@ -128,7 +122,7 @@ options: suboptions: provider: description: - - The name of the external identity provider + - The name of the external identity provider. 
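A hedged playbook sketch for the gitlab_runner changes above; the paused value comes from the module.params lookup added in the hunk and is only forwarded to GitLab 14.8 or newer, and the URL and tokens are placeholders.

- name: Register a runner that starts out paused
  community.general.gitlab_runner:
    api_url: https://gitlab.example.com/
    api_token: "{{ api_token }}"                     # placeholder credential
    registration_token: "{{ registration_token }}"   # placeholder (legacy registration workflow)
    description: docker-runner
    tag_list:
      - docker
    paused: true   # silently skipped on GitLab older than 14.8, per the version check above
    state: present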
type: str extern_uid: description: @@ -143,9 +137,9 @@ options: type: bool default: false version_added: 3.3.0 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: "Delete GitLab User" community.general.gitlab_user: api_url: https://gitlab.example.com/ @@ -179,8 +173,8 @@ EXAMPLES = ''' password: mysecretpassword email: me@example.com identities: - - provider: Keycloak - extern_uid: f278f95c-12c7-4d51-996f-758cc2eb11bc + - provider: Keycloak + extern_uid: f278f95c-12c7-4d51-996f-758cc2eb11bc state: present group: super_group/mon_group access_level: owner @@ -198,31 +192,31 @@ EXAMPLES = ''' api_token: "{{ access_token }}" username: myusername state: unblocked -''' +""" -RETURN = ''' +RETURN = r""" msg: - description: Success or failure message + description: Success or failure message. returned: always type: str sample: "Success" result: - description: json parsed response from the server + description: JSON-parsed response from the server. returned: always type: dict error: - description: the error message returned by the GitLab API + description: The error message returned by the GitLab API. returned: failed type: str sample: "400: path is already in use" user: - description: API object + description: API object. returned: always type: dict -''' +""" from ansible.module_utils.api import basic_auth_argument_spec diff --git a/plugins/modules/grove.py b/plugins/modules/grove.py index b50546b4da..abdc303f90 100644 --- a/plugins/modules/grove.py +++ b/plugins/modules/grove.py @@ -9,13 +9,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: grove short_description: Sends a notification to a grove.io channel description: - - The C(grove) module sends a message for a service to a Grove.io - channel. + - The C(grove) module sends a message for a service to a Grove.io channel. extends_documentation_fragment: - community.general.attributes attributes: @@ -32,7 +30,7 @@ options: service: type: str description: - - Name of the service (displayed as the "user" in the message) + - Name of the service (displayed as the "user" in the message). required: false default: ansible message_content: @@ -44,29 +42,29 @@ options: url: type: str description: - - Service URL for the web client + - Service URL for the web client. required: false icon_url: type: str description: - - Icon for the service + - Icon for the service. required: false validate_certs: description: - - If V(false), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using + self-signed certificates. 
default: true type: bool author: "Jonas Pfenniger (@zimbatm)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Sends a notification to a grove.io channel community.general.grove: channel_token: 6Ph62VBBJOccmtTPZbubiPzdrhipZXtg service: my-app message: 'deployed {{ target }}' -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves.urllib.parse import urlencode diff --git a/plugins/modules/gunicorn.py b/plugins/modules/gunicorn.py index 2b2abcf8e6..baf24c53b8 100644 --- a/plugins/modules/gunicorn.py +++ b/plugins/modules/gunicorn.py @@ -9,21 +9,18 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: gunicorn short_description: Run gunicorn with various settings description: - - Starts gunicorn with the parameters specified. Common settings for gunicorn - configuration are supported. For additional configuration use a config file - See U(https://gunicorn-docs.readthedocs.io/en/latest/settings.html) for more - options. It's recommended to always use the chdir option to avoid problems - with the location of the app. + - Starts gunicorn with the parameters specified. Common settings for gunicorn configuration are supported. For additional + configuration use a config file See U(https://gunicorn-docs.readthedocs.io/en/latest/settings.html) for more options. + It's recommended to always use the chdir option to avoid problems with the location of the app. requirements: [gunicorn] author: - - "Alejandro Gomez (@agmezr)" + - "Alejandro Gomez (@agmezr)" extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: check_mode: support: none @@ -40,37 +37,36 @@ options: type: path aliases: ['virtualenv'] description: - - 'Path to the virtualenv directory.' + - Path to the virtualenv directory. config: type: path description: - - 'Path to the gunicorn configuration file.' + - Path to the gunicorn configuration file. aliases: ['conf'] chdir: type: path description: - - 'Chdir to specified directory before apps loading.' + - Chdir to specified directory before apps loading. pid: type: path description: - - 'A filename to use for the PID file. If not set and not found on the configuration file a tmp - pid file will be created to check a successful run of gunicorn.' + - A filename to use for the PID file. If not set and not found on the configuration file a tmp pid file will be created + to check a successful run of gunicorn. worker: type: str choices: ['sync', 'eventlet', 'gevent', 'tornado ', 'gthread', 'gaiohttp'] description: - - 'The type of workers to use. The default class (sync) should handle most "normal" types of workloads.' + - The type of workers to use. The default class (sync) should handle most "normal" types of workloads. user: type: str description: - - 'Switch worker processes to run as this user.' + - Switch worker processes to run as this user. notes: - - If not specified on config file, a temporary error log will be created on /tmp dir. - Please make sure you have write access in /tmp dir. Not needed but will help you to - identify any problem with configuration. -''' + - If not specified on config file, a temporary error log will be created on /tmp dir. Please make sure you have write access + in /tmp dir. Not needed but will help you to identify any problem with configuration. 
+""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Simple gunicorn run example community.general.gunicorn: app: 'wsgi' @@ -96,15 +92,15 @@ EXAMPLES = ''' venv: '/workspace/example/venv' pid: '/workspace/example/gunicorn.pid' user: 'ansible' -''' +""" -RETURN = ''' +RETURN = r""" gunicorn: - description: process id of gunicorn - returned: changed - type: str - sample: "1234" -''' + description: Process id of gunicorn. + returned: changed + type: str + sample: "1234" +""" import os import time diff --git a/plugins/modules/haproxy.py b/plugins/modules/haproxy.py index cbaa438334..9c60e59040 100644 --- a/plugins/modules/haproxy.py +++ b/plugins/modules/haproxy.py @@ -8,23 +8,21 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: haproxy short_description: Enable, disable, and set weights for HAProxy backend servers using socket commands author: - - Ravi Bhure (@ravibhure) + - Ravi Bhure (@ravibhure) description: - - Enable, disable, drain and set weights for HAProxy backend servers using socket commands. + - Enable, disable, drain and set weights for HAProxy backend servers using socket commands. notes: - - Enable, disable and drain commands are restricted and can only be issued on - sockets configured for level 'admin'. For example, you can add the line - 'stats socket /var/run/haproxy.sock level admin' to the general section of - haproxy.cfg. See U(http://haproxy.1wt.eu/download/1.5/doc/configuration.txt). - - Depends on netcat (C(nc)) being available; you need to install the appropriate - package for your operating system before this module can be used. + - Enable, disable and drain commands are restricted and can only be issued on sockets configured for level C(admin). For + example, you can add the line C(stats socket /var/run/haproxy.sock level admin) to the general section of C(haproxy.cfg). + See U(http://haproxy.1wt.eu/download/1.5/doc/configuration.txt). + - Depends on netcat (C(nc)) being available; you need to install the appropriate package for your operating system before + this module can be used. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: check_mode: support: none @@ -38,8 +36,8 @@ options: type: str drain: description: - - Wait until the server has no active connections or until the timeout - determined by wait_interval and wait_retries is reached. + - Wait until the server has no active connections or until the timeout determined by O(wait_interval) and O(wait_retries) + is reached. - Continue only after the status changes to C(MAINT). - This overrides the shutdown_sessions option. type: bool @@ -51,10 +49,9 @@ options: required: true shutdown_sessions: description: - - When disabling a server, immediately terminate all the sessions attached - to the specified server. - - This can be used to terminate long-running sessions after a server is put - into maintenance mode. Overridden by the drain option. + - When disabling a server, immediately terminate all the sessions attached to the specified server. + - This can be used to terminate long-running sessions after a server is put into maintenance mode. Overridden by the + drain option. type: bool default: false socket: @@ -65,12 +62,11 @@ options: state: description: - Desired state of the provided backend host. - - Note that V(drain) state was added in version 2.4. - - It is supported only by HAProxy version 1.5 or later, - - When used on versions < 1.5, it will be ignored. 
+ - Note that V(drain) state is supported only by HAProxy version 1.5 or later. When used on versions < 1.5, it will be + ignored. type: str required: true - choices: [ disabled, drain, enabled ] + choices: [disabled, drain, enabled] agent: description: - Disable/enable agent checks (depending on O(state) value). @@ -90,8 +86,8 @@ options: default: false wait: description: - - Wait until the server reports a status of C(UP) when O(state=enabled), - status of C(MAINT) when O(state=disabled) or status of C(DRAIN) when O(state=drain). + - Wait until the server reports a status of C(UP) when O(state=enabled), status of C(MAINT) when O(state=disabled) or + status of C(DRAIN) when O(state=drain). type: bool default: false wait_interval: @@ -107,14 +103,12 @@ options: weight: description: - The value passed in argument. - - If the value ends with the V(%) sign, then the new weight will be - relative to the initially configured weight. - - Relative weights are only permitted between 0 and 100% and absolute - weights are permitted between 0 and 256. + - If the value ends with the V(%) sign, then the new weight will be relative to the initially configured weight. + - Relative weights are only permitted between 0 and 100% and absolute weights are permitted between 0 and 256. type: str -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Disable server in 'www' backend pool community.general.haproxy: state: disabled @@ -169,7 +163,8 @@ EXAMPLES = r''' socket: /var/run/haproxy.sock shutdown_sessions: true -- name: Disable server without backend pool name (apply to all available backend pool) but fail when the backend host is not found +- name: Disable server without backend pool name (apply to all available backend pool) but fail when the backend host is + not found community.general.haproxy: state: disabled host: '{{ inventory_hostname }}' @@ -188,7 +183,8 @@ EXAMPLES = r''' backend: www wait: true -- name: Enable server in 'www' backend pool wait until healthy. Retry 10 times with intervals of 5 seconds to retrieve the health +- name: Enable server in 'www' backend pool wait until healthy. Retry 10 times with intervals of 5 seconds to retrieve the + health community.general.haproxy: state: enabled host: '{{ inventory_hostname }}' @@ -211,7 +207,7 @@ EXAMPLES = r''' host: '{{ inventory_hostname }}' socket: /var/run/haproxy.sock backend: www -''' +""" import csv import socket diff --git a/plugins/modules/heroku_collaborator.py b/plugins/modules/heroku_collaborator.py index e07ae333dd..1d278339e4 100644 --- a/plugins/modules/heroku_collaborator.py +++ b/plugins/modules/heroku_collaborator.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: heroku_collaborator short_description: Add or delete app collaborators on Heroku description: @@ -32,35 +31,35 @@ options: api_key: type: str description: - - Heroku API key + - Heroku API key. apps: type: list elements: str description: - - List of Heroku App names + - List of Heroku App names. required: true suppress_invitation: description: - - Suppress email invitation when creating collaborator + - Suppress email invitation when creating collaborator. type: bool default: false user: type: str description: - - User ID or e-mail + - User ID or e-mail. required: true state: type: str description: - - Create or remove the heroku collaborator + - Create or remove the heroku collaborator. 
choices: ["present", "absent"] default: "present" notes: - E(HEROKU_API_KEY) and E(TF_VAR_HEROKU_API_KEY) environment variables can be used instead setting O(api_key). - - If you use C(check_mode), you can also pass the C(-v) flag to see affected apps in C(msg), e.g. ["heroku-example-app"]. -''' + - If you use C(check_mode), you can also pass the C(-v) flag to see affected apps in C(msg), for example C(["heroku-example-app"]). +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a heroku collaborator community.general.heroku_collaborator: api_key: YOUR_API_KEY @@ -76,12 +75,12 @@ EXAMPLES = ''' suppress_invitation: '{{ item.suppress_invitation | default(suppress_invitation) }}' state: '{{ item.state | default("present") }}' with_items: - - { user: 'a.b@example.com' } - - { state: 'absent', user: 'b.c@example.com', suppress_invitation: false } - - { user: 'x.y@example.com', apps: ["heroku-example-app"] } -''' + - {user: 'a.b@example.com'} + - {state: 'absent', user: 'b.c@example.com', suppress_invitation: false} + - {user: 'x.y@example.com', apps: ["heroku-example-app"]} +""" -RETURN = ''' # ''' +RETURN = """ # """ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.heroku import HerokuHelper diff --git a/plugins/modules/hg.py b/plugins/modules/hg.py index 4b6b7c4330..fd539ba54d 100644 --- a/plugins/modules/hg.py +++ b/plugins/modules/hg.py @@ -9,75 +9,71 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hg short_description: Manages Mercurial (hg) repositories description: - - Manages Mercurial (hg) repositories. Supports SSH, HTTP/S and local address. + - Manages Mercurial (hg) repositories. Supports SSH, HTTP/S and local address. author: "Yeukhon Wong (@yeukhon)" extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - repo: - description: - - The repository address. - required: true - aliases: [ name ] - type: str - dest: - description: - - Absolute path of where the repository should be cloned to. - This parameter is required, unless clone and update are set to no - type: path - revision: - description: - - Equivalent C(-r) option in hg command which could be the changeset, revision number, - branch name or even tag. - aliases: [ version ] - type: str - force: - description: - - Discards uncommitted changes. Runs C(hg update -C). - type: bool - default: false - purge: - description: - - Deletes untracked files. Runs C(hg purge). - type: bool - default: false - update: - description: - - If V(false), do not retrieve new revisions from the origin repository - type: bool - default: true - clone: - description: - - If V(false), do not clone the repository if it does not exist locally. - type: bool - default: true - executable: - description: - - Path to hg executable to use. If not supplied, - the normal mechanism for resolving binary paths will be used. - type: str + repo: + description: + - The repository address. + required: true + aliases: [name] + type: str + dest: + description: + - Absolute path of where the repository should be cloned to. This parameter is required, unless clone and update are + set to no. 
+ type: path + revision: + description: + - Equivalent C(-r) option in hg command which could be the changeset, revision number, branch name or even tag. + aliases: [version] + type: str + force: + description: + - Discards uncommitted changes. Runs C(hg update -C). + type: bool + default: false + purge: + description: + - Deletes untracked files. Runs C(hg purge). + type: bool + default: false + update: + description: + - If V(false), do not retrieve new revisions from the origin repository. + type: bool + default: true + clone: + description: + - If V(false), do not clone the repository if it does not exist locally. + type: bool + default: true + executable: + description: + - Path to hg executable to use. If not supplied, the normal mechanism for resolving binary paths will be used. + type: str notes: - - This module does not support push capability. See U(https://github.com/ansible/ansible/issues/31156). - - "If the task seems to be hanging, first verify remote host is in C(known_hosts). - SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt, - one solution is to add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling - the hg module, with the following command: ssh-keyscan remote_host.com >> /etc/ssh/ssh_known_hosts." - - As per 01 Dec 2018, Bitbucket has dropped support for TLSv1 and TLSv1.1 connections. As such, - if the underlying system still uses a Python version below 2.7.9, you will have issues checking out - bitbucket repositories. See U(https://bitbucket.org/blog/deprecating-tlsv1-tlsv1-1-2018-12-01). -''' + - This module does not support push capability. See U(https://github.com/ansible/ansible/issues/31156). + - 'If the task seems to be hanging, first verify remote host is in C(known_hosts). SSH will prompt user to authorize the + first contact with a remote host. To avoid this prompt, one solution is to add the remote host public key in C(/etc/ssh/ssh_known_hosts) + before calling the hg module, with the following command: C(ssh-keyscan remote_host.com >> /etc/ssh/ssh_known_hosts).' + - As per 01 Dec 2018, Bitbucket has dropped support for TLSv1 and TLSv1.1 connections. As such, if the underlying system + still uses a Python version below 2.7.9, you will have issues checking out bitbucket repositories. + See U(https://bitbucket.org/blog/deprecating-tlsv1-tlsv1-1-2018-12-01). +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Ensure the current working copy is inside the stable branch and deletes untracked files if any. community.general.hg: repo: https://bitbucket.org/user/repo1 @@ -91,7 +87,7 @@ EXAMPLES = ''' dest: /srv/checkout clone: false update: false -''' +""" import os diff --git a/plugins/modules/hipchat.py b/plugins/modules/hipchat.py index 83e253679c..e605278507 100644 --- a/plugins/modules/hipchat.py +++ b/plugins/modules/hipchat.py @@ -9,14 +9,17 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hipchat short_description: Send a message to Hipchat description: - - Send a message to a Hipchat room, with options to control the formatting. + - Send a message to a Hipchat room, with options to control the formatting. extends_documentation_fragment: - community.general.attributes +deprecated: + removed_in: 11.0.0 + why: The hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020. + alternative: There is none. 
attributes: check_mode: support: full @@ -36,8 +39,7 @@ options: msg_from: type: str description: - - Name the message will appear to be sent from. Max length is 15 - characters - above this it will be truncated. + - Name the message will appear to be sent from. Max length is 15 characters - above this it will be truncated. default: Ansible aliases: [from] msg: @@ -50,13 +52,13 @@ options: description: - Background color for the message. default: yellow - choices: [ "yellow", "red", "green", "purple", "gray", "random" ] + choices: ["yellow", "red", "green", "purple", "gray", "random"] msg_format: type: str description: - Message format. default: text - choices: [ "text", "html" ] + choices: ["text", "html"] notify: description: - If true, a notification will be triggered for users in the room. @@ -64,23 +66,23 @@ options: default: true validate_certs: description: - - If V(false), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using + self-signed certificates. type: bool default: true api: type: str description: - - API url if using a self-hosted hipchat server. For Hipchat API version - 2 use the default URI with C(/v2) instead of C(/v1). + - API url if using a self-hosted hipchat server. For Hipchat API version 2 use the default URI with C(/v2) instead of + C(/v1). default: 'https://api.hipchat.com/v1' author: -- Shirou Wakayama (@shirou) -- Paul Bourdel (@pb8226) -''' + - Shirou Wakayama (@shirou) + - Paul Bourdel (@pb8226) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Send a message to a Hipchat room community.general.hipchat: room: notif @@ -92,7 +94,7 @@ EXAMPLES = ''' token: OAUTH2_TOKEN room: notify msg: Ansible task finished -''' +""" # =========================================== # HipChat module specific support methods. diff --git a/plugins/modules/homebrew.py b/plugins/modules/homebrew.py index 388682d924..eaf7d652b4 100644 --- a/plugins/modules/homebrew.py +++ b/plugins/modules/homebrew.py @@ -14,81 +14,80 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: homebrew author: - - "Indrajit Raychaudhuri (@indrajitr)" - - "Daniel Jaouen (@danieljaouen)" - - "Andrew Dunham (@andrew-d)" + - "Indrajit Raychaudhuri (@indrajitr)" + - "Daniel Jaouen (@danieljaouen)" + - "Andrew Dunham (@andrew-d)" requirements: - - homebrew must already be installed on the target system + - homebrew must already be installed on the target system short_description: Package manager for Homebrew description: - - Manages Homebrew packages + - Manages Homebrew packages. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - A list of names of packages to install/remove. - aliases: [ 'formula', 'package', 'pkg' ] - type: list - elements: str - path: - description: - - "A V(:) separated list of paths to search for C(brew) executable. - Since a package (I(formula) in homebrew parlance) location is prefixed relative to the actual path of C(brew) command, - providing an alternative C(brew) path enables managing different set of packages in an alternative location in the system." 
- default: '/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin' - type: path - state: - description: - - state of the package. - choices: [ 'absent', 'head', 'installed', 'latest', 'linked', 'present', 'removed', 'uninstalled', 'unlinked', 'upgraded' ] - default: present - type: str - update_homebrew: - description: - - update homebrew itself first. - type: bool - default: false - upgrade_all: - description: - - upgrade all homebrew packages. - type: bool - default: false - aliases: ['upgrade'] - install_options: - description: - - options flags to install a package. - aliases: ['options'] - type: list - elements: str - upgrade_options: - description: - - Option flags to upgrade. - type: list - elements: str - version_added: '0.2.0' - force_formula: - description: - - Force the package(s) to be treated as a formula (equivalent to C(brew --formula)). - - To install a cask, use the M(community.general.homebrew_cask) module. - type: bool - default: false - version_added: 9.0.0 + name: + description: + - A list of names of packages to install/remove. + aliases: ['formula', 'package', 'pkg'] + type: list + elements: str + path: + description: + - A V(:) separated list of paths to search for C(brew) executable. Since a package (I(formula) in homebrew parlance) + location is prefixed relative to the actual path of C(brew) command, providing an alternative C(brew) path enables + managing different set of packages in an alternative location in the system. + default: '/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin' + type: path + state: + description: + - State of the package. + choices: ['absent', 'head', 'installed', 'latest', 'linked', 'present', 'removed', 'uninstalled', 'unlinked', 'upgraded'] + default: present + type: str + update_homebrew: + description: + - Update homebrew itself first. + type: bool + default: false + upgrade_all: + description: + - Upgrade all homebrew packages. + type: bool + default: false + aliases: ['upgrade'] + install_options: + description: + - Options flags to install a package. + aliases: ['options'] + type: list + elements: str + upgrade_options: + description: + - Option flags to upgrade. + type: list + elements: str + version_added: '0.2.0' + force_formula: + description: + - Force the package(s) to be treated as a formula (equivalent to C(brew --formula)). + - To install a cask, use the M(community.general.homebrew_cask) module. + type: bool + default: false + version_added: 9.0.0 notes: - - When used with a C(loop:) each package will be processed individually, - it is much more efficient to pass the list directly to the O(name) option. -''' + - When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly + to the O(name) option. +""" -EXAMPLES = ''' +EXAMPLES = r""" # Install formula foo with 'brew' in default path - community.general.homebrew: name: foo @@ -154,29 +153,29 @@ EXAMPLES = ''' name: ambiguous_formula state: present force_formula: true -''' +""" -RETURN = ''' +RETURN = r""" msg: - description: if the cache was updated or not - returned: always - type: str - sample: "Changed: 0, Unchanged: 2" + description: If the cache was updated or not. 
+ returned: always + type: str + sample: "Changed: 0, Unchanged: 2" unchanged_pkgs: - description: - - List of package names which are unchanged after module run - returned: success - type: list - sample: ["awscli", "ag"] - version_added: '0.2.0' + description: + - List of package names which are unchanged after module run. + returned: success + type: list + sample: ["awscli", "ag"] + version_added: '0.2.0' changed_pkgs: - description: - - List of package names which are changed after module run - returned: success - type: list - sample: ['git', 'git-cola'] - version_added: '0.2.0' -''' + description: + - List of package names which are changed after module run. + returned: success + type: list + sample: ['git', 'git-cola'] + version_added: '0.2.0' +""" import json import re @@ -415,9 +414,9 @@ class Homebrew(object): if self.force_formula: cmd.append("--formula") rc, out, err = self.module.run_command(cmd) - if err: + if rc != 0: self.failed = True - self.message = err.strip() + self.message = err.strip() or ("Unknown failure with exit code %d" % rc) raise HomebrewException(self.message) data = json.loads(out) diff --git a/plugins/modules/homebrew_cask.py b/plugins/modules/homebrew_cask.py index dc9aea5db8..d69fd266a2 100644 --- a/plugins/modules/homebrew_cask.py +++ b/plugins/modules/homebrew_cask.py @@ -11,8 +11,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: homebrew_cask author: - "Indrajit Raychaudhuri (@indrajitr)" @@ -31,60 +30,59 @@ attributes: options: name: description: - - Name of cask to install or remove. - aliases: [ 'cask', 'package', 'pkg' ] + - Name of cask to install or remove. + aliases: ['cask', 'package', 'pkg'] type: list elements: str path: description: - - "':' separated list of paths to search for 'brew' executable." + - "':' separated list of paths to search for 'brew' executable." default: '/usr/local/bin:/opt/homebrew/bin' type: path state: description: - - State of the cask. - choices: [ 'absent', 'installed', 'latest', 'present', 'removed', 'uninstalled', 'upgraded' ] + - State of the cask. + choices: ['absent', 'installed', 'latest', 'present', 'removed', 'uninstalled', 'upgraded'] default: present type: str sudo_password: description: - - The sudo password to be passed to SUDO_ASKPASS. + - The sudo password to be passed to E(SUDO_ASKPASS). required: false type: str update_homebrew: description: - - Update homebrew itself first. - - Note that C(brew cask update) is a synonym for C(brew update). + - Update homebrew itself first. + - Note that C(brew cask update) is a synonym for C(brew update). type: bool default: false install_options: description: - - Options flags to install a package. - aliases: [ 'options' ] + - Options flags to install a package. + aliases: ['options'] type: list elements: str accept_external_apps: description: - - Allow external apps. + - Allow external apps. type: bool default: false upgrade_all: description: - - Upgrade all casks. - - Mutually exclusive with C(upgraded) state. + - Upgrade all casks. + - Mutually exclusive with C(upgraded) state. type: bool default: false - aliases: [ 'upgrade' ] + aliases: ['upgrade'] greedy: description: - - Upgrade casks that auto update. - - Passes C(--greedy) to C(brew outdated --cask) when checking - if an installed cask has a newer version available, - or to C(brew upgrade --cask) when upgrading all casks. + - Upgrade casks that auto update. 
+ - Passes C(--greedy) to C(brew outdated --cask) when checking if an installed cask has a newer version available, or + to C(brew upgrade --cask) when upgrading all casks. type: bool default: false -''' -EXAMPLES = ''' +""" +EXAMPLES = r""" - name: Install cask community.general.homebrew_cask: name: alfred @@ -151,7 +149,7 @@ EXAMPLES = ''' name: wireshark state: present sudo_password: "{{ ansible_become_pass }}" -''' +""" import os import re @@ -190,6 +188,7 @@ class HomebrewCask(object): / # slash (for taps) \- # dashes @ # at symbol + \+ # plus symbol ''' INVALID_CASK_REGEX = _create_regex_group_complement(VALID_CASK_CHARS) @@ -534,7 +533,12 @@ class HomebrewCask(object): rc, out, err = self.module.run_command(cmd) if rc == 0: - if re.search(r'==> No Casks to upgrade', out.strip(), re.IGNORECASE): + # 'brew upgrade --cask' does not output anything if no casks are upgraded + if not out.strip(): + self.message = 'Homebrew casks already upgraded.' + + # handle legacy 'brew cask upgrade' + elif re.search(r'==> No Casks to upgrade', out.strip(), re.IGNORECASE): self.message = 'Homebrew casks already upgraded.' else: diff --git a/plugins/modules/homebrew_services.py b/plugins/modules/homebrew_services.py new file mode 100644 index 0000000000..5d84563d33 --- /dev/null +++ b/plugins/modules/homebrew_services.py @@ -0,0 +1,255 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2013, Andrew Dunham +# Copyright (c) 2013, Daniel Jaouen +# Copyright (c) 2015, Indrajit Raychaudhuri +# Copyright (c) 2024, Kit Ham +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +module: homebrew_services +author: + - "Kit Ham (@kitizz)" +requirements: + - homebrew must already be installed on the target system +short_description: Services manager for Homebrew +version_added: 9.3.0 +description: + - Manages daemons and services using Homebrew. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - An installed homebrew package whose service is to be updated. + aliases: ['formula'] + type: str + required: true + path: + description: + - A V(:) separated list of paths to search for C(brew) executable. Since a package (I(formula) in homebrew parlance) + location is prefixed relative to the actual path of C(brew) command, providing an alternative C(brew) path enables + managing different set of packages in an alternative location in the system. + default: '/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin' + type: path + state: + description: + - State of the package's service. 
+ choices: ['present', 'absent', 'restarted'] + default: present + type: str +""" + +EXAMPLES = r""" +- name: Install foo package + community.general.homebrew: + name: foo + state: present + +- name: Start the foo service (equivalent to `brew services start foo`) + community.general.homebrew_services: + name: foo + state: present + +- name: Restart the foo service (equivalent to `brew services restart foo`) + community.general.homebrew_services: + name: foo + state: restarted + +- name: Remove the foo service (equivalent to `brew services stop foo`) + community.general.homebrew_services: + name: foo + service_state: absent +""" + +RETURN = r""" +pid: + description: + - If the service is now running, this is the PID of the service, otherwise -1. + returned: success + type: int + sample: 1234 +running: + description: + - Whether the service is running after running this command. + returned: success + type: bool + sample: true +""" + +import json +import sys + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.homebrew import ( + HomebrewValidate, + parse_brew_path, +) + +if sys.version_info < (3, 5): + from collections import namedtuple + + # Stores validated arguments for an instance of an action. + # See DOCUMENTATION string for argument-specific information. + HomebrewServiceArgs = namedtuple( + "HomebrewServiceArgs", ["name", "state", "brew_path"] + ) + + # Stores the state of a Homebrew service. + HomebrewServiceState = namedtuple("HomebrewServiceState", ["running", "pid"]) + +else: + from typing import NamedTuple, Optional + + # Stores validated arguments for an instance of an action. + # See DOCUMENTATION string for argument-specific information. + HomebrewServiceArgs = NamedTuple( + "HomebrewServiceArgs", [("name", str), ("state", str), ("brew_path", str)] + ) + + # Stores the state of a Homebrew service. 
+ HomebrewServiceState = NamedTuple( + "HomebrewServiceState", [("running", bool), ("pid", Optional[int])] + ) + + +def _brew_service_state(args, module): + # type: (HomebrewServiceArgs, AnsibleModule) -> HomebrewServiceState + cmd = [args.brew_path, "services", "info", args.name, "--json"] + rc, stdout, stderr = module.run_command(cmd, check_rc=True) + + try: + data = json.loads(stdout)[0] + except json.JSONDecodeError: + module.fail_json(msg="Failed to parse JSON output:\n{0}".format(stdout)) + + return HomebrewServiceState(running=data["status"] == "started", pid=data["pid"]) + + +def _exit_with_state(args, module, changed=False, message=None): + # type: (HomebrewServiceArgs, AnsibleModule, bool, Optional[str]) -> None + state = _brew_service_state(args, module) + if message is None: + message = ( + "Running: {state.running}, Changed: {changed}, PID: {state.pid}".format( + state=state, changed=changed + ) + ) + module.exit_json(msg=message, pid=state.pid, running=state.running, changed=changed) + + +def validate_and_load_arguments(module): + # type: (AnsibleModule) -> HomebrewServiceArgs + """Reuse the Homebrew module's validation logic to validate these arguments.""" + package = module.params["name"] # type: ignore + if not HomebrewValidate.valid_package(package): + module.fail_json(msg="Invalid package name: {0}".format(package)) + + state = module.params["state"] # type: ignore + if state not in ["present", "absent", "restarted"]: + module.fail_json(msg="Invalid state: {0}".format(state)) + + brew_path = parse_brew_path(module) + + return HomebrewServiceArgs(name=package, state=state, brew_path=brew_path) + + +def start_service(args, module): + # type: (HomebrewServiceArgs, AnsibleModule) -> None + """Start the requested brew service if it is not already running.""" + state = _brew_service_state(args, module) + if state.running: + # Nothing to do, return early. + _exit_with_state(args, module, changed=False, message="Service already running") + + if module.check_mode: + _exit_with_state(args, module, changed=True, message="Service would be started") + + start_cmd = [args.brew_path, "services", "start", args.name] + rc, stdout, stderr = module.run_command(start_cmd, check_rc=True) + + _exit_with_state(args, module, changed=True) + + +def stop_service(args, module): + # type: (HomebrewServiceArgs, AnsibleModule) -> None + """Stop the requested brew service if it is running.""" + state = _brew_service_state(args, module) + if not state.running: + # Nothing to do, return early. + _exit_with_state(args, module, changed=False, message="Service already stopped") + + if module.check_mode: + _exit_with_state(args, module, changed=True, message="Service would be stopped") + + stop_cmd = [args.brew_path, "services", "stop", args.name] + rc, stdout, stderr = module.run_command(stop_cmd, check_rc=True) + + _exit_with_state(args, module, changed=True) + + +def restart_service(args, module): + # type: (HomebrewServiceArgs, AnsibleModule) -> None + """Restart the requested brew service. 
This always results in a change.""" + if module.check_mode: + _exit_with_state( + args, module, changed=True, message="Service would be restarted" + ) + + restart_cmd = [args.brew_path, "services", "restart", args.name] + rc, stdout, stderr = module.run_command(restart_cmd, check_rc=True) + + _exit_with_state(args, module, changed=True) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict( + aliases=["formula"], + required=True, + type="str", + ), + state=dict( + choices=["present", "absent", "restarted"], + default="present", + ), + path=dict( + default="/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin", + type="path", + ), + ), + supports_check_mode=True, + ) + + module.run_command_environ_update = dict( + LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C" + ) + + # Pre-validate arguments. + service_args = validate_and_load_arguments(module) + + # Choose logic based on the desired state. + if service_args.state == "present": + start_service(service_args, module) + elif service_args.state == "absent": + stop_service(service_args, module) + elif service_args.state == "restarted": + restart_service(service_args, module) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/homebrew_tap.py b/plugins/modules/homebrew_tap.py index 151d09d328..f070ccccc7 100644 --- a/plugins/modules/homebrew_tap.py +++ b/plugins/modules/homebrew_tap.py @@ -13,56 +13,53 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: homebrew_tap author: - - "Indrajit Raychaudhuri (@indrajitr)" - - "Daniel Jaouen (@danieljaouen)" + - "Indrajit Raychaudhuri (@indrajitr)" + - "Daniel Jaouen (@danieljaouen)" short_description: Tap a Homebrew repository description: - - Tap external Homebrew repositories. + - Tap external Homebrew repositories. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - The GitHub user/organization repository to tap. - required: true - aliases: ['tap'] - type: list - elements: str - url: - description: - - The optional git URL of the repository to tap. The URL is not - assumed to be on GitHub, and the protocol doesn't have to be HTTP. - Any location and protocol that git can handle is fine. - - O(name) option may not be a list of multiple taps (but a single - tap instead) when this option is provided. - required: false - type: str - state: - description: - - state of the repository. - choices: [ 'present', 'absent' ] - required: false - default: 'present' - type: str - path: - description: - - "A V(:) separated list of paths to search for C(brew) executable." - default: '/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin' - type: path - version_added: '2.1.0' -requirements: [ homebrew ] -''' + name: + description: + - The GitHub user/organization repository to tap. + required: true + aliases: ['tap'] + type: list + elements: str + url: + description: + - The optional git URL of the repository to tap. The URL is not assumed to be on GitHub, and the protocol does not have + to be HTTP. Any location and protocol that git can handle is fine. + - O(name) option may not be a list of multiple taps (but a single tap instead) when this option is provided. + required: false + type: str + state: + description: + - State of the repository. 
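The new homebrew_services module above derives its state from C(brew services info <name> --json). A hedged sketch of that mapping, using a hand-written stand-in payload (the field values and the C(name) key are illustrative; only the C(status) and C(pid) keys are actually relied on by the module):

import json

# Hand-written stand-in for `brew services info foo --json` output; values are illustrative.
stdout = '[{"name": "foo", "status": "started", "pid": 1234}]'

data = json.loads(stdout)[0]
running = data["status"] == "started"   # anything other than "started" is treated as not running
pid = data["pid"]                       # may be None/null when the service is not running
print(running, pid)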
+ choices: ['present', 'absent'] + required: false + default: 'present' + type: str + path: + description: + - A V(:) separated list of paths to search for C(brew) executable. + default: '/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin' + type: path + version_added: '2.1.0' +requirements: [homebrew] +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Tap a Homebrew repository, state present community.general.homebrew_tap: name: homebrew/dupes @@ -81,7 +78,7 @@ EXAMPLES = r''' community.general.homebrew_tap: name: telemachus/brew url: 'https://bitbucket.org/telemachus/brew' -''' +""" import re diff --git a/plugins/modules/homectl.py b/plugins/modules/homectl.py index ca4c19a875..f0b64be149 100644 --- a/plugins/modules/homectl.py +++ b/plugins/modules/homectl.py @@ -8,180 +8,185 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: homectl author: - - "James Livulpi (@jameslivulpi)" + - "James Livulpi (@jameslivulpi)" short_description: Manage user accounts with systemd-homed version_added: 4.4.0 description: - - Manages a user's home directory managed by systemd-homed. + - Manages a user's home directory managed by systemd-homed. +notes: + - This module requires the deprecated L(crypt Python module, https://docs.python.org/3.12/library/crypt.html) library which + was removed from Python 3.13. For Python 3.13 or newer, you need to install L(legacycrypt, https://pypi.org/project/legacycrypt/). +requirements: + - legacycrypt (on Python 3.13 or newer) extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - The user name to create, remove, or update. - required: true - aliases: [ 'user', 'username' ] - type: str - password: - description: - - Set the user's password to this. - - Homed requires this value to be in cleartext on user creation and updating a user. - - The module takes the password and generates a password hash in SHA-512 with 10000 rounds of salt generation using crypt. - - See U(https://systemd.io/USER_RECORD/). - - This is required for O(state=present). When an existing user is updated this is checked against the stored hash in homed. - type: str - state: - description: - - The operation to take on the user. - choices: [ 'absent', 'present' ] - default: present - type: str - storage: - description: - - Indicates the storage mechanism for the user's home directory. - - If the storage type is not specified, ``homed.conf(5)`` defines which default storage to use. - - Only used when a user is first created. - choices: [ 'classic', 'luks', 'directory', 'subvolume', 'fscrypt', 'cifs' ] - type: str - disksize: - description: - - The intended home directory disk space. - - Human readable value such as V(10G), V(10M), or V(10B). - type: str - resize: - description: - - When used with O(disksize) this will attempt to resize the home directory immediately. - default: false - type: bool - realname: - description: - - The user's real ('human') name. - - This can also be used to add a comment to maintain compatibility with C(useradd). - aliases: [ 'comment' ] - type: str - realm: - description: - - The 'realm' a user is defined in. - type: str - email: - description: - - The email address of the user. 
- type: str - location: - description: - - A free-form location string describing the location of the user. - type: str - iconname: - description: - - The name of an icon picked by the user, for example for the purpose of an avatar. - - Should follow the semantics defined in the Icon Naming Specification. - - See U(https://specifications.freedesktop.org/icon-naming-spec/icon-naming-spec-latest.html) for specifics. - type: str - homedir: - description: - - Path to use as home directory for the user. - - This is the directory the user's home directory is mounted to while the user is logged in. - - This is not where the user's data is actually stored, see O(imagepath) for that. - - Only used when a user is first created. - type: path - imagepath: - description: - - Path to place the user's home directory. - - See U(https://www.freedesktop.org/software/systemd/man/homectl.html#--image-path=PATH) for more information. - - Only used when a user is first created. - type: path - uid: - description: - - Sets the UID of the user. - - If using O(gid) homed requires the value to be the same. - - Only used when a user is first created. - type: int - gid: - description: - - Sets the gid of the user. - - If using O(uid) homed requires the value to be the same. - - Only used when a user is first created. - type: int - mountopts: - description: - - String separated by comma each indicating mount options for a users home directory. - - Valid options are V(nosuid), V(nodev) or V(noexec). - - Homed by default uses V(nodev) and V(nosuid) while V(noexec) is off. - type: str - umask: - description: - - Sets the umask for the user's login sessions - - Value from V(0000) to V(0777). - type: int - memberof: - description: - - String separated by comma each indicating a UNIX group this user shall be a member of. - - Groups the user should be a member of should be supplied as comma separated list. - aliases: [ 'groups' ] - type: str - skeleton: - description: - - The absolute path to the skeleton directory to populate a new home directory from. - - This is only used when a home directory is first created. - - If not specified homed by default uses V(/etc/skel). - aliases: [ 'skel' ] - type: path - shell: - description: - - Shell binary to use for terminal logins of given user. - - If not specified homed by default uses V(/bin/bash). - type: str - environment: - description: - - String separated by comma each containing an environment variable and its value to - set for the user's login session, in a format compatible with ``putenv()``. - - Any environment variable listed here is automatically set by pam_systemd for all - login sessions of the user. - aliases: [ 'setenv' ] - type: str - timezone: - description: - - Preferred timezone to use for the user. - - Should be a tzdata compatible location string such as V(America/New_York). - type: str - locked: - description: - - Whether the user account should be locked or not. - type: bool - language: - description: - - The preferred language/locale for the user. - - This should be in a format compatible with the E(LANG) environment variable. - type: str - passwordhint: - description: - - Password hint for the given user. - type: str - sshkeys: - description: - - String separated by comma each listing a SSH public key that is authorized to access the account. - - The keys should follow the same format as the lines in a traditional C(~/.ssh/authorized_key) file. 
- type: str - notbefore: - description: - - A time since the UNIX epoch before which the record should be considered invalid for the purpose of logging in. - type: int - notafter: - description: - - A time since the UNIX epoch after which the record should be considered invalid for the purpose of logging in. - type: int -''' + name: + description: + - The user name to create, remove, or update. + required: true + aliases: ['user', 'username'] + type: str + password: + description: + - Set the user's password to this. + - Homed requires this value to be in cleartext on user creation and updating a user. + - The module takes the password and generates a password hash in SHA-512 with 10000 rounds of salt generation using + crypt. + - See U(https://systemd.io/USER_RECORD/). + - This is required for O(state=present). When an existing user is updated this is checked against the stored hash in + homed. + type: str + state: + description: + - The operation to take on the user. + choices: ['absent', 'present'] + default: present + type: str + storage: + description: + - Indicates the storage mechanism for the user's home directory. + - If the storage type is not specified, C(homed.conf(5\)) defines which default storage to use. + - Only used when a user is first created. + choices: ['classic', 'luks', 'directory', 'subvolume', 'fscrypt', 'cifs'] + type: str + disksize: + description: + - The intended home directory disk space. + - Human readable value such as V(10G), V(10M), or V(10B). + type: str + resize: + description: + - When used with O(disksize) this will attempt to resize the home directory immediately. + default: false + type: bool + realname: + description: + - The user's real ('human') name. + - This can also be used to add a comment to maintain compatibility with C(useradd). + aliases: ['comment'] + type: str + realm: + description: + - The 'realm' a user is defined in. + type: str + email: + description: + - The email address of the user. + type: str + location: + description: + - A free-form location string describing the location of the user. + type: str + iconname: + description: + - The name of an icon picked by the user, for example for the purpose of an avatar. + - Should follow the semantics defined in the Icon Naming Specification. + - See U(https://specifications.freedesktop.org/icon-naming-spec/icon-naming-spec-latest.html) for specifics. + type: str + homedir: + description: + - Path to use as home directory for the user. + - This is the directory the user's home directory is mounted to while the user is logged in. + - This is not where the user's data is actually stored, see O(imagepath) for that. + - Only used when a user is first created. + type: path + imagepath: + description: + - Path to place the user's home directory. + - See U(https://www.freedesktop.org/software/systemd/man/homectl.html#--image-path=PATH) for more information. + - Only used when a user is first created. + type: path + uid: + description: + - Sets the UID of the user. + - If using O(gid) homed requires the value to be the same. + - Only used when a user is first created. + type: int + gid: + description: + - Sets the gid of the user. + - If using O(uid) homed requires the value to be the same. + - Only used when a user is first created. + type: int + mountopts: + description: + - String separated by comma each indicating mount options for a users home directory. + - Valid options are V(nosuid), V(nodev) or V(noexec). + - Homed by default uses V(nodev) and V(nosuid) while V(noexec) is off. 
+ type: str + umask: + description: + - Sets the umask for the user's login sessions. + - Value from V(0000) to V(0777). + type: int + memberof: + description: + - String separated by comma each indicating a UNIX group this user shall be a member of. + - Groups the user should be a member of should be supplied as comma separated list. + aliases: ['groups'] + type: str + skeleton: + description: + - The absolute path to the skeleton directory to populate a new home directory from. + - This is only used when a home directory is first created. + - If not specified homed by default uses V(/etc/skel). + aliases: ['skel'] + type: path + shell: + description: + - Shell binary to use for terminal logins of given user. + - If not specified homed by default uses V(/bin/bash). + type: str + environment: + description: + - String separated by comma each containing an environment variable and its value to set for the user's login session, + in a format compatible with C(putenv(\)). + - Any environment variable listed here is automatically set by pam_systemd for all login sessions of the user. + aliases: ['setenv'] + type: str + timezone: + description: + - Preferred timezone to use for the user. + - Should be a tzdata compatible location string such as V(America/New_York). + type: str + locked: + description: + - Whether the user account should be locked or not. + type: bool + language: + description: + - The preferred language/locale for the user. + - This should be in a format compatible with the E(LANG) environment variable. + type: str + passwordhint: + description: + - Password hint for the given user. + type: str + sshkeys: + description: + - String separated by comma each listing a SSH public key that is authorized to access the account. + - The keys should follow the same format as the lines in a traditional C(~/.ssh/authorized_key) file. + type: str + notbefore: + description: + - A time since the UNIX epoch before which the record should be considered invalid for the purpose of logging in. + type: int + notafter: + description: + - A time since the UNIX epoch after which the record should be considered invalid for the purpose of logging in. + type: int +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Add the user 'james' community.general.homectl: name: johnd @@ -209,9 +214,9 @@ EXAMPLES = ''' community.general.homectl: name: janet state: absent -''' +""" -RETURN = ''' +RETURN = r""" data: description: A json dictionary returned from C(homectl inspect -j). 
returned: success @@ -261,14 +266,34 @@ data: "userName": "james", } } -''' +""" -import crypt import json -from ansible.module_utils.basic import AnsibleModule +import traceback +from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.basic import jsonify from ansible.module_utils.common.text.formatters import human_to_bytes +try: + import crypt +except ImportError: + HAS_CRYPT = False + CRYPT_IMPORT_ERROR = traceback.format_exc() +else: + HAS_CRYPT = True + CRYPT_IMPORT_ERROR = None + +try: + import legacycrypt + if not HAS_CRYPT: + crypt = legacycrypt +except ImportError: + HAS_LEGACYCRYPT = False + LEGACYCRYPT_IMPORT_ERROR = traceback.format_exc() +else: + HAS_LEGACYCRYPT = True + LEGACYCRYPT_IMPORT_ERROR = None + class Homectl(object): '''#TODO DOC STRINGS''' @@ -591,6 +616,12 @@ def main(): ] ) + if not HAS_CRYPT and not HAS_LEGACYCRYPT: + module.fail_json( + msg=missing_required_lib('crypt (part of standard library up to Python 3.12) or legacycrypt (PyPI)'), + exception=CRYPT_IMPORT_ERROR, + ) + homectl = Homectl(module) homectl.result['state'] = homectl.state diff --git a/plugins/modules/honeybadger_deployment.py b/plugins/modules/honeybadger_deployment.py index cf52745ac7..c653643e33 100644 --- a/plugins/modules/honeybadger_deployment.py +++ b/plugins/modules/honeybadger_deployment.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: honeybadger_deployment author: "Benjamin Curtis (@stympy)" short_description: Notify Honeybadger.io about app deployments @@ -31,20 +30,20 @@ options: environment: type: str description: - - The environment name, typically 'production', 'staging', etc. + - The environment name, typically V(production), V(staging), and so on. required: true user: type: str description: - - The username of the person doing the deployment + - The username of the person doing the deployment. repo: type: str description: - - URL of the project repository + - URL of the project repository. revision: type: str description: - - A hash, number, tag, or other identifier showing what revision was deployed + - A hash, number, tag, or other identifier showing what revision was deployed. url: type: str description: @@ -52,14 +51,13 @@ options: default: "https://api.honeybadger.io/v1/deploys" validate_certs: description: - - If V(false), SSL certificates for the target url will not be validated. This should only be used - on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates for the target url will not be validated. This should only be used on personally controlled + sites using self-signed certificates. 
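The homectl.py change above makes the C(crypt) import optional and falls back to C(legacycrypt) on Python 3.13+. A minimal sketch of producing the SHA-512 hash with 10000 rounds that the module documentation describes, assuming a libcrypt that supports the C($6$) method and assuming legacycrypt exposes the same C(crypt()) call as the removed standard-library module:

import secrets

try:
    import crypt  # standard library up to Python 3.12
except ImportError:
    import legacycrypt as crypt  # assumed drop-in replacement on Python 3.13+

_SALT_CHARS = "./abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
salt = "".join(secrets.choice(_SALT_CHARS) for _ in range(16))

# "$6$" selects SHA-512; rounds=10000 matches the behaviour described in the module docs.
hashed = crypt.crypt("example-password", "$6$rounds=10000$" + salt)
print(hashed)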
type: bool default: true +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Notify Honeybadger.io about an app deployment community.general.honeybadger_deployment: token: AAAAAA @@ -67,9 +65,9 @@ EXAMPLES = ''' user: ansible revision: b6826b8 repo: 'git@github.com:user/repo.git' -''' +""" -RETURN = '''# ''' +RETURN = """# """ import traceback diff --git a/plugins/modules/hpilo_boot.py b/plugins/modules/hpilo_boot.py index ace79a493a..60f3ecc958 100644 --- a/plugins/modules/hpilo_boot.py +++ b/plugins/modules/hpilo_boot.py @@ -9,14 +9,13 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hpilo_boot author: Dag Wieers (@dagwieers) short_description: Boot system using specific media through HP iLO interface description: - - "This module boots a system through its HP iLO interface. The boot media - can be one of: cdrom, floppy, hdd, network or usb." + - 'This module boots a system through its HP iLO interface. The boot media can be one of: V(cdrom), V(floppy), V(hdd), + V(network), or V(usb).' - This module requires the hpilo python module. extends_documentation_fragment: - community.general.attributes @@ -43,33 +42,32 @@ options: type: str media: description: - - The boot media to boot the system from - choices: [ "cdrom", "floppy", "rbsu", "hdd", "network", "normal", "usb" ] + - The boot media to boot the system from. + choices: ["cdrom", "floppy", "rbsu", "hdd", "network", "normal", "usb"] type: str image: description: - - The URL of a cdrom, floppy or usb boot media image. - protocol://username:password@hostname:port/filename - - protocol is either 'http' or 'https' - - username:password is optional - - port is optional + - The URL of a cdrom, floppy or usb boot media image in the form V(protocol://username:password@hostname:port/filename). + - V(protocol) is either V(http) or V(https). + - V(username:password) is optional. + - V(port) is optional. type: str state: description: - The state of the boot media. - - "no_boot: Do not boot from the device" - - "boot_once: Boot from the device once and then notthereafter" - - "boot_always: Boot from the device each time the server is rebooted" - - "connect: Connect the virtual media device and set to boot_always" - - "disconnect: Disconnects the virtual media device and set to no_boot" - - "poweroff: Power off the server" + - "V(no_boot): Do not boot from the device" + - "V(boot_once): Boot from the device once and then notthereafter" + - "V(boot_always): Boot from the device each time the server is rebooted" + - "V(connect): Connect the virtual media device and set to boot_always" + - "V(disconnect): Disconnects the virtual media device and set to no_boot" + - "V(poweroff): Power off the server" default: boot_once type: str - choices: [ "boot_always", "boot_once", "connect", "disconnect", "no_boot", "poweroff" ] + choices: ["boot_always", "boot_once", "connect", "disconnect", "no_boot", "poweroff"] force: description: - - Whether to force a reboot (even when the system is already booted). - - As a safeguard, without force, hpilo_boot will refuse to reboot a server that is already running. + - Whether to force a reboot (even when the system is already booted). + - As a safeguard, without force, hpilo_boot will refuse to reboot a server that is already running. default: false type: bool ssl_version: @@ -77,16 +75,16 @@ options: - Change the ssl_version used. 
default: TLSv1 type: str - choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ] + choices: ["SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2"] requirements: -- python-hpilo + - python-hpilo notes: -- To use a USB key image you need to specify floppy as boot media. -- This module ought to be run from a system that can access the HP iLO - interface directly, either by using C(local_action) or using C(delegate_to). -''' + - To use a USB key image you need to specify floppy as boot media. + - This module ought to be run from a system that can access the HP iLO interface directly, either by using C(local_action) + or using C(delegate_to). +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Task to boot a system using an ISO from an HP iLO interface only if the system is an HP server community.general.hpilo_boot: host: YOUR_ILO_ADDRESS @@ -104,11 +102,11 @@ EXAMPLES = r''' password: YOUR_ILO_PASSWORD state: poweroff delegate_to: localhost -''' +""" -RETURN = ''' +RETURN = r""" # Default return values -''' +""" import time import traceback diff --git a/plugins/modules/hpilo_info.py b/plugins/modules/hpilo_info.py index d329764b4c..70eecb8b0e 100644 --- a/plugins/modules/hpilo_info.py +++ b/plugins/modules/hpilo_info.py @@ -9,23 +9,21 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: hpilo_info author: Dag Wieers (@dagwieers) short_description: Gather information through an HP iLO interface description: -- This module gathers information on a specific system using its HP iLO interface. - These information includes hardware and network related information useful - for provisioning (e.g. macaddress, uuid). -- This module requires the C(hpilo) python module. + - This module gathers information on a specific system using its HP iLO interface. These information includes hardware and + network related information useful for provisioning (for example macaddress, uuid). + - This module requires the C(hpilo) python module. extends_documentation_fragment: -- community.general.attributes -- community.general.attributes.info_module + - community.general.attributes + - community.general.attributes.info_module options: host: description: - - The HP iLO hostname/address that is linked to the physical system. + - The HP iLO hostname/address that is linked to the physical system. type: str required: true login: @@ -43,15 +41,15 @@ options: - Change the ssl_version used. default: TLSv1 type: str - choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ] + choices: ["SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2"] requirements: -- hpilo + - hpilo notes: -- This module ought to be run from a system that can access the HP iLO - interface directly, either by using C(local_action) or using C(delegate_to). -''' + - This module ought to be run from a system that can access the HP iLO interface directly, either by using C(local_action) + or using C(delegate_to). +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather facts from a HP iLO interface only if the system is an HP server community.general.hpilo_info: host: YOUR_ILO_ADDRESS @@ -64,71 +62,71 @@ EXAMPLES = r''' - ansible.builtin.fail: msg: 'CMDB serial ({{ cmdb_serialno }}) does not match hardware serial ({{ results.hw_system_serial }}) !' 
when: cmdb_serialno != results.hw_system_serial -''' +""" -RETURN = r''' +RETURN = r""" # Typical output of HP iLO_info for a physical system hw_bios_date: - description: BIOS date - returned: always - type: str - sample: 05/05/2011 + description: BIOS date. + returned: always + type: str + sample: 05/05/2011 hw_bios_version: - description: BIOS version - returned: always - type: str - sample: P68 + description: BIOS version. + returned: always + type: str + sample: P68 hw_ethX: - description: Interface information (for each interface) - returned: always - type: dict - sample: - - macaddress: 00:11:22:33:44:55 - macaddress_dash: 00-11-22-33-44-55 + description: Interface information (for each interface). + returned: always + type: dict + sample: + - macaddress: 00:11:22:33:44:55 + macaddress_dash: 00-11-22-33-44-55 hw_eth_ilo: - description: Interface information (for the iLO network interface) - returned: always - type: dict - sample: - - macaddress: 00:11:22:33:44:BA - - macaddress_dash: 00-11-22-33-44-BA + description: Interface information (for the iLO network interface). + returned: always + type: dict + sample: + - macaddress: 00:11:22:33:44:BA + - macaddress_dash: 00-11-22-33-44-BA hw_product_name: - description: Product name - returned: always - type: str - sample: ProLiant DL360 G7 + description: Product name. + returned: always + type: str + sample: ProLiant DL360 G7 hw_product_uuid: - description: Product UUID - returned: always - type: str - sample: ef50bac8-2845-40ff-81d9-675315501dac + description: Product UUID. + returned: always + type: str + sample: ef50bac8-2845-40ff-81d9-675315501dac hw_system_serial: - description: System serial number - returned: always - type: str - sample: ABC12345D6 + description: System serial number. + returned: always + type: str + sample: ABC12345D6 hw_uuid: - description: Hardware UUID - returned: always - type: str - sample: 123456ABC78901D2 + description: Hardware UUID. + returned: always + type: str + sample: 123456ABC78901D2 host_power_status: - description: - - Power status of host. - - Will be one of V(ON), V(OFF) and V(UNKNOWN). - returned: always - type: str - sample: "ON" - version_added: 3.5.0 -''' + description: + - Power status of host. + - Will be one of V(ON), V(OFF) and V(UNKNOWN). + returned: always + type: str + sample: "ON" + version_added: 3.5.0 +""" import re import traceback diff --git a/plugins/modules/hponcfg.py b/plugins/modules/hponcfg.py index 612a20d923..654ba2c710 100644 --- a/plugins/modules/hponcfg.py +++ b/plugins/modules/hponcfg.py @@ -9,13 +9,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: hponcfg author: Dag Wieers (@dagwieers) -short_description: Configure HP iLO interface using hponcfg +short_description: Configure HP iLO interface using C(hponcfg) description: - - This modules configures the HP iLO interface using hponcfg. + - This modules configures the HP iLO interface using C(hponcfg). extends_documentation_fragment: - community.general.attributes attributes: @@ -26,32 +25,32 @@ attributes: options: path: description: - - The XML file as accepted by hponcfg. + - The XML file as accepted by C(hponcfg). required: true aliases: ['src'] type: path minfw: description: - - The minimum firmware level needed. + - The minimum firmware level needed. required: false type: str executable: description: - - Path to the hponcfg executable (C(hponcfg) which uses $PATH). 
+ - Path to the hponcfg executable (C(hponcfg) which uses E(PATH)). default: hponcfg type: str verbose: description: - - Run hponcfg in verbose mode (-v). + - Run C(hponcfg) in verbose mode (-v). default: false type: bool requirements: - - hponcfg tool + - hponcfg tool notes: - - You need a working hponcfg on the target system. -''' + - You need a working C(hponcfg) on the target system. +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Example hponcfg configuration XML ansible.builtin.copy: content: | @@ -78,7 +77,7 @@ EXAMPLES = r''' community.general.hponcfg: src: /tmp/enable-ssh.xml executable: /opt/hp/tools/hponcfg -''' +""" from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper @@ -98,6 +97,7 @@ class HPOnCfg(ModuleHelper): verbose=cmd_runner_fmt.as_bool("-v"), minfw=cmd_runner_fmt.as_opt_val("-m"), ) + use_old_vardict = False def __run__(self): runner = CmdRunner( diff --git a/plugins/modules/htpasswd.py b/plugins/modules/htpasswd.py index 9633ce2fb5..de94765130 100644 --- a/plugins/modules/htpasswd.py +++ b/plugins/modules/htpasswd.py @@ -9,7 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: htpasswd short_description: Manage user files for basic authentication description: @@ -24,13 +24,13 @@ options: path: type: path required: true - aliases: [ dest, destfile ] + aliases: [dest, destfile] description: - Path to the file that contains the usernames and passwords. name: type: str required: true - aliases: [ username ] + aliases: [username] description: - User name to add or remove. password: @@ -44,19 +44,17 @@ options: required: false default: "apr_md5_crypt" description: - - Hashing scheme to be used. As well as the four choices listed - here, you can also use any other hash supported by passlib, such as - V(portable_apache22) and V(host_apache24); or V(md5_crypt) and V(sha256_crypt), - which are Linux passwd hashes. Only some schemes in addition to - the four choices below will be compatible with Apache or Nginx, and - supported schemes depend on passlib version and its dependencies. + - Hashing scheme to be used. As well as the four choices listed here, you can also use any other hash supported by passlib, + such as V(portable_apache22) and V(host_apache24); or V(md5_crypt) and V(sha256_crypt), which are Linux passwd hashes. + Only some schemes in addition to the four choices below will be compatible with Apache or Nginx, and supported schemes + depend on passlib version and its dependencies. - See U(https://passlib.readthedocs.io/en/stable/lib/passlib.apache.html#passlib.apache.HtpasswdFile) parameter C(default_scheme). - 'Some of the available choices might be: V(apr_md5_crypt), V(des_crypt), V(ldap_sha1), V(plaintext).' aliases: [crypt_scheme] state: type: str required: false - choices: [ present, absent ] + choices: [present, absent] default: "present" description: - Whether the user entry should be present or not. @@ -65,22 +63,21 @@ options: type: bool default: true description: - - Used with O(state=present). If V(true), the file will be created - if it does not exist. Conversely, if set to V(false) and the file - does not exist it will fail. + - Used with O(state=present). If V(true), the file will be created if it does not exist. Conversely, if set to V(false) + and the file does not exist it will fail. 
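The crypt_scheme documentation above points at passlib's C(HtpasswdFile) and its C(default_scheme) parameter. A short sketch of the underlying passlib calls, with a made-up path and credentials (requires passlib >= 1.6, as the module states; this is an illustration, not the module's own code):

from passlib.apache import HtpasswdFile

# Create the file if it does not exist (roughly what create=true allows) and
# pick the hashing scheme, mirroring the module's crypt_scheme option.
ht = HtpasswdFile("/tmp/example-passwdfile", new=True, default_scheme="apr_md5_crypt")
ht.set_password("janedoe", "s3cr3t")  # adds the user or updates an existing entry
ht.save()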
notes: - - "This module depends on the C(passlib) Python library, which needs to be installed on all target systems." - - "On Debian < 11, Ubuntu <= 20.04, or Fedora: install C(python-passlib)." - - "On Debian, Ubuntu: install C(python3-passlib)." - - "On RHEL or CentOS: Enable EPEL, then install C(python-passlib)." -requirements: [ passlib>=1.6 ] + - This module depends on the C(passlib) Python library, which needs to be installed on all target systems. + - 'On Debian < 11, Ubuntu <= 20.04, or Fedora: install C(python-passlib).' + - 'On Debian, Ubuntu: install C(python3-passlib).' + - 'On RHEL or CentOS: Enable EPEL, then install C(python-passlib).' +requirements: [passlib>=1.6] author: "Ansible Core Team" extends_documentation_fragment: - files - community.general.attributes -''' +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Add a user to a password file and ensure permissions are set community.general.htpasswd: path: /etc/nginx/passwdfile @@ -246,8 +243,9 @@ def main(): (msg, changed) = absent(path, username, check_mode) else: module.fail_json(msg="Invalid state: %s" % state) + return # needed to make pylint happy - check_file_attrs(module, changed, msg) + (msg, changed) = check_file_attrs(module, changed, msg) module.exit_json(msg=msg, changed=changed) except Exception as e: module.fail_json(msg=to_native(e)) diff --git a/plugins/modules/hwc_ecs_instance.py b/plugins/modules/hwc_ecs_instance.py index 9ba95dc96d..f01b7c48fd 100644 --- a/plugins/modules/hwc_ecs_instance.py +++ b/plugins/modules/hwc_ecs_instance.py @@ -12,230 +12,207 @@ __metaclass__ = type # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_ecs_instance description: - - instance management. + - Instance management. short_description: Creates a resource of Ecs/Instance in Huawei Cloud version_added: '0.2.0' author: Huawei Inc. (@huaweicloud) requirements: - - keystoneauth1 >= 3.6.0 + - keystoneauth1 >= 3.6.0 attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - state: - description: - - Whether the given object should exist in Huawei Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - timeouts: - description: - - The timeouts for each operations. - type: dict - default: {} - suboptions: - create: - description: - - The timeouts for create operation. - type: str - default: '30m' - update: - description: - - The timeouts for update operation. - type: str - default: '30m' - delete: - description: - - The timeouts for delete operation. - type: str - default: '30m' - availability_zone: - description: - - Specifies the name of the AZ where the ECS is located. - type: str - required: true - flavor_name: - description: - - Specifies the name of the system flavor. - type: str - required: true - image_id: - description: - - Specifies the ID of the system image. - type: str - required: true - name: - description: - - Specifies the ECS name. Value requirements consists of 1 to 64 - characters, including letters, digits, underscores (V(_)), hyphens - (V(-)), periods (V(.)). - type: str - required: true - nics: - description: - - Specifies the NIC information of the ECS. Constraints the - network of the NIC must belong to the VPC specified by vpc_id. A - maximum of 12 NICs can be attached to an ECS. 
- type: list - elements: dict - required: true - suboptions: - ip_address: - description: - - Specifies the IP address of the NIC. The value is an IPv4 - address. Its value must be an unused IP - address in the network segment of the subnet. - type: str - required: true - subnet_id: - description: - - Specifies the ID of subnet. - type: str - required: true - root_volume: - description: - - Specifies the configuration of the ECS's system disks. - type: dict - required: true - suboptions: - volume_type: - description: - - Specifies the ECS system disk type. - - SATA is common I/O disk type. - - SAS is high I/O disk type. - - SSD is ultra-high I/O disk type. - - co-p1 is high I/O (performance-optimized I) disk type. - - uh-l1 is ultra-high I/O (latency-optimized) disk type. - - NOTE is For HANA, HL1, and HL2 ECSs, use co-p1 and uh-l1 - disks. For other ECSs, do not use co-p1 or uh-l1 disks. - type: str - required: true - size: - description: - - Specifies the system disk size, in GB. The value range is - 1 to 1024. The system disk size must be - greater than or equal to the minimum system disk size - supported by the image (min_disk attribute of the image). - If this parameter is not specified or is set to 0, the - default system disk size is the minimum value of the - system disk in the image (min_disk attribute of the - image). - type: int - required: false - snapshot_id: - description: - - Specifies the snapshot ID or ID of the original data disk - contained in the full-ECS image. - type: str - required: false - vpc_id: - description: - - Specifies the ID of the VPC to which the ECS belongs. - type: str - required: true - admin_pass: - description: - - Specifies the initial login password of the administrator account - for logging in to an ECS using password authentication. The Linux - administrator is root, and the Windows administrator is - Administrator. Password complexity requirements, consists of 8 to - 26 characters. The password must contain at least three of the - following character types 'uppercase letters, lowercase letters, - digits, and special characters (!@$%^-_=+[{}]:,./?)'. The password - cannot contain the username or the username in reverse. The - Windows ECS password cannot contain the username, the username in - reverse, or more than two consecutive characters in the username. - type: str - required: false - data_volumes: - description: - - Specifies the data disks of ECS instance. - type: list - elements: dict - required: false - suboptions: - volume_id: - description: - - Specifies the disk ID. - type: str - required: true - device: - description: - - Specifies the disk device name. - type: str - required: false + state: description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. + type: dict + default: {} + suboptions: + create: description: - - Specifies the description of an ECS, which is a null string by - default. Can contain a maximum of 85 characters. Cannot contain - special characters, such as < and >. + - The timeouts for create operation. + type: str + default: '30m' + update: + description: + - The timeouts for update operation. + type: str + default: '30m' + delete: + description: + - The timeouts for delete operation. + type: str + default: '30m' + availability_zone: + description: + - Specifies the name of the AZ where the ECS is located. 
+ type: str + required: true + flavor_name: + description: + - Specifies the name of the system flavor. + type: str + required: true + image_id: + description: + - Specifies the ID of the system image. + type: str + required: true + name: + description: + - Specifies the ECS name. Value requirements consists of 1 to 64 characters, including letters, digits, underscores + (V(_)), hyphens (V(-)), periods (V(.)). + type: str + required: true + nics: + description: + - Specifies the NIC information of the ECS. Constraints the network of the NIC must belong to the VPC specified by vpc_id. + A maximum of 12 NICs can be attached to an ECS. + type: list + elements: dict + required: true + suboptions: + ip_address: + description: + - Specifies the IP address of the NIC. The value is an IPv4 address. Its value must be an unused IP address in the + network segment of the subnet. + type: str + required: true + subnet_id: + description: + - Specifies the ID of subnet. + type: str + required: true + root_volume: + description: + - Specifies the configuration of the ECS's system disks. + type: dict + required: true + suboptions: + volume_type: + description: + - Specifies the ECS system disk type. + - SATA is common I/O disk type. + - SAS is high I/O disk type. + - SSD is ultra-high I/O disk type. + - Co-p1 is high I/O (performance-optimized I) disk type. + - Uh-l1 is ultra-high I/O (latency-optimized) disk type. + - NOTE is For HANA, HL1, and HL2 ECSs, use co-p1 and uh-l1 disks. For other ECSs, do not use co-p1 or uh-l1 disks. + type: str + required: true + size: + description: + - Specifies the system disk size, in GB. The value range is 1 to 1024. The system disk size must be greater than + or equal to the minimum system disk size supported by the image (min_disk attribute of the image). If this parameter + is not specified or is set to 0, the default system disk size is the minimum value of the system disk in the image + (min_disk attribute of the image). + type: int + required: false + snapshot_id: + description: + - Specifies the snapshot ID or ID of the original data disk contained in the full-ECS image. type: str required: false - eip_id: + vpc_id: + description: + - Specifies the ID of the VPC to which the ECS belongs. + type: str + required: true + admin_pass: + description: + - Specifies the initial login password of the administrator account for logging in to an ECS using password authentication. + The Linux administrator is root, and the Windows administrator is Administrator. Password complexity requirements, + consists of 8 to 26 characters. The password must contain at least three of the following character types 'uppercase + letters, lowercase letters, digits, and special characters (V(!@$%^-_=+[{}]:,./?))'. The password cannot contain the + username or the username in reverse. The Windows ECS password cannot contain the username, the username in reverse, + or more than two consecutive characters in the username. + type: str + required: false + data_volumes: + description: + - Specifies the data disks of ECS instance. + type: list + elements: dict + required: false + suboptions: + volume_id: description: - - Specifies the ID of the elastic IP address assigned to the ECS. - Only elastic IP addresses in the DOWN state can be - assigned. - type: str - required: false - enable_auto_recovery: - description: - - Specifies whether automatic recovery is enabled on the ECS. 
- type: bool - required: false - enterprise_project_id: - description: - - Specifies the ID of the enterprise project to which the ECS - belongs. - type: str - required: false - security_groups: - description: - - Specifies the security groups of the ECS. If this - parameter is left blank, the default security group is bound to - the ECS by default. - type: list - elements: str - required: false - server_metadata: - description: - - Specifies the metadata of ECS to be created. - type: dict - required: false - server_tags: - description: - - Specifies the tags of an ECS. When you create ECSs, one ECS - supports up to 10 tags. - type: dict - required: false - ssh_key_name: - description: - - Specifies the name of the SSH key used for logging in to the ECS. - type: str - required: false - user_data: - description: - - Specifies the user data to be injected during the ECS creation - process. Text, text files, and gzip files can be injected. - The content to be injected must be encoded with - base64. The maximum size of the content to be injected (before - encoding) is 32 KB. For Linux ECSs, this parameter does not take - effect when adminPass is used. + - Specifies the disk ID. + type: str + required: true + device: + description: + - Specifies the disk device name. type: str required: false + description: + description: + - Specifies the description of an ECS, which is a null string by default. Can contain a maximum of 85 characters. Cannot + contain special characters, such as V(<) and V(>). + type: str + required: false + eip_id: + description: + - Specifies the ID of the elastic IP address assigned to the ECS. Only elastic IP addresses in the DOWN state can be + assigned. + type: str + required: false + enable_auto_recovery: + description: + - Specifies whether automatic recovery is enabled on the ECS. + type: bool + required: false + enterprise_project_id: + description: + - Specifies the ID of the enterprise project to which the ECS belongs. + type: str + required: false + security_groups: + description: + - Specifies the security groups of the ECS. If this parameter is left blank, the default security group is bound to + the ECS by default. + type: list + elements: str + required: false + server_metadata: + description: + - Specifies the metadata of ECS to be created. + type: dict + required: false + server_tags: + description: + - Specifies the tags of an ECS. When you create ECSs, one ECS supports up to 10 tags. + type: dict + required: false + ssh_key_name: + description: + - Specifies the name of the SSH key used for logging in to the ECS. + type: str + required: false + user_data: + description: + - Specifies the user data to be injected during the ECS creation process. Text, text files, and gzip files can be injected. + The content to be injected must be encoded with base64. The maximum size of the content to be injected (before encoding) + is 32 KB. For Linux ECSs, this parameter does not take effect when adminPass is used. + type: str + required: false extends_documentation_fragment: - community.general.hwc - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # create an ecs instance - name: Create a vpc hwc_network_vpc: @@ -285,238 +262,216 @@ EXAMPLES = ''' vpc_id: "{{ vpc.id }}" root_volume: volume_type: "SAS" -''' +""" -RETURN = ''' - availability_zone: - description: - - Specifies the name of the AZ where the ECS is located. - type: str - returned: success - flavor_name: - description: - - Specifies the name of the system flavor. 
- type: str - returned: success - image_id: - description: - - Specifies the ID of the system image. - type: str - returned: success - name: - description: - - Specifies the ECS name. Value requirements "Consists of 1 to 64 - characters, including letters, digits, underscores (V(_)), hyphens - (V(-)), periods (V(.)).". - type: str - returned: success - nics: - description: - - Specifies the NIC information of the ECS. The - network of the NIC must belong to the VPC specified by vpc_id. A - maximum of 12 NICs can be attached to an ECS. - type: list - returned: success - contains: - ip_address: - description: - - Specifies the IP address of the NIC. The value is an IPv4 - address. Its value must be an unused IP - address in the network segment of the subnet. - type: str - returned: success - subnet_id: - description: - - Specifies the ID of subnet. - type: str - returned: success - port_id: - description: - - Specifies the port ID corresponding to the IP address. - type: str - returned: success - root_volume: - description: - - Specifies the configuration of the ECS's system disks. - type: dict - returned: success - contains: - volume_type: - description: - - Specifies the ECS system disk type. - - SATA is common I/O disk type. - - SAS is high I/O disk type. - - SSD is ultra-high I/O disk type. - - co-p1 is high I/O (performance-optimized I) disk type. - - uh-l1 is ultra-high I/O (latency-optimized) disk type. - - NOTE is For HANA, HL1, and HL2 ECSs, use co-p1 and uh-l1 - disks. For other ECSs, do not use co-p1 or uh-l1 disks. - type: str - returned: success - size: - description: - - Specifies the system disk size, in GB. The value range is - 1 to 1024. The system disk size must be - greater than or equal to the minimum system disk size - supported by the image (min_disk attribute of the image). - If this parameter is not specified or is set to 0, the - default system disk size is the minimum value of the - system disk in the image (min_disk attribute of the - image). - type: int - returned: success - snapshot_id: - description: - - Specifies the snapshot ID or ID of the original data disk - contained in the full-ECS image. - type: str - returned: success - device: - description: - - Specifies the disk device name. - type: str - returned: success - volume_id: - description: - - Specifies the disk ID. - type: str - returned: success - vpc_id: - description: - - Specifies the ID of the VPC to which the ECS belongs. - type: str - returned: success - admin_pass: - description: - - Specifies the initial login password of the administrator account - for logging in to an ECS using password authentication. The Linux - administrator is root, and the Windows administrator is - Administrator. Password complexity requirements consists of 8 to - 26 characters. The password must contain at least three of the - following character types "uppercase letters, lowercase letters, - digits, and special characters (!@$%^-_=+[{}]:,./?)". The password - cannot contain the username or the username in reverse. The - Windows ECS password cannot contain the username, the username in - reverse, or more than two consecutive characters in the username. - type: str - returned: success - data_volumes: - description: - - Specifies the data disks of ECS instance. - type: list - returned: success - contains: - volume_id: - description: - - Specifies the disk ID. - type: str - returned: success - device: - description: - - Specifies the disk device name. 
- type: str - returned: success - description: - description: - - Specifies the description of an ECS, which is a null string by - default. Can contain a maximum of 85 characters. Cannot contain - special characters, such as < and >. - type: str - returned: success - eip_id: - description: - - Specifies the ID of the elastic IP address assigned to the ECS. - Only elastic IP addresses in the DOWN state can be assigned. - type: str - returned: success - enable_auto_recovery: - description: - - Specifies whether automatic recovery is enabled on the ECS. - type: bool - returned: success - enterprise_project_id: - description: - - Specifies the ID of the enterprise project to which the ECS - belongs. - type: str - returned: success - security_groups: - description: - - Specifies the security groups of the ECS. If this parameter is left - blank, the default security group is bound to the ECS by default. - type: list - returned: success - server_metadata: - description: - - Specifies the metadata of ECS to be created. - type: dict - returned: success - server_tags: - description: - - Specifies the tags of an ECS. When you create ECSs, one ECS - supports up to 10 tags. - type: dict - returned: success - ssh_key_name: - description: - - Specifies the name of the SSH key used for logging in to the ECS. - type: str - returned: success - user_data: - description: - - Specifies the user data to be injected during the ECS creation - process. Text, text files, and gzip files can be injected. - The content to be injected must be encoded with base64. The maximum - size of the content to be injected (before encoding) is 32 KB. For - Linux ECSs, this parameter does not take effect when adminPass is - used. - type: str - returned: success - config_drive: - description: - - Specifies the configuration driver. - type: str - returned: success - created: - description: - - Specifies the time when an ECS was created. - type: str - returned: success - disk_config_type: - description: - - Specifies the disk configuration type. MANUAL is The image - space is not expanded. AUTO is the image space of the system disk - will be expanded to be as same as the flavor. - type: str - returned: success - host_name: - description: - - Specifies the host name of the ECS. - type: str - returned: success - image_name: - description: - - Specifies the image name of the ECS. - type: str - returned: success - power_state: - description: - - Specifies the power status of the ECS. - type: int - returned: success - server_alias: - description: - - Specifies the ECS alias. - type: str - returned: success - status: - description: - - Specifies the ECS status. Options are ACTIVE, REBOOT, HARD_REBOOT, - REBUILD, MIGRATING, BUILD, SHUTOFF, RESIZE, VERIFY_RESIZE, ERROR, - and DELETED. - type: str - returned: success -''' +RETURN = r""" +availability_zone: + description: + - Specifies the name of the AZ where the ECS is located. + type: str + returned: success +flavor_name: + description: + - Specifies the name of the system flavor. + type: str + returned: success +image_id: + description: + - Specifies the ID of the system image. + type: str + returned: success +name: + description: + - Specifies the ECS name. Value requirements "Consists of 1 to 64 characters, including letters, digits, underscores (V(_)), + hyphens (V(-)), periods (V(.)).". + type: str + returned: success +nics: + description: + - Specifies the NIC information of the ECS. The network of the NIC must belong to the VPC specified by vpc_id. 
A maximum + of 12 NICs can be attached to an ECS. + type: list + returned: success + contains: + ip_address: + description: + - Specifies the IP address of the NIC. The value is an IPv4 address. Its value must be an unused IP address in the + network segment of the subnet. + type: str + returned: success + subnet_id: + description: + - Specifies the ID of subnet. + type: str + returned: success + port_id: + description: + - Specifies the port ID corresponding to the IP address. + type: str + returned: success +root_volume: + description: + - Specifies the configuration of the ECS's system disks. + type: dict + returned: success + contains: + volume_type: + description: + - Specifies the ECS system disk type. + - SATA is common I/O disk type. + - SAS is high I/O disk type. + - SSD is ultra-high I/O disk type. + - Co-p1 is high I/O (performance-optimized I) disk type. + - Uh-l1 is ultra-high I/O (latency-optimized) disk type. + - NOTE is For HANA, HL1, and HL2 ECSs, use co-p1 and uh-l1 disks. For other ECSs, do not use co-p1 or uh-l1 disks. + type: str + returned: success + size: + description: + - Specifies the system disk size, in GB. The value range is 1 to 1024. The system disk size must be greater than or + equal to the minimum system disk size supported by the image (min_disk attribute of the image). If this parameter + is not specified or is set to 0, the default system disk size is the minimum value of the system disk in the image + (min_disk attribute of the image). + type: int + returned: success + snapshot_id: + description: + - Specifies the snapshot ID or ID of the original data disk contained in the full-ECS image. + type: str + returned: success + device: + description: + - Specifies the disk device name. + type: str + returned: success + volume_id: + description: + - Specifies the disk ID. + type: str + returned: success +vpc_id: + description: + - Specifies the ID of the VPC to which the ECS belongs. + type: str + returned: success +admin_pass: + description: + - Specifies the initial login password of the administrator account for logging in to an ECS using password authentication. + The Linux administrator is root, and the Windows administrator is Administrator. Password complexity requirements consists + of 8 to 26 characters. The password must contain at least three of the following character types "uppercase letters, + lowercase letters, digits, and special characters (!@$%^-_=+[{}]:,./?)". The password cannot contain the username or + the username in reverse. The Windows ECS password cannot contain the username, the username in reverse, or more than + two consecutive characters in the username. + type: str + returned: success +data_volumes: + description: + - Specifies the data disks of ECS instance. + type: list + returned: success + contains: + volume_id: + description: + - Specifies the disk ID. + type: str + returned: success + device: + description: + - Specifies the disk device name. + type: str + returned: success +description: + description: + - Specifies the description of an ECS, which is a null string by default. Can contain a maximum of 85 characters. Cannot + contain special characters, such as < and >. + type: str + returned: success +eip_id: + description: + - Specifies the ID of the elastic IP address assigned to the ECS. Only elastic IP addresses in the DOWN state can be assigned. + type: str + returned: success +enable_auto_recovery: + description: + - Specifies whether automatic recovery is enabled on the ECS. 
+ type: bool + returned: success +enterprise_project_id: + description: + - Specifies the ID of the enterprise project to which the ECS belongs. + type: str + returned: success +security_groups: + description: + - Specifies the security groups of the ECS. If this parameter is left blank, the default security group is bound to the + ECS by default. + type: list + returned: success +server_metadata: + description: + - Specifies the metadata of ECS to be created. + type: dict + returned: success +server_tags: + description: + - Specifies the tags of an ECS. When you create ECSs, one ECS supports up to 10 tags. + type: dict + returned: success +ssh_key_name: + description: + - Specifies the name of the SSH key used for logging in to the ECS. + type: str + returned: success +user_data: + description: + - Specifies the user data to be injected during the ECS creation process. Text, text files, and gzip files can be injected. + The content to be injected must be encoded with base64. The maximum size of the content to be injected (before encoding) + is 32 KB. For Linux ECSs, this parameter does not take effect when adminPass is used. + type: str + returned: success +config_drive: + description: + - Specifies the configuration driver. + type: str + returned: success +created: + description: + - Specifies the time when an ECS was created. + type: str + returned: success +disk_config_type: + description: + - Specifies the disk configuration type. MANUAL is The image space is not expanded. AUTO is the image space of the system + disk will be expanded to be as same as the flavor. + type: str + returned: success +host_name: + description: + - Specifies the host name of the ECS. + type: str + returned: success +image_name: + description: + - Specifies the image name of the ECS. + type: str + returned: success +power_state: + description: + - Specifies the power status of the ECS. + type: int + returned: success +server_alias: + description: + - Specifies the ECS alias. + type: str + returned: success +status: + description: + - Specifies the ECS status. Options are ACTIVE, REBOOT, HARD_REBOOT, REBUILD, MIGRATING, BUILD, SHUTOFF, RESIZE, VERIFY_RESIZE, + ERROR, and DELETED. + type: str + returned: success +""" from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( Config, HwcClientException, HwcModule, are_different_dicts, build_path, @@ -1163,8 +1118,7 @@ def send_delete_volume_request(module, params, client, info): path_parameters = { "volume_id": ["volume_id"], } - data = dict((key, navigate_value(info, path)) - for key, path in path_parameters.items()) + data = {key: navigate_value(info, path) for key, path in path_parameters.items()} url = build_path(module, "cloudservers/{id}/detachvolume/{volume_id}", data) diff --git a/plugins/modules/hwc_evs_disk.py b/plugins/modules/hwc_evs_disk.py index 7d445ddd21..0763c07b01 100644 --- a/plugins/modules/hwc_evs_disk.py +++ b/plugins/modules/hwc_evs_disk.py @@ -12,155 +12,135 @@ __metaclass__ = type # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_evs_disk description: - - block storage management. + - Block storage management. short_description: Creates a resource of Evs/Disk in Huawei Cloud version_added: '0.2.0' author: Huawei Inc. 
(@huaweicloud) requirements: - - keystoneauth1 >= 3.6.0 + - keystoneauth1 >= 3.6.0 attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - state: - description: - - Whether the given object should exist in Huaweicloud Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - timeouts: - description: - - The timeouts for each operations. - type: dict - default: {} - suboptions: - create: - description: - - The timeouts for create operation. - type: str - default: '30m' - update: - description: - - The timeouts for update operation. - type: str - default: '30m' - delete: - description: - - The timeouts for delete operation. - type: str - default: '30m' - availability_zone: - description: - - Specifies the AZ where you want to create the disk. - type: str - required: true - name: - description: - - Specifies the disk name. The value can contain a maximum of 255 - bytes. - type: str - required: true - volume_type: - description: - - Specifies the disk type. Currently, the value can be SSD, SAS, or - SATA. - - SSD specifies the ultra-high I/O disk type. - - SAS specifies the high I/O disk type. - - SATA specifies the common I/O disk type. - - If the specified disk type is not available in the AZ, the - disk will fail to create. If the EVS disk is created from a - snapshot, the volume_type field must be the same as that of the - snapshot's source disk. - type: str - required: true - backup_id: - description: - - Specifies the ID of the backup that can be used to create a disk. - This parameter is mandatory when you use a backup to create the - disk. - type: str - required: false + state: description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. + type: dict + default: {} + suboptions: + create: description: - - Specifies the disk description. The value can contain a maximum - of 255 bytes. + - The timeouts for create operation. type: str - required: false - enable_full_clone: + default: '30m' + update: description: - - If the disk is created from a snapshot and linked cloning needs - to be used, set this parameter to True. - type: bool - required: false - enable_scsi: - description: - - If this parameter is set to True, the disk device type will be - SCSI, which allows ECS OSs to directly access underlying storage - media. SCSI reservation command is supported. If this parameter - is set to False, the disk device type will be VBD, which supports - only simple SCSI read/write commands. - - If parameter enable_share is set to True and this parameter - is not specified, shared SCSI disks are created. SCSI EVS disks - cannot be created from backups, which means that this parameter - cannot be True if backup_id has been specified. - type: bool - required: false - enable_share: - description: - - Specifies whether the disk is shareable. The default value is - False. - type: bool - required: false - encryption_id: - description: - - Specifies the encryption ID. The length of it fixes at 36 bytes. + - The timeouts for update operation. type: str - required: false - enterprise_project_id: + default: '30m' + delete: description: - - Specifies the enterprise project ID. This ID is associated with - the disk during the disk creation. If it is not specified, the - disk is bound to the default enterprise project. + - The timeouts for delete operation. 
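A minimal sketch of overriding the timeouts dictionary documented above; the '60m' value is an assumption in the same format as the documented '30m' defaults, and the other values reuse the module's own example.

- name: Create a disk with a longer create timeout (illustrative)
  community.general.hwc_evs_disk:
    availability_zone: "cn-north-1a"
    name: "ansible_evs_disk_test"
    volume_type: "SATA"
    size: 10
    timeouts:
      create: "60m"   # assumed value; the documented default is '30m'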
type: str - required: false - image_id: - description: - - Specifies the image ID. If this parameter is specified, the disk - is created from an image. BMS system disks cannot be - created from BMS images. - type: str - required: false - size: - description: - - Specifies the disk size, in GB. Its values are as follows, System - disk 1 GB to 1024 GB, Data disk 10 GB to 32768 GB. This - parameter is mandatory when you create an empty disk or use an - image or a snapshot to create a disk. If you use an image or a - snapshot to create a disk, the disk size must be greater than or - equal to the image or snapshot size. This parameter is optional - when you use a backup to create a disk. If this parameter is not - specified, the disk size is equal to the backup size. - type: int - required: false - snapshot_id: - description: - - Specifies the snapshot ID. If this parameter is specified, the - disk is created from a snapshot. - type: str - required: false + default: '30m' + availability_zone: + description: + - Specifies the AZ where you want to create the disk. + type: str + required: true + name: + description: + - Specifies the disk name. The value can contain a maximum of 255 bytes. + type: str + required: true + volume_type: + description: + - Specifies the disk type. Currently, the value can be SSD, SAS, or SATA. + - SSD specifies the ultra-high I/O disk type. + - SAS specifies the high I/O disk type. + - SATA specifies the common I/O disk type. + - If the specified disk type is not available in the AZ, the disk will fail to create. If the EVS disk is created from + a snapshot, the volume_type field must be the same as that of the snapshot's source disk. + type: str + required: true + backup_id: + description: + - Specifies the ID of the backup that can be used to create a disk. This parameter is mandatory when you use a backup + to create the disk. + type: str + required: false + description: + description: + - Specifies the disk description. The value can contain a maximum of 255 bytes. + type: str + required: false + enable_full_clone: + description: + - If the disk is created from a snapshot and linked cloning needs to be used, set this parameter to True. + type: bool + required: false + enable_scsi: + description: + - If this parameter is set to True, the disk device type will be SCSI, which allows ECS OSs to directly access underlying + storage media. SCSI reservation command is supported. If this parameter is set to False, the disk device type will + be VBD, which supports only simple SCSI read/write commands. + - If parameter enable_share is set to True and this parameter is not specified, shared SCSI disks are created. SCSI + EVS disks cannot be created from backups, which means that this parameter cannot be True if backup_id has been specified. + type: bool + required: false + enable_share: + description: + - Specifies whether the disk is shareable. The default value is False. + type: bool + required: false + encryption_id: + description: + - Specifies the encryption ID. The length of it fixes at 36 bytes. + type: str + required: false + enterprise_project_id: + description: + - Specifies the enterprise project ID. This ID is associated with the disk during the disk creation. If it is not specified, + the disk is bound to the default enterprise project. + type: str + required: false + image_id: + description: + - Specifies the image ID. If this parameter is specified, the disk is created from an image. BMS system disks cannot + be created from BMS images. 
+ type: str + required: false + size: + description: + - Specifies the disk size, in GB. Its values are as follows, System disk 1 GB to 1024 GB, Data disk 10 GB to 32768 GB. + This parameter is mandatory when you create an empty disk or use an image or a snapshot to create a disk. If you use + an image or a snapshot to create a disk, the disk size must be greater than or equal to the image or snapshot size. + This parameter is optional when you use a backup to create a disk. If this parameter is not specified, the disk size + is equal to the backup size. + type: int + required: false + snapshot_id: + description: + - Specifies the snapshot ID. If this parameter is specified, the disk is created from a snapshot. + type: str + required: false extends_documentation_fragment: - community.general.hwc - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # test create disk - name: Create a disk community.general.hwc_evs_disk: @@ -168,176 +148,153 @@ EXAMPLES = ''' name: "ansible_evs_disk_test" volume_type: "SATA" size: 10 -''' +""" -RETURN = ''' - availability_zone: - description: - - Specifies the AZ where you want to create the disk. - type: str - returned: success - name: - description: - - Specifies the disk name. The value can contain a maximum of 255 - bytes. - type: str - returned: success - volume_type: - description: - - Specifies the disk type. Currently, the value can be SSD, SAS, or - SATA. - - SSD specifies the ultra-high I/O disk type. - - SAS specifies the high I/O disk type. - - SATA specifies the common I/O disk type. - - If the specified disk type is not available in the AZ, the - disk will fail to create. If the EVS disk is created from a - snapshot, the volume_type field must be the same as that of the - snapshot's source disk. - type: str - returned: success - backup_id: - description: - - Specifies the ID of the backup that can be used to create a disk. - This parameter is mandatory when you use a backup to create the - disk. - type: str - returned: success - description: - description: - - Specifies the disk description. The value can contain a maximum - of 255 bytes. - type: str - returned: success - enable_full_clone: - description: - - If the disk is created from a snapshot and linked cloning needs - to be used, set this parameter to True. - type: bool - returned: success - enable_scsi: - description: - - If this parameter is set to True, the disk device type will be - SCSI, which allows ECS OSs to directly access underlying storage - media. SCSI reservation command is supported. If this parameter - is set to False, the disk device type will be VBD, which supports - only simple SCSI read/write commands. - - If parameter enable_share is set to True and this parameter - is not specified, shared SCSI disks are created. SCSI EVS disks - cannot be created from backups, which means that this parameter - cannot be True if backup_id has been specified. - type: bool - returned: success - enable_share: - description: - - Specifies whether the disk is shareable. The default value is - False. - type: bool - returned: success - encryption_id: - description: - - Specifies the encryption ID. The length of it fixes at 36 bytes. - type: str - returned: success - enterprise_project_id: - description: - - Specifies the enterprise project ID. This ID is associated with - the disk during the disk creation. If it is not specified, the - disk is bound to the default enterprise project. - type: str - returned: success - image_id: - description: - - Specifies the image ID. 
If this parameter is specified, the disk - is created from an image. BMS system disks cannot be - created from BMS images. - type: str - returned: success - size: - description: - - Specifies the disk size, in GB. Its values are as follows, System - disk 1 GB to 1024 GB, Data disk 10 GB to 32768 GB. This - parameter is mandatory when you create an empty disk or use an - image or a snapshot to create a disk. If you use an image or a - snapshot to create a disk, the disk size must be greater than or - equal to the image or snapshot size. This parameter is optional - when you use a backup to create a disk. If this parameter is not - specified, the disk size is equal to the backup size. - type: int - returned: success - snapshot_id: - description: - - Specifies the snapshot ID. If this parameter is specified, the - disk is created from a snapshot. - type: str - returned: success - attachments: - description: - - Specifies the disk attachment information. - type: complex - returned: success - contains: - attached_at: - description: - - Specifies the time when the disk was attached. Time - format is 'UTC YYYY-MM-DDTHH:MM:SS'. - type: str - returned: success - attachment_id: - description: - - Specifies the ID of the attachment information. - type: str - returned: success - device: - description: - - Specifies the device name. - type: str - returned: success - server_id: - description: - - Specifies the ID of the server to which the disk is - attached. - type: str - returned: success - backup_policy_id: - description: - - Specifies the backup policy ID. - type: str - returned: success - created_at: - description: - - Specifies the time when the disk was created. Time format is 'UTC - YYYY-MM-DDTHH:MM:SS'. - type: str - returned: success - is_bootable: - description: - - Specifies whether the disk is bootable. - type: bool - returned: success - is_readonly: - description: - - Specifies whether the disk is read-only or read/write. True - indicates that the disk is read-only. False indicates that the - disk is read/write. - type: bool - returned: success - source_volume_id: - description: - - Specifies the source disk ID. This parameter has a value if the - disk is created from a source disk. - type: str - returned: success - status: - description: - - Specifies the disk status. - type: str - returned: success - tags: - description: - - Specifies the disk tags. - type: dict - returned: success -''' +RETURN = r""" +availability_zone: + description: + - Specifies the AZ where you want to create the disk. + type: str + returned: success +name: + description: + - Specifies the disk name. The value can contain a maximum of 255 bytes. + type: str + returned: success +volume_type: + description: + - Specifies the disk type. Currently, the value can be SSD, SAS, or SATA. + - SSD specifies the ultra-high I/O disk type. + - SAS specifies the high I/O disk type. + - SATA specifies the common I/O disk type. + - If the specified disk type is not available in the AZ, the disk will fail to create. If the EVS disk is created from + a snapshot, the volume_type field must be the same as that of the snapshot's source disk. + type: str + returned: success +backup_id: + description: + - Specifies the ID of the backup that can be used to create a disk. This parameter is mandatory when you use a backup + to create the disk. + type: str + returned: success +description: + description: + - Specifies the disk description. The value can contain a maximum of 255 bytes. 
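A hedged sketch of the snapshot_id and enable_full_clone options documented above; the snapshot reference is a placeholder, and the comments restate the constraints from the option descriptions.

- name: Create a disk as a linked clone of a snapshot (illustrative placeholders)
  community.general.hwc_evs_disk:
    availability_zone: "cn-north-1a"
    name: "ansible_evs_disk_from_snapshot"
    volume_type: "SATA"                  # must match the snapshot's source disk type
    size: 10                             # must be greater than or equal to the snapshot size
    snapshot_id: "{{ snapshot_id }}"     # placeholder for an existing snapshot ID
    enable_full_clone: true              # set as described above when linked cloning is needed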
+ type: str + returned: success +enable_full_clone: + description: + - If the disk is created from a snapshot and linked cloning needs to be used, set this parameter to True. + type: bool + returned: success +enable_scsi: + description: + - If this parameter is set to True, the disk device type will be SCSI, which allows ECS OSs to directly access underlying + storage media. SCSI reservation command is supported. If this parameter is set to False, the disk device type will be + VBD, which supports only simple SCSI read/write commands. + - If parameter enable_share is set to True and this parameter is not specified, shared SCSI disks are created. SCSI EVS + disks cannot be created from backups, which means that this parameter cannot be True if backup_id has been specified. + type: bool + returned: success +enable_share: + description: + - Specifies whether the disk is shareable. The default value is False. + type: bool + returned: success +encryption_id: + description: + - Specifies the encryption ID. The length of it fixes at 36 bytes. + type: str + returned: success +enterprise_project_id: + description: + - Specifies the enterprise project ID. This ID is associated with the disk during the disk creation. If it is not specified, + the disk is bound to the default enterprise project. + type: str + returned: success +image_id: + description: + - Specifies the image ID. If this parameter is specified, the disk is created from an image. BMS system disks cannot be + created from BMS images. + type: str + returned: success +size: + description: + - Specifies the disk size, in GB. Its values are as follows, System disk 1 GB to 1024 GB, Data disk 10 GB to 32768 GB. + This parameter is mandatory when you create an empty disk or use an image or a snapshot to create a disk. If you use + an image or a snapshot to create a disk, the disk size must be greater than or equal to the image or snapshot size. + This parameter is optional when you use a backup to create a disk. If this parameter is not specified, the disk size + is equal to the backup size. + type: int + returned: success +snapshot_id: + description: + - Specifies the snapshot ID. If this parameter is specified, the disk is created from a snapshot. + type: str + returned: success +attachments: + description: + - Specifies the disk attachment information. + type: complex + returned: success + contains: + attached_at: + description: + - Specifies the time when the disk was attached. Time format is 'UTC YYYY-MM-DDTHH:MM:SS'. + type: str + returned: success + attachment_id: + description: + - Specifies the ID of the attachment information. + type: str + returned: success + device: + description: + - Specifies the device name. + type: str + returned: success + server_id: + description: + - Specifies the ID of the server to which the disk is attached. + type: str + returned: success +backup_policy_id: + description: + - Specifies the backup policy ID. + type: str + returned: success +created_at: + description: + - Specifies the time when the disk was created. Time format is 'UTC YYYY-MM-DDTHH:MM:SS'. + type: str + returned: success +is_bootable: + description: + - Specifies whether the disk is bootable. + type: bool + returned: success +is_readonly: + description: + - Specifies whether the disk is read-only or read/write. True indicates that the disk is read-only. False indicates that + the disk is read/write. + type: bool + returned: success +source_volume_id: + description: + - Specifies the source disk ID. 
This parameter has a value if the disk is created from a source disk. + type: str + returned: success +status: + description: + - Specifies the disk status. + type: str + returned: success +tags: + description: + - Specifies the disk tags. + type: dict + returned: success +""" from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( Config, HwcClientException, HwcModule, are_different_dicts, build_path, @@ -771,8 +728,7 @@ def async_wait(config, result, client, timeout): path_parameters = { "job_id": ["job_id"], } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) + data = {key: navigate_value(result, path) for key, path in path_parameters.items()} url = build_path(module, "jobs/{job_id}", data) diff --git a/plugins/modules/hwc_network_vpc.py b/plugins/modules/hwc_network_vpc.py index 357fd55204..5c0c2c8b61 100644 --- a/plugins/modules/hwc_network_vpc.py +++ b/plugins/modules/hwc_network_vpc.py @@ -12,123 +12,120 @@ __metaclass__ = type # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_network_vpc description: - - Represents an vpc resource. + - Represents an vpc resource. short_description: Creates a Huawei Cloud VPC author: Huawei Inc. (@huaweicloud) requirements: - - requests >= 2.18.4 - - keystoneauth1 >= 3.6.0 + - requests >= 2.18.4 + - keystoneauth1 >= 3.6.0 attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - state: + state: + description: + - Whether the given object should exist in VPC. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. + type: dict + default: {} + suboptions: + create: description: - - Whether the given object should exist in vpc. + - The timeout for create operation. type: str - choices: ['present', 'absent'] - default: 'present' - timeouts: + default: '15m' + update: description: - - The timeouts for each operations. - type: dict - default: {} - suboptions: - create: - description: - - The timeout for create operation. - type: str - default: '15m' - update: - description: - - The timeout for update operation. - type: str - default: '15m' - delete: - description: - - The timeout for delete operation. - type: str - default: '15m' - name: - description: - - The name of vpc. + - The timeout for update operation. type: str - required: true - cidr: + default: '15m' + delete: description: - - The range of available subnets in the vpc. + - The timeout for delete operation. type: str - required: true + default: '15m' + name: + description: + - The name of vpc. + type: str + required: true + cidr: + description: + - The range of available subnets in the VPC. 
+ type: str + required: true extends_documentation_fragment: - community.general.hwc - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a vpc community.general.hwc_network_vpc: - identity_endpoint: "{{ identity_endpoint }}" - user: "{{ user }}" - password: "{{ password }}" - domain: "{{ domain }}" - project: "{{ project }}" - region: "{{ region }}" - name: "vpc_1" - cidr: "192.168.100.0/24" - state: present -''' + identity_endpoint: "{{ identity_endpoint }}" + user: "{{ user }}" + password: "{{ password }}" + domain: "{{ domain }}" + project: "{{ project }}" + region: "{{ region }}" + name: "vpc_1" + cidr: "192.168.100.0/24" + state: present +""" -RETURN = ''' - id: - description: - - the id of vpc. - type: str - returned: success - name: - description: - - the name of vpc. - type: str - returned: success - cidr: - description: - - the range of available subnets in the vpc. - type: str - returned: success - status: - description: - - the status of vpc. - type: str - returned: success - routes: - description: - - the route information. - type: complex - returned: success - contains: - destination: - description: - - the destination network segment of a route. - type: str - returned: success - next_hop: - description: - - the next hop of a route. If the route type is peering, - it will provide VPC peering connection ID. - type: str - returned: success - enable_shared_snat: - description: - - show whether the shared snat is enabled. - type: bool - returned: success -''' +RETURN = r""" +id: + description: + - The id of vpc. + type: str + returned: success +name: + description: + - The name of vpc. + type: str + returned: success +cidr: + description: + - The range of available subnets in the vpc. + type: str + returned: success +status: + description: + - The status of vpc. + type: str + returned: success +routes: + description: + - The route information. + type: complex + returned: success + contains: + destination: + description: + - The destination network segment of a route. + type: str + returned: success + next_hop: + description: + - The next hop of a route. If the route type is peering, it will provide VPC peering connection ID. + type: str + returned: success +enable_shared_snat: + description: + - Show whether the shared snat is enabled. + type: bool + returned: success +""" ############################################################################### # Imports diff --git a/plugins/modules/hwc_smn_topic.py b/plugins/modules/hwc_smn_topic.py index bb983fba71..45923833e6 100644 --- a/plugins/modules/hwc_smn_topic.py +++ b/plugins/modules/hwc_smn_topic.py @@ -12,101 +12,92 @@ __metaclass__ = type # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_smn_topic description: - - Represents a SMN notification topic resource. -short_description: Creates a resource of SMNTopic in Huaweicloud Cloud + - Represents a SMN notification topic resource. +short_description: Creates a resource of SMNTopic in Huawei Cloud author: Huawei Inc. (@huaweicloud) requirements: - - requests >= 2.18.4 - - keystoneauth1 >= 3.6.0 + - requests >= 2.18.4 + - keystoneauth1 >= 3.6.0 attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - state: - description: - - Whether the given object should exist in Huaweicloud Cloud. 
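A brief, hypothetical follow-up showing how a registered result of community.general.hwc_network_vpc exposes the return values documented above (id, cidr, status); the connection options are omitted here for brevity.

- name: Create a vpc
  community.general.hwc_network_vpc:
    name: "vpc_1"
    cidr: "192.168.100.0/24"
    state: present
  register: vpc

- name: Show the documented return values
  ansible.builtin.debug:
    msg: "VPC {{ vpc.id }} ({{ vpc.cidr }}) is {{ vpc.status }}"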
- type: str - choices: ['present', 'absent'] - default: 'present' - display_name: - description: - - Topic display name, which is presented as the name of the email - sender in an email message. The topic display name contains a - maximum of 192 bytes. - type: str - required: false - name: - description: - - Name of the topic to be created. The topic name is a string of 1 - to 256 characters. It must contain upper- or lower-case letters, - digits, hyphens (V(-)), and underscores (V(_)), and must start with a - letter or digit. - type: str - required: true + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + display_name: + description: + - Topic display name, which is presented as the name of the email sender in an email message. The topic display name + contains a maximum of 192 bytes. + type: str + required: false + name: + description: + - Name of the topic to be created. The topic name is a string of 1 to 256 characters. It must contain upper- or lower-case + letters, digits, hyphens (V(-)), and underscores (V(_)), and must start with a letter or digit. + type: str + required: true extends_documentation_fragment: - community.general.hwc - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a smn topic community.general.hwc_smn_topic: - identity_endpoint: "{{ identity_endpoint }}" - user_name: "{{ user_name }}" - password: "{{ password }}" - domain_name: "{{ domain_name }}" - project_name: "{{ project_name }}" - region: "{{ region }}" - name: "ansible_smn_topic_test" - state: present -''' + identity_endpoint: "{{ identity_endpoint }}" + user_name: "{{ user_name }}" + password: "{{ password }}" + domain_name: "{{ domain_name }}" + project_name: "{{ project_name }}" + region: "{{ region }}" + name: "ansible_smn_topic_test" + state: present +""" -RETURN = ''' +RETURN = r""" create_time: - description: - - Time when the topic was created. - returned: success - type: str + description: + - Time when the topic was created. + returned: success + type: str display_name: - description: - - Topic display name, which is presented as the name of the email - sender in an email message. The topic display name contains a - maximum of 192 bytes. - returned: success - type: str + description: + - Topic display name, which is presented as the name of the email sender in an email message. The topic display name contains + a maximum of 192 bytes. + returned: success + type: str name: - description: - - Name of the topic to be created. The topic name is a string of 1 - to 256 characters. It must contain upper- or lower-case letters, - digits, hyphens (V(-)), and underscores (V(_)), and must start with a - letter or digit. - returned: success - type: str + description: + - Name of the topic to be created. The topic name is a string of 1 to 256 characters. It must contain upper- or lower-case + letters, digits, hyphens (V(-)), and underscores (V(_)), and must start with a letter or digit. + returned: success + type: str push_policy: - description: - - Message pushing policy. 0 indicates that the message sending - fails and the message is cached in the queue. 1 indicates that - the failed message is discarded. - returned: success - type: int + description: + - Message pushing policy. V(0) indicates that the message sending fails and the message is cached in the queue. V(1) indicates + that the failed message is discarded. 
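Purely as an assumed illustration of consuming the SMN topic return values in this patch (topic_urn appears later in the same RETURN block), the module output can be registered and inspected; connection options are omitted for brevity.

- name: Create a smn topic
  community.general.hwc_smn_topic:
    name: "ansible_smn_topic_test"
    state: present
  register: topic

- name: Show the unique topic identifier
  ansible.builtin.debug:
    msg: "Topic URN is {{ topic.topic_urn }}"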
+ returned: success + type: int topic_urn: - description: - - Resource identifier of a topic, which is unique. - returned: success - type: str + description: + - Resource identifier of a topic, which is unique. + returned: success + type: str update_time: - description: - - Time when the topic was updated. - returned: success - type: str -''' + description: + - Time when the topic was updated. + returned: success + type: str +""" ############################################################################### # Imports diff --git a/plugins/modules/hwc_vpc_eip.py b/plugins/modules/hwc_vpc_eip.py index 5c44319409..b818fe0d86 100644 --- a/plugins/modules/hwc_vpc_eip.py +++ b/plugins/modules/hwc_vpc_eip.py @@ -12,126 +12,110 @@ __metaclass__ = type # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_vpc_eip description: - - elastic ip management. -short_description: Creates a resource of Vpc/EIP in Huawei Cloud + - Elastic IP management. +short_description: Creates a resource of VPC/EIP in Huawei Cloud version_added: '0.2.0' author: Huawei Inc. (@huaweicloud) requirements: - - keystoneauth1 >= 3.6.0 + - keystoneauth1 >= 3.6.0 attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - state: + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. + type: dict + default: {} + suboptions: + create: description: - - Whether the given object should exist in Huawei Cloud. + - The timeouts for create operation. type: str - choices: ['present', 'absent'] - default: 'present' - timeouts: + default: '5m' + update: description: - - The timeouts for each operations. - type: dict - default: {} - suboptions: - create: - description: - - The timeouts for create operation. - type: str - default: '5m' - update: - description: - - The timeouts for update operation. - type: str - default: '5m' - type: + - The timeouts for update operation. + type: str + default: '5m' + type: + description: + - Specifies the EIP type. + type: str + required: true + dedicated_bandwidth: + description: + - Specifies the dedicated bandwidth object. + type: dict + required: false + suboptions: + charge_mode: description: - - Specifies the EIP type. + - Specifies whether the bandwidth is billed by traffic or by bandwidth size. The value can be bandwidth or traffic. + If this parameter is left blank or is null character string, default value bandwidth is used. For IPv6 addresses, + the default parameter value is bandwidth outside China and is traffic in China. type: str required: true - dedicated_bandwidth: + name: description: - - Specifies the dedicated bandwidth object. - type: dict - required: false - suboptions: - charge_mode: - description: - - Specifies whether the bandwidth is billed by traffic or - by bandwidth size. The value can be bandwidth or traffic. - If this parameter is left blank or is null character - string, default value bandwidth is used. For IPv6 - addresses, the default parameter value is bandwidth - outside China and is traffic in China. - type: str - required: true - name: - description: - - Specifies the bandwidth name. The value is a string of 1 - to 64 characters that can contain letters, digits, - underscores (V(_)), hyphens (V(-)), and periods (V(.)). 
- type: str - required: true - size: - description: - - Specifies the bandwidth size. The value ranges from 1 - Mbit/s to 2000 Mbit/s by default. (The specific range may - vary depending on the configuration in each region. You - can see the bandwidth range of each region on the - management console.) The minimum unit for bandwidth - adjustment varies depending on the bandwidth range. The - details are as follows. - - The minimum unit is 1 Mbit/s if the allowed bandwidth - size ranges from 0 to 300 Mbit/s (with 300 Mbit/s - included). - - The minimum unit is 50 Mbit/s if the allowed bandwidth - size ranges 300 Mbit/s to 1000 Mbit/s (with 1000 Mbit/s - included). - - The minimum unit is 500 Mbit/s if the allowed bandwidth - size is greater than 1000 Mbit/s. - type: int - required: true - enterprise_project_id: - description: - - Specifies the enterprise project ID. + - Specifies the bandwidth name. The value is a string of 1 to 64 characters that can contain letters, digits, underscores + (V(_)), hyphens (V(-)), and periods (V(.)). type: str - required: false - ip_version: + required: true + size: description: - - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this - parameter is left blank, an IPv4 address will be assigned. + - Specifies the bandwidth size. The value ranges from 1 Mbit/s to 2000 Mbit/s by default. (The specific range may + vary depending on the configuration in each region. You can see the bandwidth range of each region on the management + console.) The minimum unit for bandwidth adjustment varies depending on the bandwidth range. The details are as + follows. + - The minimum unit is 1 Mbit/s if the allowed bandwidth size ranges from 0 to 300 Mbit/s (with 300 Mbit/s included). + - The minimum unit is 50 Mbit/s if the allowed bandwidth size ranges 300 Mbit/s to 1000 Mbit/s (with 1000 Mbit/s + included). + - The minimum unit is 500 Mbit/s if the allowed bandwidth size is greater than 1000 Mbit/s. type: int - required: false - ipv4_address: - description: - - Specifies the obtained IPv4 EIP. The system automatically assigns - an EIP if you do not specify it. - type: str - required: false - port_id: - description: - - Specifies the port ID. This parameter is returned only when a - private IP address is bound with the EIP. - type: str - required: false - shared_bandwidth_id: - description: - - Specifies the ID of shared bandwidth. - type: str - required: false + required: true + enterprise_project_id: + description: + - Specifies the enterprise project ID. + type: str + required: false + ip_version: + description: + - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this parameter is left blank, an IPv4 address will be assigned. + type: int + required: false + ipv4_address: + description: + - Specifies the obtained IPv4 EIP. The system automatically assigns an EIP if you do not specify it. + type: str + required: false + port_id: + description: + - Specifies the port ID. This parameter is returned only when a private IP address is bound with the EIP. + type: str + required: false + shared_bandwidth_id: + description: + - Specifies the ID of shared bandwidth. 
+ type: str + required: false extends_documentation_fragment: - community.general.hwc - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # create an eip and bind it to a port - name: Create vpc hwc_network_vpc: @@ -159,107 +143,91 @@ EXAMPLES = ''' name: "ansible_test_dedicated_bandwidth" size: 1 port_id: "{{ port.id }}" -''' +""" -RETURN = ''' - type: - description: - - Specifies the EIP type. - type: str - returned: success - dedicated_bandwidth: - description: - - Specifies the dedicated bandwidth object. - type: dict - returned: success - contains: - charge_mode: - description: - - Specifies whether the bandwidth is billed by traffic or - by bandwidth size. The value can be bandwidth or traffic. - If this parameter is left blank or is null character - string, default value bandwidth is used. For IPv6 - addresses, the default parameter value is bandwidth - outside China and is traffic in China. - type: str - returned: success - name: - description: - - Specifies the bandwidth name. The value is a string of 1 - to 64 characters that can contain letters, digits, - underscores (V(_)), hyphens (V(-)), and periods (V(.)). - type: str - returned: success - size: - description: - - Specifies the bandwidth size. The value ranges from 1 - Mbit/s to 2000 Mbit/s by default. (The specific range may - vary depending on the configuration in each region. You - can see the bandwidth range of each region on the - management console.) The minimum unit for bandwidth - adjustment varies depending on the bandwidth range. The - details are as follows:. - - The minimum unit is 1 Mbit/s if the allowed bandwidth - size ranges from 0 to 300 Mbit/s (with 300 Mbit/s - included). - - The minimum unit is 50 Mbit/s if the allowed bandwidth - size ranges 300 Mbit/s to 1000 Mbit/s (with 1000 Mbit/s - included). - - The minimum unit is 500 Mbit/s if the allowed bandwidth - size is greater than 1000 Mbit/s. - type: int - returned: success - id: - description: - - Specifies the ID of dedicated bandwidth. - type: str - returned: success - enterprise_project_id: - description: - - Specifies the enterprise project ID. - type: str - returned: success - ip_version: - description: - - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this - parameter is left blank, an IPv4 address will be assigned. - type: int - returned: success - ipv4_address: - description: - - Specifies the obtained IPv4 EIP. The system automatically assigns - an EIP if you do not specify it. - type: str - returned: success - port_id: - description: - - Specifies the port ID. This parameter is returned only when a - private IP address is bound with the EIP. - type: str - returned: success - shared_bandwidth_id: - description: - - Specifies the ID of shared bandwidth. - type: str - returned: success - create_time: - description: - - Specifies the time (UTC time) when the EIP was assigned. - type: str - returned: success - ipv6_address: - description: - - Specifies the obtained IPv6 EIP. - type: str - returned: success - private_ip_address: - description: - - Specifies the private IP address bound with the EIP. This - parameter is returned only when a private IP address is bound - with the EIP. - type: str - returned: success -''' +RETURN = r""" +type: + description: + - Specifies the EIP type. + type: str + returned: success +dedicated_bandwidth: + description: + - Specifies the dedicated bandwidth object. 
+ type: dict + returned: success + contains: + charge_mode: + description: + - Specifies whether the bandwidth is billed by traffic or by bandwidth size. The value can be bandwidth or traffic. + If this parameter is left blank or is null character string, default value bandwidth is used. For IPv6 addresses, + the default parameter value is bandwidth outside China and is traffic in China. + type: str + returned: success + name: + description: + - Specifies the bandwidth name. The value is a string of 1 to 64 characters that can contain letters, digits, underscores + (V(_)), hyphens (V(-)), and periods (V(.)). + type: str + returned: success + size: + description: + - Specifies the bandwidth size. The value ranges from 1 Mbit/s to 2000 Mbit/s by default. (The specific range may + vary depending on the configuration in each region. You can see the bandwidth range of each region on the management + console.) The minimum unit for bandwidth adjustment varies depending on the bandwidth range. The details are as + follows:. + - The minimum unit is 1 Mbit/s if the allowed bandwidth size ranges from 0 to 300 Mbit/s (with 300 Mbit/s included). + - The minimum unit is 50 Mbit/s if the allowed bandwidth size ranges 300 Mbit/s to 1000 Mbit/s (with 1000 Mbit/s included). + - The minimum unit is 500 Mbit/s if the allowed bandwidth size is greater than 1000 Mbit/s. + type: int + returned: success + id: + description: + - Specifies the ID of dedicated bandwidth. + type: str + returned: success +enterprise_project_id: + description: + - Specifies the enterprise project ID. + type: str + returned: success +ip_version: + description: + - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this parameter is left blank, an IPv4 address will be assigned. + type: int + returned: success +ipv4_address: + description: + - Specifies the obtained IPv4 EIP. The system automatically assigns an EIP if you do not specify it. + type: str + returned: success +port_id: + description: + - Specifies the port ID. This parameter is returned only when a private IP address is bound with the EIP. + type: str + returned: success +shared_bandwidth_id: + description: + - Specifies the ID of shared bandwidth. + type: str + returned: success +create_time: + description: + - Specifies the time (UTC time) when the EIP was assigned. + type: str + returned: success +ipv6_address: + description: + - Specifies the obtained IPv6 EIP. + type: str + returned: success +private_ip_address: + description: + - Specifies the private IP address bound with the EIP. This parameter is returned only when a private IP address is bound + with the EIP. 
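An illustrative sketch of reading back the EIP return values documented above (ipv4_address, private_ip_address); the bandwidth settings mirror the module's own example, and the EIP type value is an assumption.

- name: Create eip and inspect the assigned addresses (illustrative)
  community.general.hwc_vpc_eip:
    type: "5_bgp"                        # assumed EIP type value
    dedicated_bandwidth:
      charge_mode: "traffic"
      name: "ansible_test_dedicated_bandwidth"
      size: 1
    port_id: "{{ port.id }}"
  register: eip

- name: Show the assigned addresses
  ansible.builtin.debug:
    msg: "EIP {{ eip.ipv4_address }} is bound to private IP {{ eip.private_ip_address }}"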
+ type: str + returned: success +""" from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( Config, HwcClientException, HwcClientException404, HwcModule, @@ -547,8 +515,7 @@ def async_wait_create(config, result, client, timeout): path_parameters = { "publicip_id": ["publicip", "id"], } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) + data = {key: navigate_value(result, path) for key, path in path_parameters.items()} url = build_path(module, "publicips/{publicip_id}", data) diff --git a/plugins/modules/hwc_vpc_peering_connect.py b/plugins/modules/hwc_vpc_peering_connect.py index 2d6832ce5d..478b28a2c8 100644 --- a/plugins/modules/hwc_vpc_peering_connect.py +++ b/plugins/modules/hwc_vpc_peering_connect.py @@ -13,79 +13,75 @@ __metaclass__ = type # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_vpc_peering_connect description: - - vpc peering management. -short_description: Creates a resource of Vpc/PeeringConnect in Huawei Cloud + - VPC peering management. +short_description: Creates a resource of VPC/PeeringConnect in Huawei Cloud version_added: '0.2.0' author: Huawei Inc. (@huaweicloud) requirements: - - keystoneauth1 >= 3.6.0 + - keystoneauth1 >= 3.6.0 attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - state: - description: - - Whether the given object should exist in Huawei Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - timeouts: - description: - - The timeouts for each operations. - type: dict - default: {} - suboptions: - create: - description: - - The timeouts for create operation. - type: str - default: '15m' - local_vpc_id: - description: - - Specifies the ID of local VPC. - type: str - required: true - name: - description: - - Specifies the name of the VPC peering connection. The value can - contain 1 to 64 characters. - type: str - required: true - peering_vpc: - description: - - Specifies information about the peering VPC. - type: dict - required: true - suboptions: - vpc_id: - description: - - Specifies the ID of peering VPC. - type: str - required: true - project_id: - description: - - Specifies the ID of the project which the peering vpc - belongs to. - type: str - required: false + state: description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. + type: dict + default: {} + suboptions: + create: description: - - The description of vpc peering connection. + - The timeouts for create operation. + type: str + default: '15m' + local_vpc_id: + description: + - Specifies the ID of local VPC. + type: str + required: true + name: + description: + - Specifies the name of the VPC peering connection. The value can contain 1 to 64 characters. + type: str + required: true + peering_vpc: + description: + - Specifies information about the peering VPC. + type: dict + required: true + suboptions: + vpc_id: + description: + - Specifies the ID of peering VPC. + type: str + required: true + project_id: + description: + - Specifies the ID of the project which the peering vpc belongs to. type: str required: false + description: + description: + - The description of vpc peering connection. 
+ type: str + required: false extends_documentation_fragment: - community.general.hwc - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # create a peering connect - name: Create a local vpc hwc_network_vpc: @@ -103,43 +99,41 @@ EXAMPLES = ''' name: "ansible_network_peering_test" peering_vpc: vpc_id: "{{ vpc2.id }}" -''' +""" -RETURN = ''' - local_vpc_id: - description: - - Specifies the ID of local VPC. - type: str - returned: success - name: - description: - - Specifies the name of the VPC peering connection. The value can - contain 1 to 64 characters. - type: str - returned: success - peering_vpc: - description: - - Specifies information about the peering VPC. - type: dict - returned: success - contains: - vpc_id: - description: - - Specifies the ID of peering VPC. - type: str - returned: success - project_id: - description: - - Specifies the ID of the project which the peering vpc - belongs to. - type: str - returned: success - description: - description: - - The description of vpc peering connection. - type: str - returned: success -''' +RETURN = r""" +local_vpc_id: + description: + - Specifies the ID of local VPC. + type: str + returned: success +name: + description: + - Specifies the name of the VPC peering connection. The value can contain 1 to 64 characters. + type: str + returned: success +peering_vpc: + description: + - Specifies information about the peering VPC. + type: dict + returned: success + contains: + vpc_id: + description: + - Specifies the ID of peering VPC. + type: str + returned: success + project_id: + description: + - Specifies the ID of the project which the peering vpc belongs to. + type: str + returned: success +description: + description: + - The description of vpc peering connection. + type: str + returned: success +""" from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( Config, HwcClientException, HwcClientException404, HwcModule, @@ -407,8 +401,7 @@ def async_wait_create(config, result, client, timeout): path_parameters = { "peering_id": ["peering", "id"], } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) + data = {key: navigate_value(result, path) for key, path in path_parameters.items()} url = build_path(module, "v2.0/vpc/peerings/{peering_id}", data) diff --git a/plugins/modules/hwc_vpc_port.py b/plugins/modules/hwc_vpc_port.py index 2d830493d4..47f911821e 100644 --- a/plugins/modules/hwc_vpc_port.py +++ b/plugins/modules/hwc_vpc_port.py @@ -12,110 +12,105 @@ __metaclass__ = type # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_vpc_port description: - - vpc port management. -short_description: Creates a resource of Vpc/Port in Huawei Cloud + - VPC port management. +short_description: Creates a resource of VPC/Port in Huawei Cloud version_added: '0.2.0' author: Huawei Inc. (@huaweicloud) requirements: - - keystoneauth1 >= 3.6.0 + - keystoneauth1 >= 3.6.0 attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - state: + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. + type: dict + default: {} + suboptions: + create: description: - - Whether the given object should exist in Huawei Cloud. 
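A hypothetical sketch of the peering_vpc.project_id option documented above, which is only needed when the peer VPC belongs to another project; the peer VPC and project IDs are placeholders.

- name: Create a cross-project peering connect (illustrative placeholders)
  community.general.hwc_vpc_peering_connect:
    local_vpc_id: "{{ vpc1.id }}"
    name: "ansible_network_peering_test"
    peering_vpc:
      vpc_id: "{{ peer_vpc_id }}"         # placeholder: VPC owned by the peer project
      project_id: "{{ peer_project_id }}" # placeholder: project that owns the peer VPC
    description: "cross-project peering"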
+ - The timeouts for create operation. type: str - choices: ['present', 'absent'] - default: 'present' - timeouts: + default: '15m' + subnet_id: + description: + - Specifies the ID of the subnet to which the port belongs. + type: str + required: true + admin_state_up: + description: + - Specifies the administrative state of the port. + type: bool + required: false + allowed_address_pairs: + description: + - Specifies a set of zero or more allowed address pairs. + required: false + type: list + elements: dict + suboptions: + ip_address: description: - - The timeouts for each operations. - type: dict - default: {} - suboptions: - create: - description: - - The timeouts for create operation. - type: str - default: '15m' - subnet_id: - description: - - Specifies the ID of the subnet to which the port belongs. - type: str - required: true - admin_state_up: - description: - - Specifies the administrative state of the port. - type: bool - required: false - allowed_address_pairs: - description: - - Specifies a set of zero or more allowed address pairs. - required: false - type: list - elements: dict - suboptions: - ip_address: - description: - - Specifies the IP address. It cannot set it to 0.0.0.0. - Configure an independent security group for the port if a - large CIDR block (subnet mask less than 24) is configured - for parameter allowed_address_pairs. - type: str - required: false - mac_address: - description: - - Specifies the MAC address. - type: str - required: false - extra_dhcp_opts: - description: - - Specifies the extended option of DHCP. - type: list - elements: dict - required: false - suboptions: - name: - description: - - Specifies the option name. - type: str - required: false - value: - description: - - Specifies the option value. - type: str - required: false - ip_address: - description: - - Specifies the port IP address. + - Specifies the IP address. It cannot set it to 0.0.0.0. Configure an independent security group for the port if + a large CIDR block (subnet mask less than 24) is configured for parameter allowed_address_pairs. type: str required: false - name: + mac_address: description: - - Specifies the port name. The value can contain no more than 255 - characters. + - Specifies the MAC address. type: str required: false - security_groups: + extra_dhcp_opts: + description: + - Specifies the extended option of DHCP. + type: list + elements: dict + required: false + suboptions: + name: description: - - Specifies the ID of the security group. - type: list - elements: str + - Specifies the option name. + type: str required: false + value: + description: + - Specifies the option value. + type: str + required: false + ip_address: + description: + - Specifies the port IP address. + type: str + required: false + name: + description: + - Specifies the port name. The value can contain no more than 255 characters. + type: str + required: false + security_groups: + description: + - Specifies the ID of the security group. + type: list + elements: str + required: false extends_documentation_fragment: - community.general.hwc - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # create a port - name: Create vpc hwc_network_vpc: @@ -134,76 +129,73 @@ EXAMPLES = ''' community.general.hwc_vpc_port: subnet_id: "{{ subnet.id }}" ip_address: "192.168.100.33" -''' +""" -RETURN = ''' - subnet_id: - description: - - Specifies the ID of the subnet to which the port belongs. 
- type: str - returned: success - admin_state_up: - description: - - Specifies the administrative state of the port. - type: bool - returned: success - allowed_address_pairs: - description: - - Specifies a set of zero or more allowed address pairs. - type: list - returned: success - contains: - ip_address: - description: - - Specifies the IP address. It cannot set it to 0.0.0.0. - Configure an independent security group for the port if a - large CIDR block (subnet mask less than 24) is configured - for parameter allowed_address_pairs. - type: str - returned: success - mac_address: - description: - - Specifies the MAC address. - type: str - returned: success - extra_dhcp_opts: - description: - - Specifies the extended option of DHCP. - type: list - returned: success - contains: - name: - description: - - Specifies the option name. - type: str - returned: success - value: - description: - - Specifies the option value. - type: str - returned: success +RETURN = r""" +subnet_id: + description: + - Specifies the ID of the subnet to which the port belongs. + type: str + returned: success +admin_state_up: + description: + - Specifies the administrative state of the port. + type: bool + returned: success +allowed_address_pairs: + description: + - Specifies a set of zero or more allowed address pairs. + type: list + returned: success + contains: ip_address: - description: - - Specifies the port IP address. - type: str - returned: success - name: - description: - - Specifies the port name. The value can contain no more than 255 - characters. - type: str - returned: success - security_groups: - description: - - Specifies the ID of the security group. - type: list - returned: success + description: + - Specifies the IP address. It cannot set it to 0.0.0.0. Configure an independent security group for the port if a + large CIDR block (subnet mask less than 24) is configured for parameter allowed_address_pairs. + type: str + returned: success mac_address: - description: - - Specifies the port MAC address. - type: str - returned: success -''' + description: + - Specifies the MAC address. + type: str + returned: success +extra_dhcp_opts: + description: + - Specifies the extended option of DHCP. + type: list + returned: success + contains: + name: + description: + - Specifies the option name. + type: str + returned: success + value: + description: + - Specifies the option value. + type: str + returned: success +ip_address: + description: + - Specifies the port IP address. + type: str + returned: success +name: + description: + - Specifies the port name. The value can contain no more than 255 characters. + type: str + returned: success +security_groups: + description: + - Specifies the ID of the security group. + type: list + returned: success +mac_address: + description: + - Specifies the port MAC address. 
+ type: str + returned: success +""" from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( Config, HwcClientException, HwcClientException404, HwcModule, @@ -560,8 +552,7 @@ def async_wait_create(config, result, client, timeout): path_parameters = { "port_id": ["port", "id"], } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) + data = {key: navigate_value(result, path) for key, path in path_parameters.items()} url = build_path(module, "ports/{port_id}", data) diff --git a/plugins/modules/hwc_vpc_private_ip.py b/plugins/modules/hwc_vpc_private_ip.py index 95e759f6f2..695c644cb9 100644 --- a/plugins/modules/hwc_vpc_private_ip.py +++ b/plugins/modules/hwc_vpc_private_ip.py @@ -12,54 +12,51 @@ __metaclass__ = type # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_vpc_private_ip description: - - vpc private ip management. -short_description: Creates a resource of Vpc/PrivateIP in Huawei Cloud + - VPC private IP management. +short_description: Creates a resource of VPC/PrivateIP in Huawei Cloud notes: - - If O(id) option is provided, it takes precedence over O(subnet_id), O(ip_address) for private ip selection. - - O(subnet_id), O(ip_address) are used for private ip selection. If more than one private ip with this options exists, execution is aborted. - - No parameter support updating. If one of option is changed, the module will create a new resource. + - If O(id) option is provided, it takes precedence over O(subnet_id), O(ip_address) for private IP selection. + - O(subnet_id), O(ip_address) are used for private IP selection. If more than one private IP with this options exists, execution + is aborted. + - No parameter support updating. If one of option is changed, the module will create a new resource. version_added: '0.2.0' author: Huawei Inc. (@huaweicloud) requirements: - - keystoneauth1 >= 3.6.0 + - keystoneauth1 >= 3.6.0 attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - state: - description: - - Whether the given object should exist in Huawei Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - subnet_id: - description: - - Specifies the ID of the subnet from which IP addresses are - assigned. Cannot be changed after creating the private ip. - type: str - required: true - ip_address: - description: - - Specifies the target IP address. The value can be an available IP - address in the subnet. If it is not specified, the system - automatically assigns an IP address. Cannot be changed after - creating the private ip. - type: str - required: false + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + subnet_id: + description: + - Specifies the ID of the subnet from which IP addresses are assigned. Cannot be changed after creating the private + IP. + type: str + required: true + ip_address: + description: + - Specifies the target IP address. The value can be an available IP address in the subnet. If it is not specified, the + system automatically assigns an IP address. Cannot be changed after creating the private IP. 
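For the hwc_vpc_port module converted above, the shipped example only sets subnet_id and ip_address; a hedged sketch that also exercises the documented allowed_address_pairs, extra_dhcp_opts, and security_groups options (all IDs and addresses are placeholders) could be:

- name: Create a port with an allowed address pair and DHCP options (illustrative sketch)
  community.general.hwc_vpc_port:
    subnet_id: "subnet-aaaa"              # placeholder subnet ID
    name: "example_port"
    ip_address: "192.168.100.33"
    admin_state_up: true
    allowed_address_pairs:
      - ip_address: "192.168.100.40"      # placeholder; must not be 0.0.0.0
    extra_dhcp_opts:
      - name: "ntp"                       # placeholder DHCP option name
        value: "192.168.100.1"
    security_groups:
      - "sg-aaaa"                         # placeholder security group ID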
+ type: str + required: false extends_documentation_fragment: - community.general.hwc - community.general.attributes +""" -''' - -EXAMPLES = ''' -# create a private ip +EXAMPLES = r""" +# create a private IP - name: Create vpc hwc_network_vpc: cidr: "192.168.100.0/24" @@ -73,27 +70,25 @@ EXAMPLES = ''' vpc_id: "{{ vpc.id }}" cidr: "192.168.100.0/26" register: subnet -- name: Create a private ip +- name: Create a private IP community.general.hwc_vpc_private_ip: subnet_id: "{{ subnet.id }}" ip_address: "192.168.100.33" -''' +""" -RETURN = ''' - subnet_id: - description: - - Specifies the ID of the subnet from which IP addresses are - assigned. - type: str - returned: success - ip_address: - description: - - Specifies the target IP address. The value can be an available IP - address in the subnet. If it is not specified, the system - automatically assigns an IP address. - type: str - returned: success -''' +RETURN = r""" +subnet_id: + description: + - Specifies the ID of the subnet from which IP addresses are assigned. + type: str + returned: success +ip_address: + description: + - Specifies the target IP address. The value can be an available IP address in the subnet. If it is not specified, the + system automatically assigns an IP address. + type: str + returned: success +""" from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( Config, HwcClientException, HwcModule, are_different_dicts, build_path, diff --git a/plugins/modules/hwc_vpc_route.py b/plugins/modules/hwc_vpc_route.py index 091b49b0c8..85224fd4c8 100644 --- a/plugins/modules/hwc_vpc_route.py +++ b/plugins/modules/hwc_vpc_route.py @@ -12,60 +12,59 @@ __metaclass__ = type # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_vpc_route description: - - vpc route management. -short_description: Creates a resource of Vpc/Route in Huawei Cloud + - VPC route management. +short_description: Creates a resource of VPC/Route in Huawei Cloud notes: - - If O(id) option is provided, it takes precedence over O(destination), O(vpc_id), O(type), and O(next_hop) for route selection. - - O(destination), O(vpc_id), O(type) and O(next_hop) are used for route selection. If more than one route with this options exists, execution is aborted. - - No parameter support updating. If one of option is changed, the module will create a new resource. + - If O(id) option is provided, it takes precedence over O(destination), O(vpc_id), O(type), and O(next_hop) for route selection. + - O(destination), O(vpc_id), O(type) and O(next_hop) are used for route selection. If more than one route with this options + exists, execution is aborted. + - No parameter support updating. If one of option is changed, the module will create a new resource. version_added: '0.2.0' author: Huawei Inc. (@huaweicloud) requirements: - - keystoneauth1 >= 3.6.0 + - keystoneauth1 >= 3.6.0 attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - state: - description: - - Whether the given object should exist in Huawei Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - destination: - description: - - Specifies the destination IP address or CIDR block. - type: str - required: true - next_hop: - description: - - Specifies the next hop. The value is VPC peering connection ID. 
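Because ip_address is documented above as optional for hwc_vpc_private_ip, a sketch that leaves address assignment to the system (the subnet ID is a placeholder) reduces to:

- name: Reserve a private IP with a system-assigned address (illustrative sketch)
  community.general.hwc_vpc_private_ip:
    subnet_id: "subnet-aaaa"   # placeholder; ip_address omitted so the system assigns one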
- type: str - required: true - vpc_id: - description: - - Specifies the VPC ID to which route is added. - type: str - required: true - type: - description: - - Specifies the type of route. - type: str - required: false - default: 'peering' + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + destination: + description: + - Specifies the destination IP address or CIDR block. + type: str + required: true + next_hop: + description: + - Specifies the next hop. The value is VPC peering connection ID. + type: str + required: true + vpc_id: + description: + - Specifies the VPC ID to which route is added. + type: str + required: true + type: + description: + - Specifies the type of route. + type: str + required: false + default: 'peering' extends_documentation_fragment: - community.general.hwc - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # create a peering connect - name: Create a local vpc hwc_network_vpc: @@ -91,35 +90,35 @@ EXAMPLES = ''' vpc_id: "{{ vpc1.id }}" destination: "192.168.0.0/16" next_hop: "{{ connect.id }}" -''' +""" -RETURN = ''' - id: - description: - - UUID of the route. - type: str - returned: success - destination: - description: - - Specifies the destination IP address or CIDR block. - type: str - returned: success - next_hop: - description: - - Specifies the next hop. The value is VPC peering connection ID. - type: str - returned: success - vpc_id: - description: - - Specifies the VPC ID to which route is added. - type: str - returned: success - type: - description: - - Specifies the type of route. - type: str - returned: success -''' +RETURN = r""" +id: + description: + - UUID of the route. + type: str + returned: success +destination: + description: + - Specifies the destination IP address or CIDR block. + type: str + returned: success +next_hop: + description: + - Specifies the next hop. The value is VPC peering connection ID. + type: str + returned: success +vpc_id: + description: + - Specifies the VPC ID to which route is added. + type: str + returned: success +type: + description: + - Specifies the type of route. + type: str + returned: success +""" from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( Config, HwcClientException, HwcModule, are_different_dicts, build_path, diff --git a/plugins/modules/hwc_vpc_security_group.py b/plugins/modules/hwc_vpc_security_group.py index aa65e801c4..9f53b49c0d 100644 --- a/plugins/modules/hwc_vpc_security_group.py +++ b/plugins/modules/hwc_vpc_security_group.py @@ -12,162 +12,141 @@ __metaclass__ = type # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_vpc_security_group description: - - vpc security group management. -short_description: Creates a resource of Vpc/SecurityGroup in Huawei Cloud + - VPC security group management. +short_description: Creates a resource of VPC/SecurityGroup in Huawei Cloud notes: - - If O(id) option is provided, it takes precedence over O(name), - O(enterprise_project_id), and O(vpc_id) for security group selection. - - O(name), O(enterprise_project_id) and O(vpc_id) are used for security - group selection. If more than one security group with this options exists, - execution is aborted. - - No parameter support updating. If one of option is changed, the module - will create a new resource. 
+ - If O(id) option is provided, it takes precedence over O(name), O(enterprise_project_id), and O(vpc_id) for security group + selection. + - O(name), O(enterprise_project_id) and O(vpc_id) are used for security group selection. If more than one security group + with this options exists, execution is aborted. + - No parameter support updating. If one of option is changed, the module will create a new resource. version_added: '0.2.0' author: Huawei Inc. (@huaweicloud) requirements: - - keystoneauth1 >= 3.6.0 + - keystoneauth1 >= 3.6.0 attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - state: - description: - - Whether the given object should exist in Huawei Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - name: - description: - - Specifies the security group name. The value is a string of 1 to - 64 characters that can contain letters, digits, underscores (V(_)), - hyphens (V(-)), and periods (V(.)). - type: str - required: true - enterprise_project_id: - description: - - Specifies the enterprise project ID. When creating a security - group, associate the enterprise project ID with the security - group.s - type: str - required: false - vpc_id: - description: - - Specifies the resource ID of the VPC to which the security group - belongs. - type: str - required: false + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + name: + description: + - Specifies the security group name. The value is a string of 1 to 64 characters that can contain letters, digits, underscores + (V(_)), hyphens (V(-)), and periods (V(.)). + type: str + required: true + enterprise_project_id: + description: + - Specifies the enterprise project ID. When creating a security group, associate the enterprise project ID with the + security group.s. + type: str + required: false + vpc_id: + description: + - Specifies the resource ID of the VPC to which the security group belongs. + type: str + required: false extends_documentation_fragment: - community.general.hwc - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # create a security group - name: Create a security group community.general.hwc_vpc_security_group: name: "ansible_network_security_group_test" -''' +""" -RETURN = ''' - name: - description: - - Specifies the security group name. The value is a string of 1 to - 64 characters that can contain letters, digits, underscores (V(_)), - hyphens (V(-)), and periods (V(.)). - type: str - returned: success - enterprise_project_id: - description: - - Specifies the enterprise project ID. When creating a security - group, associate the enterprise project ID with the security - group. - type: str - returned: success - vpc_id: - description: - - Specifies the resource ID of the VPC to which the security group - belongs. - type: str - returned: success - rules: - description: - - Specifies the security group rule, which ensures that resources - in the security group can communicate with one another. - type: complex - returned: success - contains: - description: - description: - - Provides supplementary information about the security - group rule. - type: str - returned: success - direction: - description: - - Specifies the direction of access control. The value can - be egress or ingress. - type: str - returned: success - ethertype: - description: - - Specifies the IP protocol version. 
The value can be IPv4 - or IPv6. - type: str - returned: success - id: - description: - - Specifies the security group rule ID. - type: str - returned: success - port_range_max: - description: - - Specifies the end port number. The value ranges from 1 to - 65535. If the protocol is not icmp, the value cannot be - smaller than the port_range_min value. An empty value - indicates all ports. - type: int - returned: success - port_range_min: - description: - - Specifies the start port number. The value ranges from 1 - to 65535. The value cannot be greater than the - port_range_max value. An empty value indicates all ports. - type: int - returned: success - protocol: - description: - - Specifies the protocol type. The value can be icmp, tcp, - udp, or others. If the parameter is left blank, the - security group supports all protocols. - type: str - returned: success - remote_address_group_id: - description: - - Specifies the ID of remote IP address group. - type: str - returned: success - remote_group_id: - description: - - Specifies the ID of the peer security group. - type: str - returned: success - remote_ip_prefix: - description: - - Specifies the remote IP address. If the access control - direction is set to egress, the parameter specifies the - source IP address. If the access control direction is set - to ingress, the parameter specifies the destination IP - address. - type: str - returned: success -''' +RETURN = r""" +name: + description: + - Specifies the security group name. The value is a string of 1 to 64 characters that can contain letters, digits, underscores + (V(_)), hyphens (V(-)), and periods (V(.)). + type: str + returned: success +enterprise_project_id: + description: + - Specifies the enterprise project ID. When creating a security group, associate the enterprise project ID with the security + group. + type: str + returned: success +vpc_id: + description: + - Specifies the resource ID of the VPC to which the security group belongs. + type: str + returned: success +rules: + description: + - Specifies the security group rule, which ensures that resources in the security group can communicate with one another. + type: complex + returned: success + contains: + description: + description: + - Provides supplementary information about the security group rule. + type: str + returned: success + direction: + description: + - Specifies the direction of access control. The value can be egress or ingress. + type: str + returned: success + ethertype: + description: + - Specifies the IP protocol version. The value can be IPv4 or IPv6. + type: str + returned: success + id: + description: + - Specifies the security group rule ID. + type: str + returned: success + port_range_max: + description: + - Specifies the end port number. The value ranges from 1 to 65535. If the protocol is not icmp, the value cannot be + smaller than the port_range_min value. An empty value indicates all ports. + type: int + returned: success + port_range_min: + description: + - Specifies the start port number. The value ranges from 1 to 65535. The value cannot be greater than the port_range_max + value. An empty value indicates all ports. + type: int + returned: success + protocol: + description: + - Specifies the protocol type. The value can be icmp, tcp, udp, or others. If the parameter is left blank, the security + group supports all protocols. + type: str + returned: success + remote_address_group_id: + description: + - Specifies the ID of remote IP address group. 
+ type: str + returned: success + remote_group_id: + description: + - Specifies the ID of the peer security group. + type: str + returned: success + remote_ip_prefix: + description: + - Specifies the remote IP address. If the access control direction is set to egress, the parameter specifies the source + IP address. If the access control direction is set to ingress, the parameter specifies the destination IP address. + type: str + returned: success +""" from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( Config, HwcClientException, HwcModule, are_different_dicts, build_path, diff --git a/plugins/modules/hwc_vpc_security_group_rule.py b/plugins/modules/hwc_vpc_security_group_rule.py index 899647e8ce..0848901cd5 100644 --- a/plugins/modules/hwc_vpc_security_group_rule.py +++ b/plugins/modules/hwc_vpc_security_group_rule.py @@ -12,105 +12,90 @@ __metaclass__ = type # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_vpc_security_group_rule description: - - vpc security group management. -short_description: Creates a resource of Vpc/SecurityGroupRule in Huawei Cloud + - VPC security group management. +short_description: Creates a resource of VPC/SecurityGroupRule in Huawei Cloud notes: - - If O(id) option is provided, it takes precedence over - O(security_group_id) for security group rule selection. - - O(security_group_id) is used for security group rule selection. If more - than one security group rule with this options exists, execution is - aborted. - - No parameter support updating. If one of option is changed, the module - will create a new resource. + - If O(id) option is provided, it takes precedence over O(security_group_id) for security group rule selection. + - O(security_group_id) is used for security group rule selection. If more than one security group rule with this options + exists, execution is aborted. + - No parameter support updating. If one of option is changed, the module will create a new resource. version_added: '0.2.0' author: Huawei Inc. (@huaweicloud) requirements: - - keystoneauth1 >= 3.6.0 + - keystoneauth1 >= 3.6.0 attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - state: - description: - - Whether the given object should exist in Huawei Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - direction: - description: - - Specifies the direction of access control. The value can be - egress or ingress. - type: str - required: true - security_group_id: - description: - - Specifies the security group rule ID, which uniquely identifies - the security group rule. - type: str - required: true + state: description: - description: - - Provides supplementary information about the security group rule. - The value is a string of no more than 255 characters that can - contain letters and digits. - type: str - required: false - ethertype: - description: - - Specifies the IP protocol version. The value can be IPv4 or IPv6. - If you do not set this parameter, IPv4 is used by default. - type: str - required: false - port_range_max: - description: - - Specifies the end port number. The value ranges from 1 to 65535. - If the protocol is not icmp, the value cannot be smaller than the - port_range_min value. An empty value indicates all ports. - type: int - required: false - port_range_min: - description: - - Specifies the start port number. 
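The converted hwc_vpc_security_group example above only sets name; a sketch that also uses the documented vpc_id and enterprise_project_id options (both values are placeholders) might be:

- name: Create a security group scoped to a VPC and enterprise project (illustrative sketch)
  community.general.hwc_vpc_security_group:
    name: "ansible_network_security_group_test"
    vpc_id: "vpc-aaaa"                    # placeholder VPC ID
    enterprise_project_id: "ep-bbbb"      # placeholder enterprise project ID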
The value ranges from 1 to - 65535. The value cannot be greater than the port_range_max value. - An empty value indicates all ports. - type: int - required: false - protocol: - description: - - Specifies the protocol type. The value can be icmp, tcp, or udp. - If the parameter is left blank, the security group supports all - protocols. - type: str - required: false - remote_group_id: - description: - - Specifies the ID of the peer security group. The value is - exclusive with parameter remote_ip_prefix. - type: str - required: false - remote_ip_prefix: - description: - - Specifies the remote IP address. If the access control direction - is set to egress, the parameter specifies the source IP address. - If the access control direction is set to ingress, the parameter - specifies the destination IP address. The value can be in the - CIDR format or IP addresses. The parameter is exclusive with - parameter remote_group_id. - type: str - required: false + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + direction: + description: + - Specifies the direction of access control. The value can be egress or ingress. + type: str + required: true + security_group_id: + description: + - Specifies the security group rule ID, which uniquely identifies the security group rule. + type: str + required: true + description: + description: + - Provides supplementary information about the security group rule. The value is a string of no more than 255 characters + that can contain letters and digits. + type: str + required: false + ethertype: + description: + - Specifies the IP protocol version. The value can be IPv4 or IPv6. If you do not set this parameter, IPv4 is used by + default. + type: str + required: false + port_range_max: + description: + - Specifies the end port number. The value ranges from 1 to 65535. If the protocol is not icmp, the value cannot be + smaller than the port_range_min value. An empty value indicates all ports. + type: int + required: false + port_range_min: + description: + - Specifies the start port number. The value ranges from 1 to 65535. The value cannot be greater than the port_range_max + value. An empty value indicates all ports. + type: int + required: false + protocol: + description: + - Specifies the protocol type. The value can be icmp, tcp, or udp. If the parameter is left blank, the security group + supports all protocols. + type: str + required: false + remote_group_id: + description: + - Specifies the ID of the peer security group. The value is exclusive with parameter remote_ip_prefix. + type: str + required: false + remote_ip_prefix: + description: + - Specifies the remote IP address. If the access control direction is set to egress, the parameter specifies the source + IP address. If the access control direction is set to ingress, the parameter specifies the destination IP address. + The value can be in the CIDR format or IP addresses. The parameter is exclusive with parameter remote_group_id. + type: str + required: false extends_documentation_fragment: - community.general.hwc - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # create a security group rule - name: Create a security group hwc_vpc_security_group: @@ -125,72 +110,62 @@ EXAMPLES = ''' security_group_id: "{{ sg.id }}" port_range_min: 22 remote_ip_prefix: "0.0.0.0/0" -''' +""" -RETURN = ''' - direction: - description: - - Specifies the direction of access control. The value can be - egress or ingress. 
- type: str - returned: success - security_group_id: - description: - - Specifies the security group rule ID, which uniquely identifies - the security group rule. - type: str - returned: success - description: - description: - - Provides supplementary information about the security group rule. - The value is a string of no more than 255 characters that can - contain letters and digits. - type: str - returned: success - ethertype: - description: - - Specifies the IP protocol version. The value can be IPv4 or IPv6. - If you do not set this parameter, IPv4 is used by default. - type: str - returned: success - port_range_max: - description: - - Specifies the end port number. The value ranges from 1 to 65535. - If the protocol is not icmp, the value cannot be smaller than the - port_range_min value. An empty value indicates all ports. - type: int - returned: success - port_range_min: - description: - - Specifies the start port number. The value ranges from 1 to - 65535. The value cannot be greater than the port_range_max value. - An empty value indicates all ports. - type: int - returned: success - protocol: - description: - - Specifies the protocol type. The value can be icmp, tcp, or udp. - If the parameter is left blank, the security group supports all - protocols. - type: str - returned: success - remote_group_id: - description: - - Specifies the ID of the peer security group. The value is - exclusive with parameter remote_ip_prefix. - type: str - returned: success - remote_ip_prefix: - description: - - Specifies the remote IP address. If the access control direction - is set to egress, the parameter specifies the source IP address. - If the access control direction is set to ingress, the parameter - specifies the destination IP address. The value can be in the - CIDR format or IP addresses. The parameter is exclusive with - parameter remote_group_id. - type: str - returned: success -''' +RETURN = r""" +direction: + description: + - Specifies the direction of access control. The value can be egress or ingress. + type: str + returned: success +security_group_id: + description: + - Specifies the security group rule ID, which uniquely identifies the security group rule. + type: str + returned: success +description: + description: + - Provides supplementary information about the security group rule. The value is a string of no more than 255 characters + that can contain letters and digits. + type: str + returned: success +ethertype: + description: + - Specifies the IP protocol version. The value can be IPv4 or IPv6. If you do not set this parameter, IPv4 is used by + default. + type: str + returned: success +port_range_max: + description: + - Specifies the end port number. The value ranges from 1 to 65535. If the protocol is not icmp, the value cannot be smaller + than the port_range_min value. An empty value indicates all ports. + type: int + returned: success +port_range_min: + description: + - Specifies the start port number. The value ranges from 1 to 65535. The value cannot be greater than the port_range_max + value. An empty value indicates all ports. + type: int + returned: success +protocol: + description: + - Specifies the protocol type. The value can be icmp, tcp, or udp. If the parameter is left blank, the security group + supports all protocols. + type: str + returned: success +remote_group_id: + description: + - Specifies the ID of the peer security group. The value is exclusive with parameter remote_ip_prefix. 
+ type: str + returned: success +remote_ip_prefix: + description: + - Specifies the remote IP address. If the access control direction is set to egress, the parameter specifies the source + IP address. If the access control direction is set to ingress, the parameter specifies the destination IP address. The + value can be in the CIDR format or IP addresses. The parameter is exclusive with parameter remote_group_id. + type: str + returned: success +""" from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( Config, HwcClientException, HwcModule, are_different_dicts, build_path, diff --git a/plugins/modules/hwc_vpc_subnet.py b/plugins/modules/hwc_vpc_subnet.py index 7ba7473301..84a9219370 100644 --- a/plugins/modules/hwc_vpc_subnet.py +++ b/plugins/modules/hwc_vpc_subnet.py @@ -12,99 +12,90 @@ __metaclass__ = type # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_vpc_subnet description: - - subnet management. -short_description: Creates a resource of Vpc/Subnet in Huawei Cloud + - Subnet management. +short_description: Creates a resource of VPC/Subnet in Huawei Cloud version_added: '0.2.0' author: Huawei Inc. (@huaweicloud) requirements: - - keystoneauth1 >= 3.6.0 + - keystoneauth1 >= 3.6.0 attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - state: + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. + type: dict + default: {} + suboptions: + create: description: - - Whether the given object should exist in Huawei Cloud. + - The timeouts for create operation. type: str - choices: ['present', 'absent'] - default: 'present' - timeouts: + default: '15m' + update: description: - - The timeouts for each operations. - type: dict - default: {} - suboptions: - create: - description: - - The timeouts for create operation. - type: str - default: '15m' - update: - description: - - The timeouts for update operation. - type: str - default: '15m' - cidr: - description: - - Specifies the subnet CIDR block. The value must be within the VPC - CIDR block and be in CIDR format. The subnet mask cannot be - greater than 28. Cannot be changed after creating the subnet. + - The timeouts for update operation. type: str - required: true - gateway_ip: - description: - - Specifies the gateway of the subnet. The value must be an IP - address in the subnet. Cannot be changed after creating the subnet. - type: str - required: true - name: - description: - - Specifies the subnet name. The value is a string of 1 to 64 - characters that can contain letters, digits, underscores (V(_)), - hyphens (V(-)), and periods (V(.)). - type: str - required: true - vpc_id: - description: - - Specifies the ID of the VPC to which the subnet belongs. Cannot - be changed after creating the subnet. - type: str - required: true - availability_zone: - description: - - Specifies the AZ to which the subnet belongs. Cannot be changed - after creating the subnet. - type: str - required: false - dhcp_enable: - description: - - Specifies whether DHCP is enabled for the subnet. The value can - be true (enabled) or false(disabled), and default value is true. 
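Beyond the ingress SSH example shipped with hwc_vpc_security_group_rule above, a hedged egress sketch using remote_group_id instead of remote_ip_prefix (the two are documented as mutually exclusive; all IDs are placeholders) could read:

- name: Allow egress HTTPS only toward a peer security group (illustrative sketch)
  community.general.hwc_vpc_security_group_rule:
    direction: "egress"
    security_group_id: "sg-aaaa"      # placeholder ID of the group owning the rule
    ethertype: "IPv4"
    protocol: "tcp"
    port_range_min: 443
    port_range_max: 443
    remote_group_id: "sg-bbbb"        # placeholder peer security group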
- If this parameter is set to false, newly created ECSs cannot - obtain IP addresses, and usernames and passwords cannot be - injected using Cloud-init. - type: bool - required: false - dns_address: - description: - - Specifies the DNS server addresses for subnet. The address - in the head will be used first. - type: list - elements: str - required: false + default: '15m' + cidr: + description: + - Specifies the subnet CIDR block. The value must be within the VPC CIDR block and be in CIDR format. The subnet mask + cannot be greater than 28. Cannot be changed after creating the subnet. + type: str + required: true + gateway_ip: + description: + - Specifies the gateway of the subnet. The value must be an IP address in the subnet. Cannot be changed after creating + the subnet. + type: str + required: true + name: + description: + - Specifies the subnet name. The value is a string of 1 to 64 characters that can contain letters, digits, underscores + (V(_)), hyphens (V(-)), and periods (V(.)). + type: str + required: true + vpc_id: + description: + - Specifies the ID of the VPC to which the subnet belongs. Cannot be changed after creating the subnet. + type: str + required: true + availability_zone: + description: + - Specifies the AZ to which the subnet belongs. Cannot be changed after creating the subnet. + type: str + required: false + dhcp_enable: + description: + - Specifies whether DHCP is enabled for the subnet. The value can be true (enabled) or false(disabled), and default + value is true. If this parameter is set to false, newly created ECSs cannot obtain IP addresses, and usernames and + passwords cannot be injected using Cloud-init. + type: bool + required: false + dns_address: + description: + - Specifies the DNS server addresses for subnet. The address in the head will be used first. + type: list + elements: str + required: false extends_documentation_fragment: - community.general.hwc - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # create subnet - name: Create vpc hwc_network_vpc: @@ -118,55 +109,49 @@ EXAMPLES = ''' gateway_ip: "192.168.100.32" name: "ansible_network_subnet_test" dhcp_enable: true -''' +""" -RETURN = ''' - cidr: - description: - - Specifies the subnet CIDR block. The value must be within the VPC - CIDR block and be in CIDR format. The subnet mask cannot be - greater than 28. - type: str - returned: success - gateway_ip: - description: - - Specifies the gateway of the subnet. The value must be an IP - address in the subnet. - type: str - returned: success - name: - description: - - Specifies the subnet name. The value is a string of 1 to 64 - characters that can contain letters, digits, underscores (V(_)), - hyphens (V(-)), and periods (V(.)). - type: str - returned: success - vpc_id: - description: - - Specifies the ID of the VPC to which the subnet belongs. - type: str - returned: success - availability_zone: - description: - - Specifies the AZ to which the subnet belongs. - type: str - returned: success - dhcp_enable: - description: - - Specifies whether DHCP is enabled for the subnet. The value can - be true (enabled) or false(disabled), and default value is true. - If this parameter is set to false, newly created ECSs cannot - obtain IP addresses, and usernames and passwords cannot be - injected using Cloud-init. - type: bool - returned: success - dns_address: - description: - - Specifies the DNS server addresses for subnet. The address - in the head will be used first. 
- type: list - returned: success -''' +RETURN = r""" +cidr: + description: + - Specifies the subnet CIDR block. The value must be within the VPC CIDR block and be in CIDR format. The subnet mask + cannot be greater than 28. + type: str + returned: success +gateway_ip: + description: + - Specifies the gateway of the subnet. The value must be an IP address in the subnet. + type: str + returned: success +name: + description: + - Specifies the subnet name. The value is a string of 1 to 64 characters that can contain letters, digits, underscores + (V(_)), hyphens (V(-)), and periods (V(.)). + type: str + returned: success +vpc_id: + description: + - Specifies the ID of the VPC to which the subnet belongs. + type: str + returned: success +availability_zone: + description: + - Specifies the AZ to which the subnet belongs. + type: str + returned: success +dhcp_enable: + description: + - Specifies whether DHCP is enabled for the subnet. The value can be true (enabled) or false(disabled), and default value + is true. If this parameter is set to false, newly created ECSs cannot obtain IP addresses, and usernames and passwords + cannot be injected using Cloud-init. + type: bool + returned: success +dns_address: + description: + - Specifies the DNS server addresses for subnet. The address in the head will be used first. + type: list + returned: success +""" from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( Config, HwcClientException, HwcClientException404, HwcModule, @@ -440,8 +425,7 @@ def async_wait_create(config, result, client, timeout): path_parameters = { "subnet_id": ["subnet", "id"], } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) + data = {key: navigate_value(result, path) for key, path in path_parameters.items()} url = build_path(module, "subnets/{subnet_id}", data) @@ -538,8 +522,7 @@ def async_wait_update(config, result, client, timeout): path_parameters = { "subnet_id": ["subnet", "id"], } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) + data = {key: navigate_value(result, path) for key, path in path_parameters.items()} url = build_path(module, "subnets/{subnet_id}", data) diff --git a/plugins/modules/ibm_sa_domain.py b/plugins/modules/ibm_sa_domain.py index 774f29134c..00b9ee1239 100644 --- a/plugins/modules/ibm_sa_domain.py +++ b/plugins/modules/ibm_sa_domain.py @@ -10,92 +10,90 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ibm_sa_domain short_description: Manages domains on IBM Spectrum Accelerate Family storage systems description: - - "This module can be used to add domains to or removes them from IBM Spectrum Accelerate Family storage systems." - + - This module can be used to add domains to or removes them from IBM Spectrum Accelerate Family storage systems. attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - domain: - description: - - Name of the domain to be managed. - required: true - type: str - state: - description: - - The desired state of the domain. - default: "present" - choices: [ "present", "absent" ] - type: str - ldap_id: - description: - - ldap id to add to the domain. - required: false - type: str - size: - description: - - Size of the domain. - required: false - type: str - hard_capacity: - description: - - Hard capacity of the domain. 
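For hwc_vpc_subnet, converted above, a sketch that adds the documented availability_zone and dns_address options to the shipped example (the AZ name and DNS addresses are placeholders) might look like:

- name: Create a subnet with explicit DNS servers and AZ (illustrative sketch)
  community.general.hwc_vpc_subnet:
    vpc_id: "{{ vpc.id }}"
    cidr: "192.168.100.0/26"
    gateway_ip: "192.168.100.32"
    name: "ansible_network_subnet_test"
    availability_zone: "az-1"        # placeholder AZ name
    dhcp_enable: true
    dns_address:
      - "192.0.2.10"                 # placeholder DNS servers; the first entry is preferred
      - "192.0.2.11"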
- required: false - type: str - soft_capacity: - description: - - Soft capacity of the domain. - required: false - type: str - max_cgs: - description: - - Number of max cgs. - required: false - type: str - max_dms: - description: - - Number of max dms. - required: false - type: str - max_mirrors: - description: - - Number of max_mirrors. - required: false - type: str - max_pools: - description: - - Number of max_pools. - required: false - type: str - max_volumes: - description: - - Number of max_volumes. - required: false - type: str - perf_class: - description: - - Add the domain to a performance class. - required: false - type: str + domain: + description: + - Name of the domain to be managed. + required: true + type: str + state: + description: + - The desired state of the domain. + default: "present" + choices: ["present", "absent"] + type: str + ldap_id: + description: + - LDAP id to add to the domain. + required: false + type: str + size: + description: + - Size of the domain. + required: false + type: str + hard_capacity: + description: + - Hard capacity of the domain. + required: false + type: str + soft_capacity: + description: + - Soft capacity of the domain. + required: false + type: str + max_cgs: + description: + - Number of max cgs. + required: false + type: str + max_dms: + description: + - Number of max dms. + required: false + type: str + max_mirrors: + description: + - Number of max_mirrors. + required: false + type: str + max_pools: + description: + - Number of max_pools. + required: false + type: str + max_volumes: + description: + - Number of max_volumes. + required: false + type: str + perf_class: + description: + - Add the domain to a performance class. + required: false + type: str extends_documentation_fragment: - community.general.ibm_storage - community.general.attributes author: - - Tzur Eliyahu (@tzure) -''' + - Tzur Eliyahu (@tzure) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Define new domain. community.general.ibm_sa_domain: domain: domain_name @@ -112,14 +110,14 @@ EXAMPLES = ''' username: admin password: secret endpoints: hostdev-system -''' -RETURN = ''' +""" +RETURN = r""" msg: - description: module return status. - returned: as needed - type: str - sample: "domain 'domain_name' created successfully." -''' + description: Module return status. + returned: as needed + type: str + sample: "domain 'domain_name' created successfully." +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ diff --git a/plugins/modules/ibm_sa_host.py b/plugins/modules/ibm_sa_host.py index 614865ae01..f6613b3b29 100644 --- a/plugins/modules/ibm_sa_host.py +++ b/plugins/modules/ibm_sa_host.py @@ -10,66 +10,61 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ibm_sa_host short_description: Adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems description: - - "This module adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems." - + - This module adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems. attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - host: - description: - - Host name. - required: true - type: str - state: - description: - - Host state. 
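The ibm_sa_domain examples above only pass ldap_id; a hedged sketch that sets some of the documented capacity options (all sizes and credentials are placeholders) could be:

- name: Define a domain with capacity limits (illustrative sketch)
  community.general.ibm_sa_domain:
    domain: domain_name
    size: "1TB"                # placeholder size string
    hard_capacity: "1TB"       # placeholder hard capacity
    soft_capacity: "900GB"     # placeholder soft capacity
    max_pools: "2"             # placeholder pool limit
    state: present
    username: admin
    password: secret
    endpoints: hostdev-system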
- default: "present" - choices: [ "present", "absent" ] - type: str - cluster: - description: - - The name of the cluster to include the host. - required: false - type: str - domain: - description: - - The domains the cluster will be attached to. - To include more than one domain, - separate domain names with commas. - To include all existing domains, use an asterisk ("*"). - required: false - type: str - iscsi_chap_name: - description: - - The host's CHAP name identifier - required: false - type: str - iscsi_chap_secret: - description: - - The password of the initiator used to - authenticate to the system when CHAP is enable - required: false - type: str + host: + description: + - Host name. + required: true + type: str + state: + description: + - Host state. + default: "present" + choices: ["present", "absent"] + type: str + cluster: + description: + - The name of the cluster to include the host. + required: false + type: str + domain: + description: + - The domains the cluster will be attached to. To include more than one domain, separate domain names with commas. To + include all existing domains, use an asterisk (V(*)). + required: false + type: str + iscsi_chap_name: + description: + - The host's CHAP name identifier. + required: false + type: str + iscsi_chap_secret: + description: + - The password of the initiator used to authenticate to the system when CHAP is enable. + required: false + type: str extends_documentation_fragment: - community.general.ibm_storage - community.general.attributes author: - - Tzur Eliyahu (@tzure) -''' + - Tzur Eliyahu (@tzure) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Define new host. community.general.ibm_sa_host: host: host_name @@ -85,9 +80,9 @@ EXAMPLES = ''' username: admin password: secret endpoints: hostdev-system -''' -RETURN = ''' -''' +""" +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ diff --git a/plugins/modules/ibm_sa_host_ports.py b/plugins/modules/ibm_sa_host_ports.py index fdb27f85a2..25342eb62e 100644 --- a/plugins/modules/ibm_sa_host_ports.py +++ b/plugins/modules/ibm_sa_host_ports.py @@ -10,58 +10,55 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ibm_sa_host_ports short_description: Add host ports on IBM Spectrum Accelerate Family storage systems description: - - "This module adds ports to or removes them from the hosts - on IBM Spectrum Accelerate Family storage systems." - + - This module adds ports to or removes them from the hosts on IBM Spectrum Accelerate Family storage systems. attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - host: - description: - - Host name. - required: true - type: str - state: - description: - - Host ports state. - default: "present" - choices: [ "present", "absent" ] - type: str - iscsi_name: - description: - - iSCSI initiator name. - required: false - type: str - fcaddress: - description: - - Fiber channel address. - required: false - type: str - num_of_visible_targets: - description: - - Number of visible targets. - required: false - type: str + host: + description: + - Host name. + required: true + type: str + state: + description: + - Host ports state. + default: "present" + choices: ["present", "absent"] + type: str + iscsi_name: + description: + - The iSCSI initiator name. 
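For ibm_sa_host, documented above, a sketch that also uses the cluster, domain, and CHAP options (all names and secrets are placeholders) might read:

- name: Define a host in a cluster with CHAP authentication (illustrative sketch)
  community.general.ibm_sa_host:
    host: host_name
    cluster: cluster_name            # placeholder cluster name
    domain: domain_name              # placeholder domain; use "*" to include all domains
    iscsi_chap_name: chap_name       # placeholder CHAP identifier
    iscsi_chap_secret: chap_secret   # placeholder CHAP secret
    state: present
    username: admin
    password: secret
    endpoints: hostdev-system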
+ required: false + type: str + fcaddress: + description: + - Fiber channel address. + required: false + type: str + num_of_visible_targets: + description: + - Number of visible targets. + required: false + type: str extends_documentation_fragment: - community.general.ibm_storage - community.general.attributes author: - - Tzur Eliyahu (@tzure) -''' + - Tzur Eliyahu (@tzure) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Add ports for host. community.general.ibm_sa_host_ports: host: test_host @@ -79,10 +76,9 @@ EXAMPLES = ''' password: secret endpoints: hostdev-system state: absent - -''' -RETURN = ''' -''' +""" +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import (execute_pyxcli_command, connect_ssl, diff --git a/plugins/modules/ibm_sa_pool.py b/plugins/modules/ibm_sa_pool.py index 88065aa4ec..38f3820435 100644 --- a/plugins/modules/ibm_sa_pool.py +++ b/plugins/modules/ibm_sa_pool.py @@ -10,62 +10,60 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ibm_sa_pool short_description: Handles pools on IBM Spectrum Accelerate Family storage systems description: - - "This module creates or deletes pools to be used on IBM Spectrum Accelerate Family storage systems" - + - This module creates or deletes pools to be used on IBM Spectrum Accelerate Family storage systems. attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - pool: - description: - - Pool name. - required: true - type: str - state: - description: - - Pool state. - default: "present" - choices: [ "present", "absent" ] - type: str - size: - description: - - Pool size in GB - required: false - type: str - snapshot_size: - description: - - Pool snapshot size in GB - required: false - type: str - domain: - description: - - Adds the pool to the specified domain. - required: false - type: str - perf_class: - description: - - Assigns a perf_class to the pool. - required: false - type: str + pool: + description: + - Pool name. + required: true + type: str + state: + description: + - Pool state. + default: "present" + choices: ["present", "absent"] + type: str + size: + description: + - Pool size in GB. + required: false + type: str + snapshot_size: + description: + - Pool snapshot size in GB. + required: false + type: str + domain: + description: + - Adds the pool to the specified domain. + required: false + type: str + perf_class: + description: + - Assigns a perf_class to the pool. + required: false + type: str extends_documentation_fragment: - community.general.ibm_storage - community.general.attributes author: - - Tzur Eliyahu (@tzure) -''' + - Tzur Eliyahu (@tzure) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create new pool. 
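Alongside the iscsi_name example shipped with ibm_sa_host_ports above, a hedged sketch that adds a Fibre Channel port through the documented fcaddress option (the WWPN is a placeholder) could be:

- name: Add a Fibre Channel port to a host (illustrative sketch)
  community.general.ibm_sa_host_ports:
    host: test_host
    fcaddress: "10000000c9aabbcc"   # placeholder WWPN
    state: present
    username: admin
    password: secret
    endpoints: hostdev-system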
community.general.ibm_sa_pool: name: pool_name @@ -82,9 +80,9 @@ EXAMPLES = ''' username: admin password: secret endpoints: hostdev-system -''' -RETURN = ''' -''' +""" +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ diff --git a/plugins/modules/ibm_sa_vol.py b/plugins/modules/ibm_sa_vol.py index bc5f81b32f..f9d0837b17 100644 --- a/plugins/modules/ibm_sa_vol.py +++ b/plugins/modules/ibm_sa_vol.py @@ -10,52 +10,50 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ibm_sa_vol short_description: Handle volumes on IBM Spectrum Accelerate Family storage systems description: - - "This module creates or deletes volumes to be used on IBM Spectrum Accelerate Family storage systems." - + - This module creates or deletes volumes to be used on IBM Spectrum Accelerate Family storage systems. attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - vol: - description: - - Volume name. - required: true - type: str - pool: - description: - - Volume pool. - required: false - type: str - state: - description: - - Volume state. - default: "present" - choices: [ "present", "absent" ] - type: str - size: - description: - - Volume size. - required: false - type: str + vol: + description: + - Volume name. + required: true + type: str + pool: + description: + - Volume pool. + required: false + type: str + state: + description: + - Volume state. + default: "present" + choices: ["present", "absent"] + type: str + size: + description: + - Volume size. + required: false + type: str extends_documentation_fragment: - community.general.ibm_storage - community.general.attributes author: - - Tzur Eliyahu (@tzure) -''' + - Tzur Eliyahu (@tzure) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a new volume. community.general.ibm_sa_vol: vol: volume_name @@ -73,9 +71,9 @@ EXAMPLES = ''' username: admin password: secret endpoints: hostdev-system -''' -RETURN = ''' -''' +""" +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ diff --git a/plugins/modules/ibm_sa_vol_map.py b/plugins/modules/ibm_sa_vol_map.py index ea8b485ef1..7f5edf83ba 100644 --- a/plugins/modules/ibm_sa_vol_map.py +++ b/plugins/modules/ibm_sa_vol_map.py @@ -10,65 +10,61 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ibm_sa_vol_map short_description: Handles volume mapping on IBM Spectrum Accelerate Family storage systems description: - - "This module maps volumes to or unmaps them from the hosts on - IBM Spectrum Accelerate Family storage systems." - + - This module maps volumes to or unmaps them from the hosts on IBM Spectrum Accelerate Family storage systems. attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - vol: - description: - - Volume name. - required: true - type: str - state: - default: "present" - choices: [ "present", "absent" ] - description: - - When the state is present the volume is mapped. - When the state is absent, the volume is meant to be unmapped. - type: str + vol: + description: + - Volume name. 
+ required: true + type: str + state: + default: "present" + choices: ["present", "absent"] + description: + - When the state is present the volume is mapped. When the state is absent, the volume is meant to be unmapped. + type: str - cluster: - description: - - Maps the volume to a cluster. - required: false - type: str - host: - description: - - Maps the volume to a host. - required: false - type: str - lun: - description: - - The LUN identifier. - required: false - type: str - override: - description: - - Overrides the existing volume mapping. - required: false - type: str + cluster: + description: + - Maps the volume to a cluster. + required: false + type: str + host: + description: + - Maps the volume to a host. + required: false + type: str + lun: + description: + - The LUN identifier. + required: false + type: str + override: + description: + - Overrides the existing volume mapping. + required: false + type: str extends_documentation_fragment: - community.general.ibm_storage - community.general.attributes author: - - Tzur Eliyahu (@tzure) -''' + - Tzur Eliyahu (@tzure) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Map volume to host. community.general.ibm_sa_vol_map: vol: volume_name @@ -96,9 +92,9 @@ EXAMPLES = ''' password: secret endpoints: hostdev-system state: absent -''' -RETURN = ''' -''' +""" +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import (execute_pyxcli_command, diff --git a/plugins/modules/icinga2_feature.py b/plugins/modules/icinga2_feature.py index 0c79f6cba9..1b39a857e4 100644 --- a/plugins/modules/icinga2_feature.py +++ b/plugins/modules/icinga2_feature.py @@ -13,39 +13,38 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: icinga2_feature short_description: Manage Icinga2 feature description: - - This module can be used to enable or disable an Icinga2 feature. + - This module can be used to enable or disable an Icinga2 feature. author: "Loic Blot (@nerzhul)" extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - name: - type: str - description: + name: + type: str + description: - This is the feature name to enable or disable. - required: true - state: - type: str - description: + required: true + state: + type: str + description: - If set to V(present) and feature is disabled, then feature is enabled. - If set to V(present) and feature is already enabled, then nothing is changed. - If set to V(absent) and feature is enabled, then feature is disabled. - If set to V(absent) and feature is already disabled, then nothing is changed. 
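To tie the ibm_sa_vol_map options above together, here is a hedged sketch that maps a volume to a cluster with an explicit LUN; the volume, cluster, and endpoint values are illustrative placeholders in the style of the existing examples.

- name: Map volume to cluster with an explicit LUN
  community.general.ibm_sa_vol_map:
    vol: volume_name
    cluster: cluster_name
    lun: 1
    state: present
    username: admin
    password: secret
    endpoints: hostdev-system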
- choices: [ "present", "absent" ] - default: present -''' + choices: ["present", "absent"] + default: present +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Enable ido-pgsql feature community.general.icinga2_feature: name: ido-pgsql @@ -55,11 +54,11 @@ EXAMPLES = ''' community.general.icinga2_feature: name: api state: absent -''' +""" -RETURN = ''' +RETURN = r""" # -''' +""" import re from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/icinga2_host.py b/plugins/modules/icinga2_host.py index ec04d8df74..271a6387bc 100644 --- a/plugins/modules/icinga2_host.py +++ b/plugins/modules/icinga2_host.py @@ -11,13 +11,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: icinga2_host short_description: Manage a host in Icinga2 description: - - "Add or remove a host to Icinga2 through the API." - - "See U(https://www.icinga.com/docs/icinga2/latest/doc/12-icinga2-api/)" + - Add or remove a host to Icinga2 through the API. + - See U(https://www.icinga.com/docs/icinga2/latest/doc/12-icinga2-api/). author: "Jurgen Brand (@t794104)" attributes: check_mode: @@ -28,17 +27,16 @@ options: url: type: str description: - - HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path + - HTTP, HTTPS, or FTP URL in the form V((http|https|ftp\)://[user[:pass]]@host.domain[:port]/path). use_proxy: description: - - If V(false), it will not use a proxy, even if one is defined in - an environment variable on the target hosts. + - If V(false), it will not use a proxy, even if one is defined in an environment variable on the target hosts. type: bool default: true validate_certs: description: - - If V(false), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using + self-signed certificates. type: bool default: true url_username: @@ -49,33 +47,30 @@ options: url_password: type: str description: - - The password for use in HTTP basic authentication. - - If the O(url_username) parameter is not specified, the O(url_password) parameter will not be used. + - The password for use in HTTP basic authentication. + - If the O(url_username) parameter is not specified, the O(url_password) parameter will not be used. force_basic_auth: description: - - httplib2, the library used by the uri module only sends authentication information when a webservice - responds to an initial request with a 401 status. Since some basic auth services do not properly - send a 401, logins will fail. This option forces the sending of the Basic authentication header - upon initial request. + - Httplib2, the library used by the uri module only sends authentication information when a webservice responds to an + initial request with a 401 status. Since some basic auth services do not properly send a 401, logins will fail. This + option forces the sending of the Basic authentication header upon initial request. type: bool default: false client_cert: type: path description: - - PEM formatted certificate chain file to be used for SSL client - authentication. This file can also include the key as well, and if - the key is included, O(client_key) is not required. + - PEM formatted certificate chain file to be used for SSL client authentication. 
This file can also include the key + as well, and if the key is included, O(client_key) is not required. client_key: type: path description: - - PEM formatted file that contains your private key to be used for SSL - client authentication. If O(client_cert) contains both the certificate - and key, this option is not required. + - PEM formatted file that contains your private key to be used for SSL client authentication. If O(client_cert) contains + both the certificate and key, this option is not required. state: type: str description: - Apply feature state. - choices: [ "present", "absent" ] + choices: ["present", "absent"] default: present name: type: str @@ -114,9 +109,9 @@ options: extends_documentation_fragment: - ansible.builtin.url - community.general.attributes -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Add host to icinga community.general.icinga2_host: url: "https://icinga2.example.com" @@ -128,18 +123,18 @@ EXAMPLES = ''' variables: foo: "bar" delegate_to: 127.0.0.1 -''' +""" -RETURN = ''' +RETURN = r""" name: - description: The name used to create, modify or delete the host - type: str - returned: always + description: The name used to create, modify or delete the host. + type: str + returned: always data: - description: The data structure used for create, modify or delete of the host - type: dict - returned: always -''' + description: The data structure used for create, modify or delete of the host. + type: dict + returned: always +""" import json @@ -282,9 +277,7 @@ def main(): 'vars.made_by': "ansible" } } - - for key, value in variables.items(): - data['attrs']['vars.' + key] = value + data['attrs'].update({'vars.' + key: value for key, value in variables.items()}) changed = False if icinga.exists(name): diff --git a/plugins/modules/idrac_redfish_command.py b/plugins/modules/idrac_redfish_command.py index d760a2c3a3..531da53162 100644 --- a/plugins/modules/idrac_redfish_command.py +++ b/plugins/modules/idrac_redfish_command.py @@ -8,13 +8,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: idrac_redfish_command short_description: Manages Out-Of-Band controllers using iDRAC OEM Redfish APIs description: - - Builds Redfish URIs locally and sends them to remote OOB controllers to - perform an action. + - Builds Redfish URIs locally and sends them to remote OOB controllers to perform an action. - For use with Dell iDRAC operations that require Redfish OEM extensions. extends_documentation_fragment: - community.general.attributes @@ -66,34 +64,32 @@ options: version_added: '0.2.0' author: "Jose Delarosa (@jose-delarosa)" -''' +""" -EXAMPLES = ''' - - name: Create BIOS configuration job (schedule BIOS setting update) - community.general.idrac_redfish_command: - category: Systems - command: CreateBiosConfigJob - resource_id: System.Embedded.1 - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" -''' +EXAMPLES = r""" +- name: Create BIOS configuration job (schedule BIOS setting update) + community.general.idrac_redfish_command: + category: Systems + command: CreateBiosConfigJob + resource_id: System.Embedded.1 + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" +""" -RETURN = ''' +RETURN = r""" msg: - description: Message with action result or error description - returned: always - type: str - sample: "Action was successful" + description: Message with action result or error description. 
+ returned: always + type: str + sample: "Action was successful" return_values: - description: Dictionary containing command-specific response data from the action. - returned: on success - type: dict - version_added: 6.6.0 - sample: { - "job_id": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_471269252011" - } -''' + description: Dictionary containing command-specific response data from the action. + returned: on success + type: dict + version_added: 6.6.0 + sample: {"job_id": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_471269252011"} +""" import re from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/idrac_redfish_config.py b/plugins/modules/idrac_redfish_config.py index 0388bf00fb..97d7a62d04 100644 --- a/plugins/modules/idrac_redfish_config.py +++ b/plugins/modules/idrac_redfish_config.py @@ -8,14 +8,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: idrac_redfish_config short_description: Manages servers through iDRAC using Dell Redfish APIs description: - - For use with Dell iDRAC operations that require Redfish OEM extensions - - Builds Redfish URIs locally and sends them to remote iDRAC controllers to - set or update a configuration attribute. + - For use with Dell iDRAC operations that require Redfish OEM extensions. + - Builds Redfish URIs locally and sends them to remote iDRAC controllers to set or update a configuration attribute. extends_documentation_fragment: - community.general.attributes attributes: @@ -33,9 +31,8 @@ options: required: true description: - List of commands to execute on iDRAC. - - V(SetManagerAttributes), V(SetLifecycleControllerAttributes) and - V(SetSystemAttributes) are mutually exclusive commands when O(category) - is V(Manager). + - V(SetManagerAttributes), V(SetLifecycleControllerAttributes) and V(SetSystemAttributes) are mutually exclusive commands + when O(category) is V(Manager). 
type: list elements: str baseuri: @@ -76,81 +73,81 @@ options: version_added: '0.2.0' author: "Jose Delarosa (@jose-delarosa)" -''' +""" -EXAMPLES = ''' - - name: Enable NTP and set NTP server and Time zone attributes in iDRAC - community.general.idrac_redfish_config: - category: Manager - command: SetManagerAttributes - resource_id: iDRAC.Embedded.1 - manager_attributes: - NTPConfigGroup.1.NTPEnable: "Enabled" - NTPConfigGroup.1.NTP1: "{{ ntpserver1 }}" - Time.1.Timezone: "{{ timezone }}" - baseuri: "{{ baseuri }}" - username: "{{ username}}" - password: "{{ password }}" +EXAMPLES = r""" +- name: Enable NTP and set NTP server and Time zone attributes in iDRAC + community.general.idrac_redfish_config: + category: Manager + command: SetManagerAttributes + resource_id: iDRAC.Embedded.1 + manager_attributes: + NTPConfigGroup.1.NTPEnable: "Enabled" + NTPConfigGroup.1.NTP1: "{{ ntpserver1 }}" + Time.1.Timezone: "{{ timezone }}" + baseuri: "{{ baseuri }}" + username: "{{ username}}" + password: "{{ password }}" - - name: Enable Syslog and set Syslog servers in iDRAC - community.general.idrac_redfish_config: - category: Manager - command: SetManagerAttributes - resource_id: iDRAC.Embedded.1 - manager_attributes: - SysLog.1.SysLogEnable: "Enabled" - SysLog.1.Server1: "{{ syslog_server1 }}" - SysLog.1.Server2: "{{ syslog_server2 }}" - baseuri: "{{ baseuri }}" - username: "{{ username}}" - password: "{{ password }}" +- name: Enable Syslog and set Syslog servers in iDRAC + community.general.idrac_redfish_config: + category: Manager + command: SetManagerAttributes + resource_id: iDRAC.Embedded.1 + manager_attributes: + SysLog.1.SysLogEnable: "Enabled" + SysLog.1.Server1: "{{ syslog_server1 }}" + SysLog.1.Server2: "{{ syslog_server2 }}" + baseuri: "{{ baseuri }}" + username: "{{ username}}" + password: "{{ password }}" - - name: Configure SNMP community string, port, protocol and trap format - community.general.idrac_redfish_config: - category: Manager - command: SetManagerAttributes - resource_id: iDRAC.Embedded.1 - manager_attributes: - SNMP.1.AgentEnable: "Enabled" - SNMP.1.AgentCommunity: "public_community_string" - SNMP.1.TrapFormat: "SNMPv1" - SNMP.1.SNMPProtocol: "All" - SNMP.1.DiscoveryPort: 161 - SNMP.1.AlertPort: 162 - baseuri: "{{ baseuri }}" - username: "{{ username}}" - password: "{{ password }}" +- name: Configure SNMP community string, port, protocol and trap format + community.general.idrac_redfish_config: + category: Manager + command: SetManagerAttributes + resource_id: iDRAC.Embedded.1 + manager_attributes: + SNMP.1.AgentEnable: "Enabled" + SNMP.1.AgentCommunity: "public_community_string" + SNMP.1.TrapFormat: "SNMPv1" + SNMP.1.SNMPProtocol: "All" + SNMP.1.DiscoveryPort: 161 + SNMP.1.AlertPort: 162 + baseuri: "{{ baseuri }}" + username: "{{ username}}" + password: "{{ password }}" - - name: Enable CSIOR - community.general.idrac_redfish_config: - category: Manager - command: SetLifecycleControllerAttributes - resource_id: iDRAC.Embedded.1 - manager_attributes: - LCAttributes.1.CollectSystemInventoryOnRestart: "Enabled" - baseuri: "{{ baseuri }}" - username: "{{ username}}" - password: "{{ password }}" +- name: Enable CSIOR + community.general.idrac_redfish_config: + category: Manager + command: SetLifecycleControllerAttributes + resource_id: iDRAC.Embedded.1 + manager_attributes: + LCAttributes.1.CollectSystemInventoryOnRestart: "Enabled" + baseuri: "{{ baseuri }}" + username: "{{ username}}" + password: "{{ password }}" - - name: Set Power Supply Redundancy Policy to A/B Grid 
Redundant - community.general.idrac_redfish_config: - category: Manager - command: SetSystemAttributes - resource_id: iDRAC.Embedded.1 - manager_attributes: - ServerPwr.1.PSRedPolicy: "A/B Grid Redundant" - baseuri: "{{ baseuri }}" - username: "{{ username}}" - password: "{{ password }}" -''' +- name: Set Power Supply Redundancy Policy to A/B Grid Redundant + community.general.idrac_redfish_config: + category: Manager + command: SetSystemAttributes + resource_id: iDRAC.Embedded.1 + manager_attributes: + ServerPwr.1.PSRedPolicy: "A/B Grid Redundant" + baseuri: "{{ baseuri }}" + username: "{{ username}}" + password: "{{ password }}" +""" -RETURN = ''' +RETURN = r""" msg: - description: Message with action result or error description - returned: always - type: str - sample: "Action was successful" -''' + description: Message with action result or error description. + returned: always + type: str + sample: "Action was successful" +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.validation import ( diff --git a/plugins/modules/idrac_redfish_info.py b/plugins/modules/idrac_redfish_info.py index 90b355d13b..3a8ea8103f 100644 --- a/plugins/modules/idrac_redfish_info.py +++ b/plugins/modules/idrac_redfish_info.py @@ -8,13 +8,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: idrac_redfish_info short_description: Gather PowerEdge server information through iDRAC using Redfish APIs description: - - Builds Redfish URIs locally and sends them to remote iDRAC controllers to - get information back. + - Builds Redfish URIs locally and sends them to remote iDRAC controllers to get information back. - For use with Dell EMC iDRAC operations that require Redfish OEM extensions. extends_documentation_fragment: - community.general.attributes @@ -33,8 +31,7 @@ options: required: true description: - List of commands to execute on iDRAC. - - V(GetManagerAttributes) returns the list of dicts containing iDRAC, - LifecycleController and System attributes. + - V(GetManagerAttributes) returns the list of dicts containing iDRAC, LifecycleController and System attributes. 
type: list elements: str baseuri: @@ -62,67 +59,69 @@ options: type: int author: "Jose Delarosa (@jose-delarosa)" -''' +""" -EXAMPLES = ''' - - name: Get Manager attributes with a default of 20 seconds - community.general.idrac_redfish_info: - category: Manager - command: GetManagerAttributes - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - timeout: 20 - register: result +EXAMPLES = r""" +- name: Get Manager attributes with a default of 20 seconds + community.general.idrac_redfish_info: + category: Manager + command: GetManagerAttributes + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + timeout: 20 + register: result - # Examples to display the value of all or a single iDRAC attribute - - name: Store iDRAC attributes as a fact variable - ansible.builtin.set_fact: - idrac_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'iDRACAttributes') | list | first }}" +# Examples to display the value of all or a single iDRAC attribute +- name: Store iDRAC attributes as a fact variable + ansible.builtin.set_fact: + idrac_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'iDRACAttributes') + | list | first }}" - - name: Display all iDRAC attributes - ansible.builtin.debug: - var: idrac_attributes +- name: Display all iDRAC attributes + ansible.builtin.debug: + var: idrac_attributes - - name: Display the value of 'Syslog.1.SysLogEnable' iDRAC attribute - ansible.builtin.debug: - var: idrac_attributes['Syslog.1.SysLogEnable'] +- name: Display the value of 'Syslog.1.SysLogEnable' iDRAC attribute + ansible.builtin.debug: + var: idrac_attributes['Syslog.1.SysLogEnable'] - # Examples to display the value of all or a single LifecycleController attribute - - name: Store LifecycleController attributes as a fact variable - ansible.builtin.set_fact: - lc_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'LCAttributes') | list | first }}" +# Examples to display the value of all or a single LifecycleController attribute +- name: Store LifecycleController attributes as a fact variable + ansible.builtin.set_fact: + lc_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'LCAttributes') + | list | first }}" - - name: Display LifecycleController attributes - ansible.builtin.debug: - var: lc_attributes +- name: Display LifecycleController attributes + ansible.builtin.debug: + var: lc_attributes - - name: Display the value of 'CollectSystemInventoryOnRestart' attribute - ansible.builtin.debug: - var: lc_attributes['LCAttributes.1.CollectSystemInventoryOnRestart'] +- name: Display the value of 'CollectSystemInventoryOnRestart' attribute + ansible.builtin.debug: + var: lc_attributes['LCAttributes.1.CollectSystemInventoryOnRestart'] - # Examples to display the value of all or a single System attribute - - name: Store System attributes as a fact variable - ansible.builtin.set_fact: - system_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'SystemAttributes') | list | first }}" +# Examples to display the value of all or a single System attribute +- name: Store System attributes as a fact variable + ansible.builtin.set_fact: + system_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'SystemAttributes') + | list | first }}" - - 
name: Display System attributes - ansible.builtin.debug: - var: system_attributes +- name: Display System attributes + ansible.builtin.debug: + var: system_attributes - - name: Display the value of 'PSRedPolicy' - ansible.builtin.debug: - var: system_attributes['ServerPwr.1.PSRedPolicy'] +- name: Display the value of 'PSRedPolicy' + ansible.builtin.debug: + var: system_attributes['ServerPwr.1.PSRedPolicy'] +""" -''' - -RETURN = ''' +RETURN = r""" msg: - description: different results depending on task - returned: always - type: dict - sample: List of Manager attributes -''' + description: Different results depending on task. + returned: always + type: dict + sample: List of Manager attributes +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils diff --git a/plugins/modules/ilo_redfish_command.py b/plugins/modules/ilo_redfish_command.py index e0e28f855d..3e698fc049 100644 --- a/plugins/modules/ilo_redfish_command.py +++ b/plugins/modules/ilo_redfish_command.py @@ -6,14 +6,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ilo_redfish_command short_description: Manages Out-Of-Band controllers using Redfish APIs version_added: 6.6.0 description: - - Builds Redfish URIs locally and sends them to remote OOB controllers to - perform an action. + - Builds Redfish URIs locally and sends them to remote OOB controllers to perform an action. attributes: check_mode: support: none @@ -62,35 +60,35 @@ options: type: int author: - Varni H P (@varini-hp) -''' +""" -EXAMPLES = ''' - - name: Wait for iLO Reboot Completion - community.general.ilo_redfish_command: - category: Systems - command: WaitforiLORebootCompletion - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" -''' +EXAMPLES = r""" +- name: Wait for iLO Reboot Completion + community.general.ilo_redfish_command: + category: Systems + command: WaitforiLORebootCompletion + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" +""" -RETURN = ''' +RETURN = r""" ilo_redfish_command: - description: Returns the status of the operation performed on the iLO. - type: dict - contains: - WaitforiLORebootCompletion: - description: Returns the output msg and whether the function executed successfully. - type: dict - contains: - ret: - description: Return True/False based on whether the operation was performed successfully. - type: bool - msg: - description: Status of the operation performed on the iLO. - type: str - returned: always -''' + description: Returns the status of the operation performed on the iLO. + type: dict + contains: + WaitforiLORebootCompletion: + description: Returns the output msg and whether the function executed successfully. + type: dict + contains: + ret: + description: Return V(true)/V(false) based on whether the operation was performed successfully. + type: bool + msg: + description: Status of the operation performed on the iLO. 
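Given the nested return structure documented above for ilo_redfish_command, one hedged way to act on the outcome is to register the result and assert on the documented ret flag; the ilo_reboot variable name is illustrative.

- name: Wait for iLO Reboot Completion and record the outcome
  community.general.ilo_redfish_command:
    category: Systems
    command: WaitforiLORebootCompletion
    baseuri: "{{ baseuri }}"
    username: "{{ username }}"
    password: "{{ password }}"
  register: ilo_reboot

- name: Fail early if the wait did not succeed
  ansible.builtin.assert:
    that:
      - ilo_reboot.ilo_redfish_command.WaitforiLORebootCompletion.ret
    fail_msg: "{{ ilo_reboot.ilo_redfish_command.WaitforiLORebootCompletion.msg }}"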
+ type: str + returned: always +""" # More will be added as module features are expanded CATEGORY_COMMANDS_ALL = { diff --git a/plugins/modules/ilo_redfish_config.py b/plugins/modules/ilo_redfish_config.py index 1f021895dc..fdda339ab3 100644 --- a/plugins/modules/ilo_redfish_config.py +++ b/plugins/modules/ilo_redfish_config.py @@ -6,14 +6,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ilo_redfish_config short_description: Sets or updates configuration attributes on HPE iLO with Redfish OEM extensions version_added: 4.2.0 description: - - Builds Redfish URIs locally and sends them to iLO to - set or update a configuration attribute. + - Builds Redfish URIs locally and sends them to iLO to set or update a configuration attribute. - For use with HPE iLO operations that require Redfish OEM extensions. extends_documentation_fragment: - community.general.attributes @@ -68,48 +66,47 @@ options: - Value of the attribute to be configured. type: str author: - - "Bhavya B (@bhavya06)" -''' + - "Bhavya B (@bhavya06)" +""" -EXAMPLES = ''' - - name: Disable WINS Registration - community.general.ilo_redfish_config: - category: Manager - command: SetWINSReg - baseuri: 15.X.X.X - username: Admin - password: Testpass123 - attribute_name: WINSRegistration +EXAMPLES = r""" +- name: Disable WINS Registration + community.general.ilo_redfish_config: + category: Manager + command: SetWINSReg + baseuri: 15.X.X.X + username: Admin + password: Testpass123 + attribute_name: WINSRegistration - - name: Set Time Zone - community.general.ilo_redfish_config: - category: Manager - command: SetTimeZone - baseuri: 15.X.X.X - username: Admin - password: Testpass123 - attribute_name: TimeZone - attribute_value: Chennai +- name: Set Time Zone + community.general.ilo_redfish_config: + category: Manager + command: SetTimeZone + baseuri: 15.X.X.X + username: Admin + password: Testpass123 + attribute_name: TimeZone + attribute_value: Chennai - - name: Set NTP Servers - community.general.ilo_redfish_config: - category: Manager - command: SetNTPServers - baseuri: 15.X.X.X - username: Admin - password: Testpass123 - attribute_name: StaticNTPServers - attribute_value: X.X.X.X +- name: Set NTP Servers + community.general.ilo_redfish_config: + category: Manager + command: SetNTPServers + baseuri: 15.X.X.X + username: Admin + password: Testpass123 + attribute_name: StaticNTPServers + attribute_value: X.X.X.X +""" -''' - -RETURN = ''' +RETURN = r""" msg: - description: Message with action result or error description - returned: always - type: str - sample: "Action was successful" -''' + description: Message with action result or error description. + returned: always + type: str + sample: "Action was successful" +""" CATEGORY_COMMANDS_ALL = { "Manager": ["SetTimeZone", "SetDNSserver", "SetDomainName", "SetNTPServers", "SetWINSReg"] diff --git a/plugins/modules/ilo_redfish_info.py b/plugins/modules/ilo_redfish_info.py index 90cafb8ec6..3bd379e80a 100644 --- a/plugins/modules/ilo_redfish_info.py +++ b/plugins/modules/ilo_redfish_info.py @@ -6,14 +6,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ilo_redfish_info short_description: Gathers server information through iLO using Redfish APIs version_added: 4.2.0 description: - - Builds Redfish URIs locally and sends them to iLO to - get information back. 
+ - Builds Redfish URIs locally and sends them to iLO to get information back. - For use with HPE iLO operations that require Redfish OEM extensions. extends_documentation_fragment: - community.general.attributes @@ -54,51 +52,51 @@ options: default: 10 type: int author: - - "Bhavya B (@bhavya06)" -''' + - "Bhavya B (@bhavya06)" +""" -EXAMPLES = ''' - - name: Get iLO Sessions - community.general.ilo_redfish_info: - category: Sessions - command: GetiLOSessions - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result_sessions -''' +EXAMPLES = r""" +- name: Get iLO Sessions + community.general.ilo_redfish_info: + category: Sessions + command: GetiLOSessions + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result_sessions +""" -RETURN = ''' +RETURN = r""" ilo_redfish_info: - description: Returns iLO sessions. - type: dict - contains: - GetiLOSessions: - description: Returns the iLO session msg and whether the function executed successfully. - type: dict - contains: - ret: - description: Check variable to see if the information was successfully retrieved. - type: bool - msg: - description: Information of all active iLO sessions. - type: list - elements: dict - contains: - Description: - description: Provides a description of the resource. - type: str - Id: - description: The sessionId. - type: str - Name: - description: The name of the resource. - type: str - UserName: - description: Name to use to log in to the management processor. - type: str - returned: always -''' + description: Returns iLO sessions. + type: dict + contains: + GetiLOSessions: + description: Returns the iLO session msg and whether the function executed successfully. + type: dict + contains: + ret: + description: Check variable to see if the information was successfully retrieved. + type: bool + msg: + description: Information of all active iLO sessions. + type: list + elements: dict + contains: + Description: + description: Provides a description of the resource. + type: str + Id: + description: The sessionId. + type: str + Name: + description: The name of the resource. + type: str + UserName: + description: Name to use to log in to the management processor. + type: str + returned: always +""" CATEGORY_COMMANDS_ALL = { "Sessions": ["GetiLOSessions"] diff --git a/plugins/modules/imc_rest.py b/plugins/modules/imc_rest.py index 7f5a5e0814..d9313b973c 100644 --- a/plugins/modules/imc_rest.py +++ b/plugins/modules/imc_rest.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: imc_rest short_description: Manage Cisco IMC hardware through its REST API description: @@ -32,75 +31,74 @@ attributes: options: hostname: description: - - IP Address or hostname of Cisco IMC, resolvable by Ansible control host. + - IP Address or hostname of Cisco IMC, resolvable by Ansible control host. required: true - aliases: [ host, ip ] + aliases: [host, ip] type: str username: description: - - Username used to login to the switch. + - Username used to login to the switch. default: admin - aliases: [ user ] + aliases: [user] type: str password: description: - - The password to use for authentication. + - The password to use for authentication. default: password type: str path: description: - - Name of the absolute path of the filename that includes the body - of the http request being sent to the Cisco IMC REST API. 
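Building on the GetiLOSessions example and RETURN structure above, a short hedged follow-up task shows how the registered result_sessions variable could be inspected.

- name: Show the active iLO sessions gathered above
  ansible.builtin.debug:
    var: result_sessions.ilo_redfish_info.GetiLOSessions.msg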
- - Parameter O(path) is mutual exclusive with parameter O(content). - aliases: [ 'src', 'config_file' ] + - Name of the absolute path of the filename that includes the body of the http request being sent to the Cisco IMC REST + API. + - Parameter O(path) is mutual exclusive with parameter O(content). + aliases: ['src', 'config_file'] type: path content: description: - - When used instead of O(path), sets the content of the API requests directly. - - This may be convenient to template simple requests, for anything complex use the M(ansible.builtin.template) module. - - You can collate multiple IMC XML fragments and they will be processed sequentially in a single stream, - the Cisco IMC output is subsequently merged. - - Parameter O(content) is mutual exclusive with parameter O(path). + - When used instead of O(path), sets the content of the API requests directly. + - This may be convenient to template simple requests, for anything complex use the M(ansible.builtin.template) module. + - You can collate multiple IMC XML fragments and they will be processed sequentially in a single stream, the Cisco IMC + output is subsequently merged. + - Parameter O(content) is mutual exclusive with parameter O(path). type: str protocol: description: - - Connection protocol to use. + - Connection protocol to use. default: https - choices: [ http, https ] + choices: [http, https] type: str timeout: description: - - The socket level timeout in seconds. - - This is the time that every single connection (every fragment) can spend. - If this O(timeout) is reached, the module will fail with a - C(Connection failure) indicating that C(The read operation timed out). + - The socket level timeout in seconds. + - This is the time that every single connection (every fragment) can spend. If this O(timeout) is reached, the module + will fail with a C(Connection failure) indicating that C(The read operation timed out). default: 60 type: int validate_certs: description: - - If V(false), SSL certificates will not be validated. - - This should only set to V(false) used on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates will not be validated. + - This should only set to V(false) used on personally controlled sites using self-signed certificates. type: bool default: true notes: -- The XML fragments don't need an authentication cookie, this is injected by the module automatically. -- The Cisco IMC XML output is being translated to JSON using the Cobra convention. -- Any configConfMo change requested has a return status of 'modified', even if there was no actual change - from the previous configuration. As a result, this module will always report a change on subsequent runs. - In case this behaviour is fixed in a future update to Cisco IMC, this module will automatically adapt. -- If you get a C(Connection failure) related to C(The read operation timed out) increase the O(timeout) - parameter. Some XML fragments can take longer than the default timeout. -- More information about the IMC REST API is available from - U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html) -''' + - The XML fragments do not need an authentication cookie, this is injected by the module automatically. + - The Cisco IMC XML output is being translated to JSON using the Cobra convention. + - Any configConfMo change requested has a return status of C(modified), even if there was no actual change from the previous + configuration. 
As a result, this module will always report a change on subsequent runs. In case this behaviour is fixed + in a future update to Cisco IMC, this module will automatically adapt. + - If you get a C(Connection failure) related to C(The read operation timed out) increase the O(timeout) parameter. Some + XML fragments can take longer than the default timeout. + - More information about the IMC REST API is available from + U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html). +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Power down server community.general.imc_rest: hostname: '{{ imc_hostname }}' username: '{{ imc_username }}' password: '{{ imc_password }}' - validate_certs: false # only do this when you trust the network! + validate_certs: false # only do this when you trust the network! content: | @@ -112,7 +110,7 @@ EXAMPLES = r''' hostname: '{{ imc_hostname }}' username: '{{ imc_username }}' password: '{{ imc_password }}' - validate_certs: false # only do this when you trust the network! + validate_certs: false # only do this when you trust the network! timeout: 120 content: | @@ -137,7 +135,7 @@ EXAMPLES = r''' hostname: '{{ imc_hostname }}' username: '{{ imc_username }}' password: '{{ imc_password }}' - validate_certs: false # only do this when you trust the network! + validate_certs: false # only do this when you trust the network! content: | @@ -155,7 +153,7 @@ EXAMPLES = r''' hostname: '{{ imc_host }}' username: '{{ imc_username }}' password: '{{ imc_password }}' - validate_certs: false # only do this when you trust the network! + validate_certs: false # only do this when you trust the network! content: | @@ -167,11 +165,11 @@ EXAMPLES = r''' hostname: '{{ imc_host }}' username: '{{ imc_username }}' password: '{{ imc_password }}' - validate_certs: false # only do this when you trust the network! + validate_certs: false # only do this when you trust the network! content: | - - - + + + delegate_to: localhost - name: Disable HTTP and increase session timeout to max value 10800 secs @@ -179,22 +177,22 @@ EXAMPLES = r''' hostname: '{{ imc_host }}' username: '{{ imc_username }}' password: '{{ imc_password }}' - validate_certs: false # only do this when you trust the network! + validate_certs: false # only do this when you trust the network! timeout: 120 content: | - - - + + + - - - + + + delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" aaLogin: - description: Cisco IMC XML output for the login, translated to JSON using Cobra convention + description: Cisco IMC XML output for the login, translated to JSON using Cobra convention. returned: success type: dict sample: | @@ -208,27 +206,27 @@ aaLogin: "response": "yes" } configConfMo: - description: Cisco IMC XML output for any configConfMo XML fragments, translated to JSON using Cobra convention + description: Cisco IMC XML output for any configConfMo XML fragments, translated to JSON using Cobra convention. returned: success type: dict sample: | elapsed: - description: Elapsed time in seconds + description: Elapsed time in seconds. returned: always type: int sample: 31 response: - description: HTTP response message, including content length + description: HTTP response message, including content length. returned: always type: str sample: OK (729 bytes) status: - description: The HTTP response status code + description: The HTTP response status code. 
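The imc_rest notes above explain that any configConfMo request reports a change on every run; under that assumption, a hedged workaround is to pair the task with changed_when. The file name here is hypothetical and the path option is used instead of inline content.

- name: Apply an XML fragment from a file without always reporting a change
  community.general.imc_rest:
    hostname: '{{ imc_hostname }}'
    username: '{{ imc_username }}'
    password: '{{ imc_password }}'
    validate_certs: false  # only do this when you trust the network!
    path: /tmp/imc_fragment.xml  # hypothetical file holding a configConfMo body
  register: imc_result
  changed_when: false  # see the note above about configConfMo always returning 'modified'
  delegate_to: localhost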
returned: always type: dict sample: 200 error: - description: Cisco IMC XML error output for last request, translated to JSON using Cobra convention + description: Cisco IMC XML error output for last request, translated to JSON using Cobra convention. returned: failed type: dict sample: | @@ -240,24 +238,24 @@ error: "response": "yes" } error_code: - description: Cisco IMC error code + description: Cisco IMC error code. returned: failed type: str sample: ERR-xml-parse-error error_text: - description: Cisco IMC error message + description: Cisco IMC error message. returned: failed type: str sample: | XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed. input: - description: RAW XML input sent to the Cisco IMC, causing the error + description: RAW XML input sent to the Cisco IMC, causing the error. returned: failed type: str sample: | output: - description: RAW XML output received from the Cisco IMC, with error details + description: RAW XML output received from the Cisco IMC, with error details. returned: failed type: str sample: > @@ -266,7 +264,7 @@ output: errorCode="ERR-xml-parse-error" invocationResult="594" errorDescr="XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed.\n"/> -''' +""" import os import traceback @@ -323,8 +321,7 @@ def merge(one, two): ''' Merge two complex nested datastructures into one''' if isinstance(one, dict) and isinstance(two, dict): copy = dict(one) - # copy.update({key: merge(one.get(key, None), two[key]) for key in two}) - copy.update(dict((key, merge(one.get(key, None), two[key])) for key in two)) + copy.update({key: merge(one.get(key, None), two[key]) for key in two}) return copy elif isinstance(one, list) and isinstance(two, list): diff --git a/plugins/modules/imgadm.py b/plugins/modules/imgadm.py index a247547fc7..344bf9cc56 100644 --- a/plugins/modules/imgadm.py +++ b/plugins/modules/imgadm.py @@ -9,62 +9,60 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: imgadm short_description: Manage SmartOS images description: - - Manage SmartOS virtual machine images through imgadm(1M) + - Manage SmartOS virtual machine images through imgadm(1M). author: Jasper Lievisse Adriaanse (@jasperla) extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - force: - required: false - type: bool - description: - - Force a given operation (where supported by imgadm(1M)). - pool: - required: false - default: zones - description: - - zpool to import to or delete images from. - type: str - source: - required: false - description: - - URI for the image source. - type: str - state: - required: true - choices: [ present, absent, deleted, imported, updated, vacuumed ] - description: - - State the object operated on should be in. V(imported) is an alias for - for V(present) and V(deleted) for V(absent). When set to V(vacuumed) - and O(uuid=*), it will remove all unused images. - type: str + force: + required: false + type: bool + description: + - Force a given operation (where supported by imgadm(1M)). + pool: + required: false + default: zones + description: + - The zpool to import to or delete images from. 
+ type: str + source: + required: false + description: + - URI for the image source. + type: str + state: + required: true + choices: [present, absent, deleted, imported, updated, vacuumed] + description: + - State the object operated on should be in. V(imported) is an alias for for V(present) and V(deleted) for V(absent). + When set to V(vacuumed) and O(uuid=*), it will remove all unused images. + type: str - type: - required: false - choices: [ imgapi, docker, dsapi ] - default: imgapi - description: - - Type for image sources. - type: str + type: + required: false + choices: [imgapi, docker, dsapi] + default: imgapi + description: + - Type for image sources. + type: str - uuid: - required: false - description: - - Image UUID. Can either be a full UUID or V(*) for all images. - type: str -''' + uuid: + required: false + description: + - Image UUID. Can either be a full UUID or V(*) for all images. + type: str +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Import an image community.general.imgadm: uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764' @@ -100,25 +98,25 @@ EXAMPLES = ''' community.general.imgadm: source: 'https://docker.io' state: absent -''' +""" -RETURN = ''' +RETURN = r""" source: - description: Source that is managed. - returned: When not managing an image. - type: str - sample: https://datasets.project-fifo.net + description: Source that is managed. + returned: When not managing an image. + type: str + sample: https://datasets.project-fifo.net uuid: - description: UUID for an image operated on. - returned: When not managing an image source. - type: str - sample: 70e3ae72-96b6-11e6-9056-9737fd4d0764 + description: UUID for an image operated on. + returned: When not managing an image source. + type: str + sample: 70e3ae72-96b6-11e6-9056-9737fd4d0764 state: - description: State of the target, after execution. - returned: success - type: str - sample: 'present' -''' + description: State of the target, after execution. + returned: success + type: str + sample: 'present' +""" import re diff --git a/plugins/modules/infinity.py b/plugins/modules/infinity.py index 65aa591f4c..5a0f093247 100644 --- a/plugins/modules/infinity.py +++ b/plugins/modules/infinity.py @@ -8,7 +8,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: infinity short_description: Manage Infinity IPAM using Rest API description: @@ -41,10 +41,10 @@ options: required: true action: description: - - Action to perform + - Action to perform. type: str required: true - choices: [add_network, delete_network, get_network, get_network_id, release_ip, release_network, reserve_network, reserve_next_available_ip ] + choices: [add_network, delete_network, get_network, get_network_id, release_ip, release_network, reserve_network, reserve_next_available_ip] network_id: description: - Network ID. @@ -55,11 +55,11 @@ options: type: str network_address: description: - - Network address with CIDR format (e.g., 192.168.310.0). + - Network address with CIDR format (for example V(192.168.310.0)). type: str network_size: description: - - Network bitmask (e.g. 255.255.255.220) or CIDR format (e.g., /26). + - Network bitmask (for example V(255.255.255.220) or CIDR format V(/26)). type: str network_name: description: @@ -72,20 +72,19 @@ options: default: -1 network_type: description: - - Network type defined by Infinity + - Network type defined by Infinity. 
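To make the vacuumed state described above concrete, a hedged imgadm task could remove all unused images from the default zones pool.

- name: Remove all unused images
  community.general.imgadm:
    uuid: '*'
    state: vacuumed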
type: str - choices: [ lan, shared_lan, supernet ] + choices: [lan, shared_lan, supernet] default: lan network_family: description: - - Network family defined by Infinity, e.g. IPv4, IPv6 and Dual stack + - Network family defined by Infinity, for example V(IPv4), V(IPv6) and V(Dual stack). type: str - choices: [ '4', '6', dual ] + choices: ['4', '6', dual] default: '4' -''' +""" -EXAMPLES = r''' ---- +EXAMPLES = r""" - hosts: localhost connection: local strategy: debug @@ -102,35 +101,35 @@ EXAMPLES = r''' network_id: 1201 network_size: /28 register: infinity -''' +""" -RETURN = r''' +RETURN = r""" network_id: - description: id for a given network - returned: success - type: str - sample: '1501' + description: Id for a given network. + returned: success + type: str + sample: '1501' ip_info: - description: when reserve next available ip address from a network, the ip address info ) is returned. - returned: success - type: str - sample: '{"address": "192.168.10.3", "hostname": "", "FQDN": "", "domainname": "", "id": 3229}' + description: When reserve next available ip address from a network, the ip address info ) is returned. + returned: success + type: str + sample: '{"address": "192.168.10.3", "hostname": "", "FQDN": "", "domainname": "", "id": 3229}' network_info: - description: when reserving a LAN network from a Infinity supernet by providing network_size, the information about the reserved network is returned. - returned: success - type: str - sample: { - "network_address": "192.168.10.32/28", - "network_family": "4", - "network_id": 3102, - "network_size": null, - "description": null, - "network_location": "3085", - "ranges": { "id": 0, "name": null,"first_ip": null,"type": null,"last_ip": null}, - "network_type": "lan", - "network_name": "'reserve_new_ansible_network'" - } -''' + description: when reserving a LAN network from a Infinity supernet by providing network_size, the information about the reserved network is returned. + returned: success + type: str + sample: { + "network_address": "192.168.10.32/28", + "network_family": "4", + "network_id": 3102, + "network_size": null, + "description": null, + "network_location": "3085", + "ranges": { "id": 0, "name": null,"first_ip": null,"type": null,"last_ip": null}, + "network_type": "lan", + "network_name": "'reserve_new_ansible_network'" + } +""" from ansible.module_utils.basic import AnsibleModule, json diff --git a/plugins/modules/influxdb_database.py b/plugins/modules/influxdb_database.py index a12326da52..e5246ebfe6 100644 --- a/plugins/modules/influxdb_database.py +++ b/plugins/modules/influxdb_database.py @@ -9,65 +9,63 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: influxdb_database short_description: Manage InfluxDB databases description: - - Manage InfluxDB databases. + - Manage InfluxDB databases. author: "Kamil Szczygiel (@kamsz)" requirements: - - "influxdb >= 0.9" - - requests + - "influxdb >= 0.9" + - requests attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - database_name: - description: - - Name of the database. - required: true - type: str - state: - description: - - Determines if the database should be created or destroyed. - choices: [ absent, present ] - default: present - type: str + database_name: + description: + - Name of the database. 
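As a hedged companion to the infinity example above, the next sketch reserves the next free address from the same network and reads back the documented ip_info value. The server_ip, username, and password option names are assumed from the module's connection options, which are elided in the hunk above; the values are placeholders.

- name: Reserve the next available IP address from the network
  community.general.infinity:
    server_ip: "{{ infinity_server_ip }}"
    username: "{{ infinity_username }}"
    password: "{{ infinity_password }}"
    action: reserve_next_available_ip
    network_id: 1201
  register: infinity_ip

- name: Show details of the reserved address
  ansible.builtin.debug:
    var: infinity_ip.ip_info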
+ required: true + type: str + state: + description: + - Determines if the database should be created or destroyed. + choices: [absent, present] + default: present + type: str extends_documentation_fragment: - community.general.influxdb - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" # Example influxdb_database command from Ansible Playbooks - name: Create database community.general.influxdb_database: - hostname: "{{influxdb_ip_address}}" - database_name: "{{influxdb_database_name}}" + hostname: "{{influxdb_ip_address}}" + database_name: "{{influxdb_database_name}}" - name: Destroy database community.general.influxdb_database: - hostname: "{{influxdb_ip_address}}" - database_name: "{{influxdb_database_name}}" - state: absent + hostname: "{{influxdb_ip_address}}" + database_name: "{{influxdb_database_name}}" + state: absent - name: Create database using custom credentials community.general.influxdb_database: - hostname: "{{influxdb_ip_address}}" - username: "{{influxdb_username}}" - password: "{{influxdb_password}}" - database_name: "{{influxdb_database_name}}" - ssl: true - validate_certs: true -''' + hostname: "{{influxdb_ip_address}}" + username: "{{influxdb_username}}" + password: "{{influxdb_password}}" + database_name: "{{influxdb_database_name}}" + ssl: true + validate_certs: true +""" -RETURN = r''' +RETURN = r""" # only defaults -''' +""" try: import requests.exceptions diff --git a/plugins/modules/influxdb_query.py b/plugins/modules/influxdb_query.py index fda98d1843..98b8066b67 100644 --- a/plugins/modules/influxdb_query.py +++ b/plugins/modules/influxdb_query.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: influxdb_query short_description: Query data points from InfluxDB description: @@ -36,10 +35,9 @@ options: extends_documentation_fragment: - community.general.influxdb - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Query connections community.general.influxdb_query: hostname: "{{ influxdb_ip_address }}" @@ -57,17 +55,17 @@ EXAMPLES = r''' - name: Print results from the query ansible.builtin.debug: var: connection.query_results -''' +""" -RETURN = r''' +RETURN = r""" query_results: - description: Result from the query + description: Result from the query. returned: success type: list sample: - mean: 1245.5333333333333 time: "1970-01-01T00:00:00Z" -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native diff --git a/plugins/modules/influxdb_retention_policy.py b/plugins/modules/influxdb_retention_policy.py index f1c13a8111..cdd6bafb6d 100644 --- a/plugins/modules/influxdb_retention_policy.py +++ b/plugins/modules/influxdb_retention_policy.py @@ -9,136 +9,131 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: influxdb_retention_policy short_description: Manage InfluxDB retention policies description: - - Manage InfluxDB retention policies. + - Manage InfluxDB retention policies. author: "Kamil Szczygiel (@kamsz)" requirements: - - "influxdb >= 0.9" - - requests + - "influxdb >= 0.9" + - requests attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - database_name: - description: - - Name of the database. 
- required: true - type: str - policy_name: - description: - - Name of the retention policy. - required: true - type: str - state: - description: - - State of the retention policy. - choices: [ absent, present ] - default: present - type: str - version_added: 3.1.0 - duration: - description: - - Determines how long InfluxDB should keep the data. If specified, it - should be V(INF) or at least one hour. If not specified, V(INF) is - assumed. Supports complex duration expressions with multiple units. - - Required only if O(state) is set to V(present). - type: str - replication: - description: - - Determines how many independent copies of each point are stored in the cluster. - - Required only if O(state) is set to V(present). - type: int - default: - description: - - Sets the retention policy as default retention policy. - type: bool - default: false - shard_group_duration: - description: - - Determines the time range covered by a shard group. If specified it - must be at least one hour. If none, it's determined by InfluxDB by - the rentention policy's duration. Supports complex duration expressions - with multiple units. - type: str - version_added: '2.0.0' + database_name: + description: + - Name of the database. + required: true + type: str + policy_name: + description: + - Name of the retention policy. + required: true + type: str + state: + description: + - State of the retention policy. + choices: [absent, present] + default: present + type: str + version_added: 3.1.0 + duration: + description: + - Determines how long InfluxDB should keep the data. If specified, it should be V(INF) or at least one hour. If not + specified, V(INF) is assumed. Supports complex duration expressions with multiple units. + - Required only if O(state) is set to V(present). + type: str + replication: + description: + - Determines how many independent copies of each point are stored in the cluster. + - Required only if O(state) is set to V(present). + type: int + default: + description: + - Sets the retention policy as default retention policy. + type: bool + default: false + shard_group_duration: + description: + - Determines the time range covered by a shard group. If specified it must be at least one hour. If none, it's determined + by InfluxDB by the rentention policy's duration. Supports complex duration expressions with multiple units. 
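Since the default option of influxdb_retention_policy above is not exercised by the existing examples, a hedged variant marks a policy as the database's default retention policy.

- name: Create 4 week retention policy and make it the default
  community.general.influxdb_retention_policy:
    hostname: "{{ influxdb_ip_address }}"
    database_name: "{{ influxdb_database_name }}"
    policy_name: test
    duration: 4w
    replication: 1
    default: true
    state: present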
+ type: str + version_added: '2.0.0' extends_documentation_fragment: - community.general.influxdb - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" # Example influxdb_retention_policy command from Ansible Playbooks - name: Create 1 hour retention policy community.general.influxdb_retention_policy: - hostname: "{{ influxdb_ip_address }}" - database_name: "{{ influxdb_database_name }}" - policy_name: test - duration: 1h - replication: 1 - ssl: true - validate_certs: true - state: present + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" + policy_name: test + duration: 1h + replication: 1 + ssl: true + validate_certs: true + state: present - name: Create 1 day retention policy with 1 hour shard group duration community.general.influxdb_retention_policy: - hostname: "{{ influxdb_ip_address }}" - database_name: "{{ influxdb_database_name }}" - policy_name: test - duration: 1d - replication: 1 - shard_group_duration: 1h - state: present + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" + policy_name: test + duration: 1d + replication: 1 + shard_group_duration: 1h + state: present - name: Create 1 week retention policy with 1 day shard group duration community.general.influxdb_retention_policy: - hostname: "{{ influxdb_ip_address }}" - database_name: "{{ influxdb_database_name }}" - policy_name: test - duration: 1w - replication: 1 - shard_group_duration: 1d - state: present + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" + policy_name: test + duration: 1w + replication: 1 + shard_group_duration: 1d + state: present - name: Create infinite retention policy with 1 week of shard group duration community.general.influxdb_retention_policy: - hostname: "{{ influxdb_ip_address }}" - database_name: "{{ influxdb_database_name }}" - policy_name: test - duration: INF - replication: 1 - ssl: false - shard_group_duration: 1w - state: present + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" + policy_name: test + duration: INF + replication: 1 + ssl: false + shard_group_duration: 1w + state: present - name: Create retention policy with complex durations community.general.influxdb_retention_policy: - hostname: "{{ influxdb_ip_address }}" - database_name: "{{ influxdb_database_name }}" - policy_name: test - duration: 5d1h30m - replication: 1 - ssl: false - shard_group_duration: 1d10h30m - state: present + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" + policy_name: test + duration: 5d1h30m + replication: 1 + ssl: false + shard_group_duration: 1d10h30m + state: present - name: Drop retention policy community.general.influxdb_retention_policy: - hostname: "{{ influxdb_ip_address }}" - database_name: "{{ influxdb_database_name }}" - policy_name: test - state: absent -''' + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" + policy_name: test + state: absent +""" -RETURN = r''' +RETURN = r""" # only defaults -''' +""" import re diff --git a/plugins/modules/influxdb_user.py b/plugins/modules/influxdb_user.py index ca4201db1b..bc66ff693d 100644 --- a/plugins/modules/influxdb_user.py +++ b/plugins/modules/influxdb_user.py @@ -10,8 +10,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: influxdb_user short_description: Manage InfluxDB users description: @@ -44,7 +43,7 @@ 
options: state: description: - State of the user. - choices: [ absent, present ] + choices: [absent, present] default: present type: str grants: @@ -58,10 +57,9 @@ options: extends_documentation_fragment: - community.general.influxdb - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a user on localhost using default login credentials community.general.influxdb_user: user_name: john @@ -101,11 +99,11 @@ EXAMPLES = r''' login_username: "{{ influxdb_username }}" login_password: "{{ influxdb_password }}" state: absent -''' +""" -RETURN = r''' +RETURN = r""" #only defaults -''' +""" import json diff --git a/plugins/modules/influxdb_write.py b/plugins/modules/influxdb_write.py index 76e6449bb0..c67e57699b 100644 --- a/plugins/modules/influxdb_write.py +++ b/plugins/modules/influxdb_write.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: influxdb_write short_description: Write data points into InfluxDB description: @@ -37,34 +36,33 @@ options: extends_documentation_fragment: - community.general.influxdb - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Write points into database community.general.influxdb_write: - hostname: "{{influxdb_ip_address}}" - database_name: "{{influxdb_database_name}}" - data_points: - - measurement: connections - tags: - host: server01 - region: us-west - time: "{{ ansible_date_time.iso8601 }}" - fields: - value: 2000 - - measurement: connections - tags: - host: server02 - region: us-east - time: "{{ ansible_date_time.iso8601 }}" - fields: - value: 3000 -''' + hostname: "{{influxdb_ip_address}}" + database_name: "{{influxdb_database_name}}" + data_points: + - measurement: connections + tags: + host: server01 + region: us-west + time: "{{ ansible_date_time.iso8601 }}" + fields: + value: 2000 + - measurement: connections + tags: + host: server02 + region: us-east + time: "{{ ansible_date_time.iso8601 }}" + fields: + value: 3000 +""" -RETURN = r''' +RETURN = r""" # only defaults -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native diff --git a/plugins/modules/ini_file.py b/plugins/modules/ini_file.py index affee2a4f7..61e6662d95 100644 --- a/plugins/modules/ini_file.py +++ b/plugins/modules/ini_file.py @@ -12,19 +12,18 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ini_file short_description: Tweak settings in INI files extends_documentation_fragment: - files - community.general.attributes description: - - Manage (add, remove, change) individual settings in an INI-style file without having - to manage the file as a whole with, say, M(ansible.builtin.template) or M(ansible.builtin.assemble). - - Adds missing sections if they don't exist. - - This module adds missing ending newlines to files to keep in line with the POSIX standard, even when - no other modifications need to be applied. + - Manage (add, remove, change) individual settings in an INI-style file without having to manage the file as a whole with, + say, M(ansible.builtin.template) or M(ansible.builtin.assemble). + - Adds missing sections if they do not exist. + - This module adds missing ending newlines to files to keep in line with the POSIX standard, even when no other modifications + need to be applied. 
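The influxdb_write example above writes two hand-built entries; when many points share the same shape, a small helper can assemble the documented data_points structure programmatically. A hypothetical sketch (the helper name and timestamp handling are illustrative, only the dictionary layout comes from the example):

```python
# Hypothetical helper producing dictionaries in the data_points layout shown
# in the influxdb_write example (measurement / tags / time / fields).
from datetime import datetime, timezone

def make_point(measurement, value, **tags):
    return {
        "measurement": measurement,
        "tags": tags,
        "time": datetime.now(timezone.utc).isoformat(),
        "fields": {"value": value},
    }

points = [
    make_point("connections", 2000, host="server01", region="us-west"),
    make_point("connections", 3000, host="server02", region="us-east"),
]
print(points)
```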
attributes: check_mode: support: full @@ -36,11 +35,10 @@ options: - Path to the INI-style file; this file is created if required. type: path required: true - aliases: [ dest ] + aliases: [dest] section: description: - - Section name in INI file. This is added if O(state=present) automatically when - a single value is being set. + - Section name in INI file. This is added if O(state=present) automatically when a single value is being set. - If being omitted, the O(option) will be placed before the first O(section). - Omitting O(section) is also required if the config format does not support sections. type: str @@ -91,28 +89,27 @@ options: version_added: 3.6.0 backup: description: - - Create a backup file including the timestamp information so you can get - the original file back if you somehow clobbered it incorrectly. + - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered + it incorrectly. type: bool default: false state: description: - If set to V(absent) and O(exclusive) set to V(true) all matching O(option) lines are removed. - - If set to V(absent) and O(exclusive) set to V(false) the specified O(option=value) lines are removed, - but the other O(option)s with the same name are not touched. - - If set to V(present) and O(exclusive) set to V(false) the specified O(option=values) lines are added, - but the other O(option)s with the same name are not touched. - - If set to V(present) and O(exclusive) set to V(true) all given O(option=values) lines will be - added and the other O(option)s with the same name are removed. + - If set to V(absent) and O(exclusive) set to V(false) the specified O(option=value) lines are removed, but the other + O(option)s with the same name are not touched. + - If set to V(present) and O(exclusive) set to V(false) the specified O(option=values) lines are added, but the other + O(option)s with the same name are not touched. + - If set to V(present) and O(exclusive) set to V(true) all given O(option=values) lines will be added and the other + O(option)s with the same name are removed. type: str - choices: [ absent, present ] + choices: [absent, present] default: present exclusive: description: - - If set to V(true) (default), all matching O(option) lines are removed when O(state=absent), - or replaced when O(state=present). - - If set to V(false), only the specified O(value)/O(values) are added when O(state=present), - or removed when O(state=absent), and existing ones are not modified. + - If set to V(true) (default), all matching O(option) lines are removed when O(state=absent), or replaced when O(state=present). + - If set to V(false), only the specified O(value)/O(values) are added when O(state=present), or removed when O(state=absent), + and existing ones are not modified. type: bool default: true version_added: 3.6.0 @@ -141,27 +138,27 @@ options: modify_inactive_option: description: - By default the module replaces a commented line that matches the given option. - - Set this option to V(false) to avoid this. This is useful when you want to keep commented example - C(key=value) pairs for documentation purposes. + - Set this option to V(false) to avoid this. This is useful when you want to keep commented example C(key=value) pairs + for documentation purposes. type: bool default: true version_added: 8.0.0 follow: description: - - This flag indicates that filesystem links, if they exist, should be followed. - - O(follow=true) can modify O(path) when combined with parameters such as O(mode). 
+ - This flag indicates that filesystem links, if they exist, should be followed. + - O(follow=true) can modify O(path) when combined with parameters such as O(mode). type: bool default: false version_added: 7.1.0 notes: - - While it is possible to add an O(option) without specifying a O(value), this makes no sense. - - As of community.general 3.2.0, UTF-8 BOM markers are discarded when reading files. + - While it is possible to add an O(option) without specifying a O(value), this makes no sense. + - As of community.general 3.2.0, UTF-8 BOM markers are discarded when reading files. author: - - Jan-Piet Mens (@jpmens) - - Ales Nosek (@noseka1) -''' + - Jan-Piet Mens (@jpmens) + - Ales Nosek (@noseka1) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure "fav=lemonade is in section "[drinks]" in specified file community.general.ini_file: path: /etc/conf @@ -257,7 +254,7 @@ EXAMPLES = r''' value: xxxxxxxxxxxxxxxxxxxx mode: '0600' state: present -''' +""" import io import os @@ -569,7 +566,7 @@ def do_ini(module, filename, section=None, section_has_values=None, option=None, module.fail_json(msg="Unable to create temporary file %s", traceback=traceback.format_exc()) try: - module.atomic_move(tmpfile, target_filename) + module.atomic_move(tmpfile, os.path.abspath(target_filename)) except IOError: module.ansible.fail_json(msg='Unable to move temporary \ file %s to %s, IOError' % (tmpfile, target_filename), traceback=traceback.format_exc()) diff --git a/plugins/modules/installp.py b/plugins/modules/installp.py index 1531d2cad2..e54a56949f 100644 --- a/plugins/modules/installp.py +++ b/plugins/modules/installp.py @@ -8,14 +8,13 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: installp author: - Kairo Araujo (@kairoaraujo) short_description: Manage packages on AIX description: - - Manage packages using 'installp' on AIX + - Manage packages using 'installp' on AIX. extends_documentation_fragment: - community.general.attributes attributes: @@ -26,32 +25,32 @@ attributes: options: accept_license: description: - - Whether to accept the license for the package(s). + - Whether to accept the license for the package(s). type: bool default: false name: description: - - One or more packages to install or remove. - - Use V(all) to install all packages available on informed O(repository_path). + - One or more packages to install or remove. + - Use V(all) to install all packages available on informed O(repository_path). type: list elements: str required: true - aliases: [ pkg ] + aliases: [pkg] repository_path: description: - - Path with AIX packages (required to install). + - Path with AIX packages (required to install). type: path state: description: - - Whether the package needs to be present on or absent from the system. + - Whether the package needs to be present on or absent from the system. type: str - choices: [ absent, present ] + choices: [absent, present] default: present notes: -- If the package is already installed, even the package/fileset is new, the module will not install it. -''' + - If the package is already installed, even the package/fileset is new, the module will not install it. 
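The ini_file hunk above (and, later in this patch, the java_keystore, jenkins_plugin and kdeconfig hunks) passes an absolute destination path to atomic_move(), so a relative destination cannot be resolved against whatever working directory the process happens to be in. A minimal standard-library sketch of the same write-temp-then-replace idea, making no assumptions about AnsibleModule.atomic_move itself:

```python
# Standalone sketch using only the standard library; this is not
# AnsibleModule.atomic_move, just the same general pattern.
import os
import tempfile

def atomic_write(dest, data):
    dest = os.path.abspath(dest)  # pin the target before anything else runs
    fd, tmp = tempfile.mkstemp(dir=os.path.dirname(dest))
    try:
        with os.fdopen(fd, "w") as f:
            f.write(data)
        os.replace(tmp, dest)  # atomic rename on the same filesystem
    except Exception:
        os.unlink(tmp)
        raise

atomic_write("./example.ini", "[defaults]\nkey = value\n")
```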
+""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Install package foo community.general.installp: name: foo @@ -84,9 +83,9 @@ EXAMPLES = r''' community.general.installp: name: bos.sysmgt.nim.master state: absent -''' +""" -RETURN = r''' # ''' +RETURN = r""" # """ import os import re diff --git a/plugins/modules/interfaces_file.py b/plugins/modules/interfaces_file.py index 98103082ec..e878d10d1f 100644 --- a/plugins/modules/interfaces_file.py +++ b/plugins/modules/interfaces_file.py @@ -9,16 +9,15 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: interfaces_file short_description: Tweak settings in C(/etc/network/interfaces) files extends_documentation_fragment: - ansible.builtin.files - community.general.attributes description: - - Manage (add, remove, change) individual interface options in an interfaces-style file without having - to manage the file as a whole with, say, M(ansible.builtin.template) or M(ansible.builtin.assemble). Interface has to be presented in a file. + - Manage (add, remove, change) individual interface options in an interfaces-style file without having to manage the file + as a whole with, say, M(ansible.builtin.template) or M(ansible.builtin.assemble). Interface has to be presented in a file. - Read information about interfaces from interfaces-styled files. attributes: check_mode: @@ -46,14 +45,14 @@ options: value: type: str description: - - If O(option) is not presented for the O(iface) and O(state) is V(present) option will be added. - If O(option) already exists and is not V(pre-up), V(up), V(post-up) or V(down), it's value will be updated. - V(pre-up), V(up), V(post-up) and V(down) options cannot be updated, only adding new options, removing existing - ones or cleaning the whole option set are supported. + - If O(option) is not presented for the O(iface) and O(state) is V(present) option will be added. If O(option) already + exists and is not V(pre-up), V(up), V(post-up) or V(down), it's value will be updated. V(pre-up), V(up), V(post-up) + and V(down) options cannot be updated, only adding new options, removing existing ones or cleaning the whole option + set are supported. backup: description: - - Create a backup file including the timestamp information so you can get - the original file back if you somehow clobbered it incorrectly. + - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered + it incorrectly. type: bool default: false state: @@ -61,86 +60,85 @@ options: description: - If set to V(absent) the option or section will be removed if present instead of created. default: "present" - choices: [ "present", "absent" ] + choices: ["present", "absent"] notes: - - If option is defined multiple times last one will be updated but all will be deleted in case of an absent state. + - If option is defined multiple times last one will be updated but all will be deleted in case of an absent state. requirements: [] author: "Roman Belyakovsky (@hryamzik)" -''' +""" -RETURN = ''' +RETURN = r""" dest: - description: Destination file/path. - returned: success - type: str - sample: "/etc/network/interfaces" + description: Destination file/path. + returned: success + type: str + sample: "/etc/network/interfaces" ifaces: - description: Interfaces dictionary. - returned: success - type: dict - contains: - ifaces: - description: Interface dictionary. 
- returned: success - type: dict - contains: - eth0: - description: Name of the interface. - returned: success - type: dict - contains: - address_family: - description: Interface address family. - returned: success - type: str - sample: "inet" - method: - description: Interface method. - returned: success - type: str - sample: "manual" - mtu: - description: Other options, all values returned as strings. - returned: success - type: str - sample: "1500" - pre-up: - description: List of C(pre-up) scripts. - returned: success - type: list - elements: str - sample: - - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" - - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" - up: - description: List of C(up) scripts. - returned: success - type: list - elements: str - sample: - - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" - - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" - post-up: - description: List of C(post-up) scripts. - returned: success - type: list - elements: str - sample: - - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" - - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" - down: - description: List of C(down) scripts. - returned: success - type: list - elements: str - sample: - - "route del -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" - - "route del -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" -... -''' + description: Interfaces dictionary. + returned: success + type: dict + contains: + ifaces: + description: Interface dictionary. + returned: success + type: dict + contains: + eth0: + description: Name of the interface. + returned: success + type: dict + contains: + address_family: + description: Interface address family. + returned: success + type: str + sample: "inet" + method: + description: Interface method. + returned: success + type: str + sample: "manual" + mtu: + description: Other options, all values returned as strings. + returned: success + type: str + sample: "1500" + pre-up: + description: List of C(pre-up) scripts. + returned: success + type: list + elements: str + sample: + - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" + - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" + up: + description: List of C(up) scripts. + returned: success + type: list + elements: str + sample: + - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" + - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" + post-up: + description: List of C(post-up) scripts. + returned: success + type: list + elements: str + sample: + - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" + - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" + down: + description: List of C(down) scripts. + returned: success + type: list + elements: str + sample: + - "route del -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" + - "route del -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Set eth1 mtu configuration value to 8000 community.general.interfaces_file: dest: /etc/network/interfaces.d/eth1.cfg @@ -150,7 +148,7 @@ EXAMPLES = ''' backup: true state: present register: eth1_cfg -''' +""" import os import re diff --git a/plugins/modules/ipa_dnsrecord.py b/plugins/modules/ipa_dnsrecord.py index cb4ce03ddd..1dad138377 100644 --- a/plugins/modules/ipa_dnsrecord.py +++ b/plugins/modules/ipa_dnsrecord.py @@ -35,42 +35,42 @@ options: record_type: description: - The type of DNS record name. - - Currently, 'A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'NS', 'PTR', 'TXT', 'SRV' and 'MX' are supported. 
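For reference, the nested layout documented in the interfaces_file RETURN block above can be consumed like this after registering the module result; the sample values and the double ifaces nesting come straight from that block:

```python
# Walking the documented interfaces_file return structure; the sample values
# mirror the RETURN block above.
result = {
    "dest": "/etc/network/interfaces",
    "ifaces": {
        "ifaces": {
            "eth0": {
                "address_family": "inet",
                "method": "manual",
                "mtu": "1500",
                "up": ["route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"],
            }
        }
    },
}

eth0 = result["ifaces"]["ifaces"]["eth0"]
print(eth0["mtu"])         # option values come back as strings, e.g. "1500"
print(eth0.get("up", []))  # script options (pre-up/up/post-up/down) are lists
```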
- - "'A6', 'CNAME', 'DNAME' and 'TXT' are added in version 2.5." - - "'SRV' and 'MX' are added in version 2.8." - - "'NS' are added in comunity.general 8.2.0." + - Support for V(NS) was added in comunity.general 8.2.0. + - Support for V(SSHFP) was added in community.general 9.1.0. required: false default: 'A' - choices: ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'MX', 'NS', 'PTR', 'SRV', 'TXT'] + choices: ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'MX', 'NS', 'PTR', 'SRV', 'TXT', 'SSHFP'] type: str record_value: description: - Manage DNS record name with this value. - Mutually exclusive with O(record_values), and exactly one of O(record_value) and O(record_values) has to be specified. - Use O(record_values) if you need to specify multiple values. - - In the case of 'A' or 'AAAA' record types, this will be the IP address. - - In the case of 'A6' record type, this will be the A6 Record data. - - In the case of 'CNAME' record type, this will be the hostname. - - In the case of 'DNAME' record type, this will be the DNAME target. - - In the case of 'NS' record type, this will be the name server hostname. Hostname must already have a valid A or AAAA record. - - In the case of 'PTR' record type, this will be the hostname. - - In the case of 'TXT' record type, this will be a text. - - In the case of 'SRV' record type, this will be a service record. - - In the case of 'MX' record type, this will be a mail exchanger record. + - In the case of V(A) or V(AAAA) record types, this will be the IP address. + - In the case of V(A6) record type, this will be the A6 Record data. + - In the case of V(CNAME) record type, this will be the hostname. + - In the case of V(DNAME) record type, this will be the DNAME target. + - In the case of V(NS) record type, this will be the name server hostname. Hostname must already have a valid A or AAAA record. + - In the case of V(PTR) record type, this will be the hostname. + - In the case of V(TXT) record type, this will be a text. + - In the case of V(SRV) record type, this will be a service record. + - In the case of V(MX) record type, this will be a mail exchanger record. + - In the case of V(SSHFP) record type, this will be an SSH fingerprint record. type: str record_values: description: - Manage DNS record name with this value. - Mutually exclusive with O(record_value), and exactly one of O(record_value) and O(record_values) has to be specified. - - In the case of 'A' or 'AAAA' record types, this will be the IP address. - - In the case of 'A6' record type, this will be the A6 Record data. - - In the case of 'CNAME' record type, this will be the hostname. - - In the case of 'DNAME' record type, this will be the DNAME target. - - In the case of 'NS' record type, this will be the name server hostname. Hostname must already have a valid A or AAAA record. - - In the case of 'PTR' record type, this will be the hostname. - - In the case of 'TXT' record type, this will be a text. - - In the case of 'SRV' record type, this will be a service record. - - In the case of 'MX' record type, this will be a mail exchanger record. + - In the case of V(A) or V(AAAA) record types, this will be the IP address. + - In the case of V(A6) record type, this will be the A6 Record data. + - In the case of V(CNAME) record type, this will be the hostname. + - In the case of V(DNAME) record type, this will be the DNAME target. + - In the case of V(NS) record type, this will be the name server hostname. Hostname must already have a valid A or AAAA record. 
+ - In the case of V(PTR) record type, this will be the hostname. + - In the case of V(TXT) record type, this will be a text. + - In the case of V(SRV) record type, this will be a service record. + - In the case of V(MX) record type, this will be a mail exchanger record. + - In the case of V(SSHFP) record type, this will be an SSH fingerprint record. type: list elements: str record_ttl: @@ -175,6 +175,20 @@ EXAMPLES = r''' ipa_host: ipa.example.com ipa_user: admin ipa_pass: ChangeMe! + +- name: Retrieve the current sshfp fingerprints + ansible.builtin.command: ssh-keyscan -D localhost + register: ssh_hostkeys + +- name: Update the SSHFP records in DNS + community.general.ipa_dnsrecord: + name: "{{ inventory_hostname}}" + zone_name: example.com + record_type: 'SSHFP' + record_values: "{{ ssh_hostkeys.stdout.split('\n') | map('split', 'SSHFP ') | map('last') | list }}" + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: ChangeMe! ''' RETURN = r''' @@ -228,6 +242,8 @@ class DNSRecordIPAClient(IPAClient): item.update(srvrecord=value) elif details['record_type'] == 'MX': item.update(mxrecord=value) + elif details['record_type'] == 'SSHFP': + item.update(sshfprecord=value) self._post_json(method='dnsrecord_add', name=zone_name, item=item) @@ -266,6 +282,8 @@ def get_dnsrecord_dict(details=None): module_dnsrecord.update(srvrecord=details['record_values']) elif details['record_type'] == 'MX' and details['record_values']: module_dnsrecord.update(mxrecord=details['record_values']) + elif details['record_type'] == 'SSHFP' and details['record_values']: + module_dnsrecord.update(sshfprecord=details['record_values']) if details.get('record_ttl'): module_dnsrecord.update(dnsttl=details['record_ttl']) @@ -328,7 +346,7 @@ def ensure(module, client): def main(): - record_types = ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'NS', 'PTR', 'TXT', 'SRV', 'MX'] + record_types = ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'NS', 'PTR', 'TXT', 'SRV', 'MX', 'SSHFP'] argument_spec = ipa_argument_spec() argument_spec.update( zone_name=dict(type='str', required=True), diff --git a/plugins/modules/ipa_getkeytab.py b/plugins/modules/ipa_getkeytab.py new file mode 100644 index 0000000000..3d4f81d5b1 --- /dev/null +++ b/plugins/modules/ipa_getkeytab.py @@ -0,0 +1,247 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2024 Alexander Bakanovskii +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: ipa_getkeytab +short_description: Manage keytab file in FreeIPA +version_added: 9.5.0 +description: + - Manage keytab file with C(ipa-getkeytab) utility. + - See U(https://manpages.ubuntu.com/manpages/jammy/man1/ipa-getkeytab.1.html) for reference. +author: "Alexander Bakanovskii (@abakanovskii)" +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + path: + description: + - The base path where to put generated keytab file. + type: path + aliases: ["keytab"] + required: true + principal: + description: + - The non-realm part of the full principal name. + type: str + required: true + ipa_host: + description: + - The IPA server to retrieve the keytab from (FQDN). + type: str + ldap_uri: + description: + - LDAP URI. If V(ldap://) is specified, STARTTLS is initiated by default. + - Can not be used with the O(ipa_host) option. 
+ type: str + bind_dn: + description: + - The LDAP DN to bind as when retrieving a keytab without Kerberos credentials. + - Generally used with the O(bind_pw) option. + type: str + bind_pw: + description: + - The LDAP password to use when not binding with Kerberos. + type: str + password: + description: + - Use this password for the key instead of one randomly generated. + type: str + ca_cert: + description: + - The path to the IPA CA certificate used to validate LDAPS/STARTTLS connections. + type: path + sasl_mech: + description: + - SASL mechanism to use if O(bind_dn) and O(bind_pw) are not specified. + choices: ["GSSAPI", "EXTERNAL"] + type: str + retrieve_mode: + description: + - Retrieve an existing key from the server instead of generating a new one. + - This is incompatible with the O(password), and will work only against a IPA server more recent than version 3.3. + - The user requesting the keytab must have access to the keys for this operation to succeed. + - Be aware that if set V(true), a new keytab will be generated. + - This invalidates all previously retrieved keytabs for this service principal. + type: bool + encryption_types: + description: + - The list of encryption types to use to generate keys. + - It will use local client defaults if not provided. + - Valid values depend on the Kerberos library version and configuration. + type: str + state: + description: + - The state of the keytab file. + - V(present) only check for existence of a file, if you want to recreate keytab with other parameters you should set O(force=true). + type: str + default: present + choices: ["present", "absent"] + force: + description: + - Force recreation if exists already. + type: bool +requirements: + - freeipa-client + - Managed host is FreeIPA client +extends_documentation_fragment: + - community.general.attributes +''' + +EXAMPLES = r''' +- name: Get kerberos ticket + ansible.builtin.shell: kinit admin + args: + stdin: "{{ aldpro_admin_password }}" + changed_when: true + +- name: Create keytab + community.general.ipa_getkeytab: + path: /etc/ipa/test.keytab + principal: HTTP/freeipa-dc02.ipa.test + ipa_host: freeipa-dc01.ipa.test + +- name: Retrieve already existing keytab + community.general.ipa_getkeytab: + path: /etc/ipa/test.keytab + principal: HTTP/freeipa-dc02.ipa.test + ipa_host: freeipa-dc01.ipa.test + retrieve_mode: true + +- name: Force keytab recreation + community.general.ipa_getkeytab: + path: /etc/ipa/test.keytab + principal: HTTP/freeipa-dc02.ipa.test + ipa_host: freeipa-dc01.ipa.test + force: true +''' + +import os + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +class IPAKeytab(object): + def __init__(self, module, **kwargs): + self.module = module + self.path = kwargs['path'] + self.state = kwargs['state'] + self.principal = kwargs['principal'] + self.ipa_host = kwargs['ipa_host'] + self.ldap_uri = kwargs['ldap_uri'] + self.bind_dn = kwargs['bind_dn'] + self.bind_pw = kwargs['bind_pw'] + self.password = kwargs['password'] + self.ca_cert = kwargs['ca_cert'] + self.sasl_mech = kwargs['sasl_mech'] + self.retrieve_mode = kwargs['retrieve_mode'] + self.encryption_types = kwargs['encryption_types'] + + self.runner = CmdRunner( + module, + command='ipa-getkeytab', + arg_formats=dict( + retrieve_mode=cmd_runner_fmt.as_bool('--retrieve'), + path=cmd_runner_fmt.as_opt_val('--keytab'), + ipa_host=cmd_runner_fmt.as_opt_val('--server'), + 
principal=cmd_runner_fmt.as_opt_val('--principal'), + ldap_uri=cmd_runner_fmt.as_opt_val('--ldapuri'), + bind_dn=cmd_runner_fmt.as_opt_val('--binddn'), + bind_pw=cmd_runner_fmt.as_opt_val('--bindpw'), + password=cmd_runner_fmt.as_opt_val('--password'), + ca_cert=cmd_runner_fmt.as_opt_val('--cacert'), + sasl_mech=cmd_runner_fmt.as_opt_val('--mech'), + encryption_types=cmd_runner_fmt.as_opt_val('--enctypes'), + ) + ) + + def _exec(self, check_rc=True): + with self.runner( + "retrieve_mode path ipa_host principal ldap_uri bind_dn bind_pw password ca_cert sasl_mech encryption_types", + check_rc=check_rc + ) as ctx: + rc, out, err = ctx.run() + return out + + +def main(): + arg_spec = dict( + path=dict(type='path', required=True, aliases=["keytab"]), + state=dict(default='present', choices=['present', 'absent']), + principal=dict(type='str', required=True), + ipa_host=dict(type='str'), + ldap_uri=dict(type='str'), + bind_dn=dict(type='str'), + bind_pw=dict(type='str'), + password=dict(type='str', no_log=True), + ca_cert=dict(type='path'), + sasl_mech=dict(type='str', choices=["GSSAPI", "EXTERNAL"]), + retrieve_mode=dict(type='bool'), + encryption_types=dict(type='str'), + force=dict(type='bool'), + ) + module = AnsibleModule( + argument_spec=arg_spec, + mutually_exclusive=[('ipa_host', 'ldap_uri'), ('retrieve_mode', 'password')], + supports_check_mode=True, + ) + + path = module.params['path'] + state = module.params['state'] + force = module.params['force'] + + keytab = IPAKeytab(module, + path=path, + state=state, + principal=module.params['principal'], + ipa_host=module.params['ipa_host'], + ldap_uri=module.params['ldap_uri'], + bind_dn=module.params['bind_dn'], + bind_pw=module.params['bind_pw'], + password=module.params['password'], + ca_cert=module.params['ca_cert'], + sasl_mech=module.params['sasl_mech'], + retrieve_mode=module.params['retrieve_mode'], + encryption_types=module.params['encryption_types'], + ) + + changed = False + if state == 'present': + if os.path.exists(path): + if force and not module.check_mode: + try: + os.remove(path) + except OSError as e: + module.fail_json(msg="Error deleting: %s - %s." % (e.filename, e.strerror)) + keytab._exec() + changed = True + if force and module.check_mode: + changed = True + else: + changed = True + keytab._exec() + + if state == 'absent': + if os.path.exists(path): + changed = True + if not module.check_mode: + try: + os.remove(path) + except OSError as e: + module.fail_json(msg="Error deleting: %s - %s." % (e.filename, e.strerror)) + + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ipa_host.py b/plugins/modules/ipa_host.py index b37a606d75..791cee91f3 100644 --- a/plugins/modules/ipa_host.py +++ b/plugins/modules/ipa_host.py @@ -74,10 +74,17 @@ options: type: list elements: str state: - description: State to ensure. + description: + - State to ensure. default: present choices: ["absent", "disabled", "enabled", "present"] type: str + force_creation: + description: + - Create host if O(state=disabled) or O(state=enabled) but not present. + default: true + type: bool + version_added: 9.5.0 update_dns: description: - If set V(true) with O(state=absent), then removes DNS records of the host managed by FreeIPA DNS. 
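Purely as an illustration of what the arg_formats declared in the new ipa_getkeytab module above amount to: a hypothetical mapping of module parameters onto an ipa-getkeytab command line. The module itself delegates this to the collection's CmdRunner helper rather than building argv by hand.

```python
# Illustrative only: how the options declared in arg_formats above would map
# onto an ipa-getkeytab invocation. Not the module's actual code path.
def build_argv(params):
    argv = ["ipa-getkeytab"]
    flag_map = {
        "path": "--keytab", "ipa_host": "--server", "principal": "--principal",
        "ldap_uri": "--ldapuri", "bind_dn": "--binddn", "bind_pw": "--bindpw",
        "password": "--password", "ca_cert": "--cacert", "sasl_mech": "--mech",
        "encryption_types": "--enctypes",
    }
    if params.get("retrieve_mode"):       # boolean option becomes a bare flag
        argv.append("--retrieve")
    for key, flag in flag_map.items():    # value options become "--flag value"
        if params.get(key) is not None:
            argv.extend([flag, str(params[key])])
    return argv

print(build_argv({"path": "/etc/ipa/test.keytab",
                  "principal": "HTTP/freeipa-dc02.ipa.test",
                  "ipa_host": "freeipa-dc01.ipa.test",
                  "retrieve_mode": True}))
```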
@@ -233,26 +240,31 @@ def get_host_diff(client, ipa_host, module_host): def ensure(module, client): name = module.params['fqdn'] state = module.params['state'] + force_creation = module.params['force_creation'] ipa_host = client.host_find(name=name) module_host = get_host_dict(description=module.params['description'], - force=module.params['force'], ip_address=module.params['ip_address'], + force=module.params['force'], + ip_address=module.params['ip_address'], ns_host_location=module.params['ns_host_location'], ns_hardware_platform=module.params['ns_hardware_platform'], ns_os_version=module.params['ns_os_version'], user_certificate=module.params['user_certificate'], mac_address=module.params['mac_address'], - random_password=module.params.get('random_password'), + random_password=module.params['random_password'], ) changed = False if state in ['present', 'enabled', 'disabled']: - if not ipa_host: + if not ipa_host and (force_creation or state == 'present'): changed = True if not module.check_mode: # OTP password generated by FreeIPA is visible only for host_add command # so, return directly from here. return changed, client.host_add(name=name, host=module_host) else: + if state in ['disabled', 'enabled']: + module.fail_json(msg="No host with name " + ipa_host + " found") + diff = get_host_diff(client, ipa_host, module_host) if len(diff) > 0: changed = True @@ -261,11 +273,10 @@ def ensure(module, client): for key in diff: data[key] = module_host.get(key) ipa_host_show = client.host_show(name=name) - if ipa_host_show.get('has_keytab', False) and module.params.get('random_password'): + if ipa_host_show.get('has_keytab', True) and (state == 'disabled' or module.params.get('random_password')): client.host_disable(name=name) return changed, client.host_mod(name=name, host=data) - - else: + elif state == 'absent': if ipa_host: changed = True update_dns = module.params.get('update_dns', False) @@ -288,7 +299,8 @@ def main(): mac_address=dict(type='list', aliases=['macaddress'], elements='str'), update_dns=dict(type='bool'), state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), - random_password=dict(type='bool', no_log=False),) + random_password=dict(type='bool', no_log=False), + force_creation=dict(type='bool', default=True),) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) diff --git a/plugins/modules/ipa_hostgroup.py b/plugins/modules/ipa_hostgroup.py index 70749c35b3..9e6abf32aa 100644 --- a/plugins/modules/ipa_hostgroup.py +++ b/plugins/modules/ipa_hostgroup.py @@ -57,13 +57,14 @@ options: state: description: - State to ensure. + - V("absent") and V("disabled") give the same results. + - V("present") and V("enabled") give the same results. 
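The ensure() change above gates host creation on the new force_creation option. Condensed into a small predicate for readability (the default of true mirrors the option's default, so existing playbooks keep their behaviour):

```python
# Condensed restatement of the creation rule introduced above: a missing host
# is created when state is "present", or when force_creation is left true.
def should_create(host_exists, state, force_creation=True):
    return (not host_exists) and (force_creation or state == "present")

assert should_create(False, "present", force_creation=False) is True
assert should_create(False, "enabled", force_creation=False) is False
assert should_create(True, "present") is False  # existing hosts are only updated
```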
default: "present" choices: ["absent", "disabled", "enabled", "present"] type: str extends_documentation_fragment: - community.general.ipa.documentation - community.general.attributes - ''' EXAMPLES = r''' @@ -160,7 +161,7 @@ def ensure(module, client): module_hostgroup = get_hostgroup_dict(description=module.params['description']) changed = False - if state == 'present': + if state in ['present', 'enabled']: if not ipa_hostgroup: changed = True if not module.check_mode: diff --git a/plugins/modules/ipa_otptoken.py b/plugins/modules/ipa_otptoken.py index 567674f935..d8a5b3cf1d 100644 --- a/plugins/modules/ipa_otptoken.py +++ b/plugins/modules/ipa_otptoken.py @@ -392,9 +392,7 @@ def ensure(module, client): 'counter': 'ipatokenhotpcounter'} # Create inverse dictionary for mapping return values - ipa_to_ansible = {} - for (k, v) in ansible_to_ipa.items(): - ipa_to_ansible[v] = k + ipa_to_ansible = {v: k for k, v in ansible_to_ipa.items()} unmodifiable_after_creation = ['otptype', 'secretkey', 'algorithm', 'digits', 'offset', 'interval', 'counter'] diff --git a/plugins/modules/iptables_state.py b/plugins/modules/iptables_state.py index b0cc3bd3f6..c97b5694c9 100644 --- a/plugins/modules/iptables_state.py +++ b/plugins/modules/iptables_state.py @@ -459,6 +459,7 @@ def main(): if not os.access(b_path, os.R_OK): module.fail_json(msg="Source %s not readable" % path) state_to_restore = read_state(b_path) + cmd = None else: cmd = ' '.join(SAVECOMMAND) diff --git a/plugins/modules/java_keystore.py b/plugins/modules/java_keystore.py index 2aeab75c06..0a8e3398d5 100644 --- a/plugins/modules/java_keystore.py +++ b/plugins/modules/java_keystore.py @@ -150,13 +150,11 @@ EXAMPLES = ''' name: example certificate: | -----BEGIN CERTIFICATE----- - h19dUZ2co2fI/ibYiwxWk4aeNE6KWvCaTQOMQ8t6Uo2XKhpL/xnjoAgh1uCQN/69 - MG+34+RhUWzCfdZH7T8/qDxJw2kEPKluaYh7KnMsba+5jHjmtzix5QIDAQABo4IB + h19dUZ2co2f... -----END CERTIFICATE----- private_key: | -----BEGIN RSA PRIVATE KEY----- - DBVFTEVDVFJJQ0lURSBERSBGUkFOQ0UxFzAVBgNVBAsMDjAwMDIgNTUyMDgxMzE3 - GLlDNMw/uHyME7gHFsqJA7O11VY6O5WQ4IDP3m/s5ZV6s+Nn6Lerz17VZ99 + DBVFTEVDVFJ... -----END RSA PRIVATE KEY----- password: changeit dest: /etc/security/keystore.jks @@ -472,7 +470,7 @@ class JavaKeystore: if self.keystore_type == 'pkcs12': # Preserve properties of the destination file, if any. 
- self.module.atomic_move(keystore_p12_path, self.keystore_path) + self.module.atomic_move(os.path.abspath(keystore_p12_path), os.path.abspath(self.keystore_path)) self.update_permissions() self.result['changed'] = True return self.result diff --git a/plugins/modules/jenkins_plugin.py b/plugins/modules/jenkins_plugin.py index 13a804a508..8834e0a2b2 100644 --- a/plugins/modules/jenkins_plugin.py +++ b/plugins/modules/jenkins_plugin.py @@ -685,7 +685,7 @@ class JenkinsPlugin(object): # Move the updates file to the right place if we could read it if tmp_updates_file != updates_file: - self.module.atomic_move(tmp_updates_file, updates_file) + self.module.atomic_move(os.path.abspath(tmp_updates_file), os.path.abspath(updates_file)) # Check if we have the plugin data available if not data.get('plugins', {}).get(self.params['name']): @@ -718,7 +718,7 @@ class JenkinsPlugin(object): details=to_native(e)) # Move the file onto the right place - self.module.atomic_move(tmp_f, f) + self.module.atomic_move(os.path.abspath(tmp_f), os.path.abspath(f)) def uninstall(self): changed = False diff --git a/plugins/modules/jira.py b/plugins/modules/jira.py index c36cf99375..64aed7e149 100644 --- a/plugins/modules/jira.py +++ b/plugins/modules/jira.py @@ -531,7 +531,7 @@ class JIRA(StateModuleHelper): ), supports_check_mode=False ) - + use_old_vardict = False state_param = 'operation' def __init_module__(self): @@ -544,7 +544,7 @@ class JIRA(StateModuleHelper): self.vars.uri = self.vars.uri.strip('/') self.vars.set('restbase', self.vars.uri + '/rest/api/2') - @cause_changes(on_success=True) + @cause_changes(when="success") def operation_create(self): createfields = { 'project': {'key': self.vars.project}, @@ -562,7 +562,7 @@ class JIRA(StateModuleHelper): url = self.vars.restbase + '/issue/' self.vars.meta = self.post(url, data) - @cause_changes(on_success=True) + @cause_changes(when="success") def operation_comment(self): data = { 'body': self.vars.comment @@ -578,7 +578,7 @@ class JIRA(StateModuleHelper): url = self.vars.restbase + '/issue/' + self.vars.issue + '/comment' self.vars.meta = self.post(url, data) - @cause_changes(on_success=True) + @cause_changes(when="success") def operation_worklog(self): data = { 'comment': self.vars.comment @@ -594,7 +594,7 @@ class JIRA(StateModuleHelper): url = self.vars.restbase + '/issue/' + self.vars.issue + '/worklog' self.vars.meta = self.post(url, data) - @cause_changes(on_success=True) + @cause_changes(when="success") def operation_edit(self): data = { 'fields': self.vars.fields @@ -602,7 +602,7 @@ class JIRA(StateModuleHelper): url = self.vars.restbase + '/issue/' + self.vars.issue self.vars.meta = self.put(url, data) - @cause_changes(on_success=True) + @cause_changes(when="success") def operation_update(self): data = { "update": self.vars.fields, @@ -624,7 +624,7 @@ class JIRA(StateModuleHelper): self.vars.meta = self.get(url) - @cause_changes(on_success=True) + @cause_changes(when="success") def operation_transition(self): # Find the transition id turl = self.vars.restbase + '/issue/' + self.vars.issue + "/transitions" @@ -657,7 +657,7 @@ class JIRA(StateModuleHelper): url = self.vars.restbase + '/issue/' + self.vars.issue + "/transitions" self.vars.meta = self.post(url, data) - @cause_changes(on_success=True) + @cause_changes(when="success") def operation_link(self): data = { 'type': {'name': self.vars.linktype}, @@ -667,7 +667,7 @@ class JIRA(StateModuleHelper): url = self.vars.restbase + '/issueLink/' self.vars.meta = self.post(url, data) - 
@cause_changes(on_success=True) + @cause_changes(when="success") def operation_attach(self): v = self.vars filename = v.attachment.get('filename') diff --git a/plugins/modules/kdeconfig.py b/plugins/modules/kdeconfig.py index 4e8d395215..96d7df8b8d 100644 --- a/plugins/modules/kdeconfig.py +++ b/plugins/modules/kdeconfig.py @@ -214,7 +214,7 @@ def run_module(module, tmpdir, kwriteconfig): if module.params['backup'] and os.path.exists(b_path): result['backup_file'] = module.backup_local(result['path']) try: - module.atomic_move(b_tmpfile, b_path) + module.atomic_move(b_tmpfile, os.path.abspath(b_path)) except IOError: module.ansible.fail_json(msg='Unable to move temporary file %s to %s, IOError' % (tmpfile, result['path']), traceback=traceback.format_exc()) diff --git a/plugins/modules/kernel_blacklist.py b/plugins/modules/kernel_blacklist.py index 2a281440a7..224b5bba8c 100644 --- a/plugins/modules/kernel_blacklist.py +++ b/plugins/modules/kernel_blacklist.py @@ -67,7 +67,7 @@ class Blacklist(StateModuleHelper): ), supports_check_mode=True, ) - mute_vardict_deprecation = True + use_old_vardict = False def __init_module__(self): self.pattern = re.compile(r'^blacklist\s+{0}$'.format(re.escape(self.vars.name))) diff --git a/plugins/modules/keycloak_authentication.py b/plugins/modules/keycloak_authentication.py index bc2898d9be..5945890bb7 100644 --- a/plugins/modules/keycloak_authentication.py +++ b/plugins/modules/keycloak_authentication.py @@ -257,6 +257,7 @@ def create_or_update_executions(kc, config, realm='master'): changed = False after = "" before = "" + execution = None if "authenticationExecutions" in config: # Get existing executions on the Keycloak server for this alias existing_executions = kc.get_executions_representation(config, realm=realm) @@ -283,27 +284,27 @@ def create_or_update_executions(kc, config, realm='master'): if new_exec['index'] is None: new_exec_index = exec_index before += str(existing_executions[exec_index]) + '\n' - id_to_update = existing_executions[exec_index]["id"] + execution = existing_executions[exec_index].copy() # Remove exec from list in case 2 exec with same name existing_executions[exec_index].clear() elif new_exec["providerId"] is not None: kc.create_execution(new_exec, flowAlias=flow_alias_parent, realm=realm) + execution = kc.get_executions_representation(config, realm=realm)[exec_index] exec_found = True exec_index = new_exec_index - id_to_update = kc.get_executions_representation(config, realm=realm)[exec_index]["id"] after += str(new_exec) + '\n' elif new_exec["displayName"] is not None: kc.create_subflow(new_exec["displayName"], flow_alias_parent, realm=realm, flowType=new_exec["subFlowType"]) + execution = kc.get_executions_representation(config, realm=realm)[exec_index] exec_found = True exec_index = new_exec_index - id_to_update = kc.get_executions_representation(config, realm=realm)[exec_index]["id"] after += str(new_exec) + '\n' if exec_found: changed = True if exec_index != -1: # Update the existing execution updated_exec = { - "id": id_to_update + "id": execution["id"] } # add the execution configuration if new_exec["authenticationConfig"] is not None: @@ -313,6 +314,8 @@ def create_or_update_executions(kc, config, realm='master'): if key not in ("flowAlias", "authenticationConfig", "subFlowType"): updated_exec[key] = new_exec[key] if new_exec["requirement"] is not None: + if "priority" in execution: + updated_exec["priority"] = execution["priority"] kc.update_authentication_executions(flow_alias_parent, updated_exec, realm=realm) diff 
= exec_index - new_exec_index kc.change_execution_priority(updated_exec["id"], diff, realm=realm) diff --git a/plugins/modules/keycloak_client.py b/plugins/modules/keycloak_client.py index cd9c60bacf..da82d5460e 100644 --- a/plugins/modules/keycloak_client.py +++ b/plugins/modules/keycloak_client.py @@ -108,13 +108,14 @@ options: client_authenticator_type: description: - - How do clients authenticate with the auth server? Either V(client-secret) or - V(client-jwt) can be chosen. When using V(client-secret), the module parameter - O(secret) can set it, while for V(client-jwt), you can use the keys C(use.jwks.url), + - How do clients authenticate with the auth server? Either V(client-secret), + V(client-jwt), or V(client-x509) can be chosen. When using V(client-secret), the module parameter + O(secret) can set it, for V(client-jwt), you can use the keys C(use.jwks.url), C(jwks.url), and C(jwt.credential.certificate) in the O(attributes) module parameter - to configure its behavior. + to configure its behavior. For V(client-x509) you can use the keys C(x509.allow.regex.pattern.comparison) + and C(x509.subjectdn) in the O(attributes) module parameter to configure which certificate(s) to accept. - This is 'clientAuthenticatorType' in the Keycloak REST API. - choices: ['client-secret', 'client-jwt'] + choices: ['client-secret', 'client-jwt', 'client-x509'] aliases: - clientAuthenticatorType type: str @@ -340,6 +341,42 @@ options: description: - Override realm authentication flow bindings. type: dict + suboptions: + browser: + description: + - Flow ID of the browser authentication flow. + - O(authentication_flow_binding_overrides.browser) + and O(authentication_flow_binding_overrides.browser_name) are mutually exclusive. + type: str + + browser_name: + description: + - Flow name of the browser authentication flow. + - O(authentication_flow_binding_overrides.browser) + and O(authentication_flow_binding_overrides.browser_name) are mutually exclusive. + aliases: + - browserName + type: str + version_added: 9.1.0 + + direct_grant: + description: + - Flow ID of the direct grant authentication flow. + - O(authentication_flow_binding_overrides.direct_grant) + and O(authentication_flow_binding_overrides.direct_grant_name) are mutually exclusive. + aliases: + - directGrant + type: str + + direct_grant_name: + description: + - Flow name of the direct grant authentication flow. + - O(authentication_flow_binding_overrides.direct_grant) + and O(authentication_flow_binding_overrides.direct_grant_name) are mutually exclusive. + aliases: + - directGrantName + type: str + version_added: 9.1.0 aliases: - authenticationFlowBindingOverrides version_added: 3.4.0 @@ -497,7 +534,6 @@ options: description: - SAML Redirect Binding URL for the client's assertion consumer service (login responses). - saml_force_name_id_format: description: - For SAML clients, Boolean specifying whether to ignore requested NameID subject format and using the configured one instead. @@ -545,6 +581,18 @@ options: - For OpenID-Connect clients, client certificate for validating JWT issued by client and signed by its key, base64-encoded. + x509.subjectdn: + description: + - For OpenID-Connect clients, subject which will be used to authenticate the client. + type: str + version_added: 9.5.0 + + x509.allow.regex.pattern.comparison: + description: + - For OpenID-Connect clients, boolean specifying whether to allow C(x509.subjectdn) as regular expression. 
+ type: bool + version_added: 9.5.0 + extends_documentation_fragment: - community.general.keycloak - community.general.attributes @@ -588,6 +636,22 @@ EXAMPLES = ''' delegate_to: localhost +- name: Create or update a Keycloak client (minimal example), with x509 authentication + community.general.keycloak_client: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + realm: master + state: present + client_id: test + client_authenticator_type: client-x509 + attributes: + x509.subjectdn: "CN=client" + x509.allow.regex.pattern.comparison: false + + - name: Create or update a Keycloak client (with all the bells and whistles) community.general.keycloak_client: auth_client_id: admin-cli @@ -741,9 +805,6 @@ def normalise_cr(clientrep, remove_ids=False): # Avoid the dict passed in to be modified clientrep = clientrep.copy() - if 'attributes' in clientrep: - clientrep['attributes'] = list(sorted(clientrep['attributes'])) - if 'defaultClientScopes' in clientrep: clientrep['defaultClientScopes'] = list(sorted(clientrep['defaultClientScopes'])) @@ -775,11 +836,73 @@ def sanitize_cr(clientrep): if 'secret' in result: result['secret'] = 'no_log' if 'attributes' in result: - if 'saml.signing.private.key' in result['attributes']: - result['attributes']['saml.signing.private.key'] = 'no_log' + attributes = result['attributes'] + if isinstance(attributes, dict): + if 'saml.signing.private.key' in attributes: + attributes['saml.signing.private.key'] = 'no_log' + if 'saml.encryption.private.key' in attributes: + attributes['saml.encryption.private.key'] = 'no_log' return normalise_cr(result) +def get_authentication_flow_id(flow_name, realm, kc): + """ Get the authentication flow ID based on the flow name, realm, and Keycloak client. + + Args: + flow_name (str): The name of the authentication flow. + realm (str): The name of the realm. + kc (KeycloakClient): The Keycloak client instance. + + Returns: + str: The ID of the authentication flow. + + Raises: + KeycloakAPIException: If the authentication flow with the given name is not found in the realm. + """ + flow = kc.get_authentication_flow_by_alias(flow_name, realm) + if flow: + return flow["id"] + kc.module.fail_json(msg='Authentification flow %s not found in realm %s' % (flow_name, realm)) + + +def flow_binding_from_dict_to_model(newClientFlowBinding, realm, kc): + """ Convert a dictionary representing client flow bindings to a model representation. + + Args: + newClientFlowBinding (dict): A dictionary containing client flow bindings. + realm (str): The name of the realm. + kc (KeycloakClient): An instance of the KeycloakClient class. + + Returns: + dict: A dictionary representing the model flow bindings. The dictionary has two keys: + - "browser" (str or None): The ID of the browser authentication flow binding, or None if not provided. + - "direct_grant" (str or None): The ID of the direct grant authentication flow binding, or None if not provided. + + Raises: + KeycloakAPIException: If the authentication flow with the given name is not found in the realm. 
+ + """ + + modelFlow = { + "browser": None, + "direct_grant": None + } + + for k, v in newClientFlowBinding.items(): + if not v: + continue + if k == "browser": + modelFlow["browser"] = v + elif k == "browser_name": + modelFlow["browser"] = get_authentication_flow_id(v, realm, kc) + elif k == "direct_grant": + modelFlow["direct_grant"] = v + elif k == "direct_grant_name": + modelFlow["direct_grant"] = get_authentication_flow_id(v, realm, kc) + + return modelFlow + + def main(): """ Module execution @@ -798,6 +921,13 @@ def main(): config=dict(type='dict'), ) + authentication_flow_spec = dict( + browser=dict(type='str'), + browser_name=dict(type='str', aliases=['browserName']), + direct_grant=dict(type='str', aliases=['directGrant']), + direct_grant_name=dict(type='str', aliases=['directGrantName']), + ) + meta_args = dict( state=dict(default='present', choices=['present', 'absent']), realm=dict(type='str', default='master'), @@ -811,7 +941,7 @@ def main(): base_url=dict(type='str', aliases=['baseUrl']), surrogate_auth_required=dict(type='bool', aliases=['surrogateAuthRequired']), enabled=dict(type='bool'), - client_authenticator_type=dict(type='str', choices=['client-secret', 'client-jwt'], aliases=['clientAuthenticatorType']), + client_authenticator_type=dict(type='str', choices=['client-secret', 'client-jwt', 'client-x509'], aliases=['clientAuthenticatorType']), secret=dict(type='str', no_log=True), registration_access_token=dict(type='str', aliases=['registrationAccessToken'], no_log=True), default_roles=dict(type='list', elements='str', aliases=['defaultRoles']), @@ -837,7 +967,13 @@ def main(): use_template_scope=dict(type='bool', aliases=['useTemplateScope']), use_template_mappers=dict(type='bool', aliases=['useTemplateMappers']), always_display_in_console=dict(type='bool', aliases=['alwaysDisplayInConsole']), - authentication_flow_binding_overrides=dict(type='dict', aliases=['authenticationFlowBindingOverrides']), + authentication_flow_binding_overrides=dict( + type='dict', + aliases=['authenticationFlowBindingOverrides'], + options=authentication_flow_spec, + required_one_of=[['browser', 'direct_grant', 'browser_name', 'direct_grant_name']], + mutually_exclusive=[['browser', 'browser_name'], ['direct_grant', 'direct_grant_name']], + ), protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec, aliases=['protocolMappers']), authorization_settings=dict(type='dict', aliases=['authorizationSettings']), default_client_scopes=dict(type='list', elements='str', aliases=['defaultClientScopes']), @@ -888,17 +1024,12 @@ def main(): for client_param in client_params: new_param_value = module.params.get(client_param) - # some lists in the Keycloak API are sorted, some are not. 
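A toy restatement of flow_binding_from_dict_to_model() above, with the Keycloak lookup replaced by a plain dictionary, shows how the *_name suboptions resolve to flow IDs while browser and direct_grant pass through unchanged (the stub names below are hypothetical):

```python
# Stubbed illustration of the name-to-ID resolution above; resolve_flow_id
# stands in for the Keycloak API lookup performed by the real module.
def to_model(binding, resolve_flow_id):
    model = {"browser": None, "direct_grant": None}
    for key, value in binding.items():
        if not value:
            continue
        if key in ("browser", "direct_grant"):
            model[key] = value                         # already a flow ID
        elif key == "browser_name":
            model["browser"] = resolve_flow_id(value)  # alias to ID lookup
        elif key == "direct_grant_name":
            model["direct_grant"] = resolve_flow_id(value)
    return model

fake_flows = {"my browser flow": "1111-2222"}
print(to_model({"browser_name": "my browser flow"}, fake_flows.get))
# {'browser': '1111-2222', 'direct_grant': None}
```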
- if isinstance(new_param_value, list): - if client_param in ['attributes']: - try: - new_param_value = sorted(new_param_value) - except TypeError: - pass # Unfortunately, the ansible argument spec checker introduces variables with null values when # they are not specified if client_param == 'protocol_mappers': - new_param_value = [dict((k, v) for k, v in x.items() if x[k] is not None) for x in new_param_value] + new_param_value = [{k: v for k, v in x.items() if v is not None} for x in new_param_value] + elif client_param == 'authentication_flow_binding_overrides': + new_param_value = flow_binding_from_dict_to_model(new_param_value, realm, kc) changeset[camel(client_param)] = new_param_value diff --git a/plugins/modules/keycloak_clientscope.py b/plugins/modules/keycloak_clientscope.py index d24e0f1f27..35ac3d9500 100644 --- a/plugins/modules/keycloak_clientscope.py +++ b/plugins/modules/keycloak_clientscope.py @@ -301,10 +301,34 @@ end_state: ''' from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ - keycloak_argument_spec, get_token, KeycloakError + keycloak_argument_spec, get_token, KeycloakError, is_struct_included from ansible.module_utils.basic import AnsibleModule +def normalise_cr(clientscoperep, remove_ids=False): + """ Re-sorts any properties where the order so that diff's is minimised, and adds default values where appropriate so that the + the change detection is more effective. + + :param clientscoperep: the clientscoperep dict to be sanitized + :param remove_ids: If set to true, then the unique ID's of objects is removed to make the diff and checks for changed + not alert when the ID's of objects are not usually known, (e.g. for protocol_mappers) + :return: normalised clientscoperep dict + """ + # Avoid the dict passed in to be modified + clientscoperep = clientscoperep.copy() + + if 'protocolMappers' in clientscoperep: + clientscoperep['protocolMappers'] = sorted(clientscoperep['protocolMappers'], key=lambda x: (x.get('name'), x.get('protocol'), x.get('protocolMapper'))) + for mapper in clientscoperep['protocolMappers']: + if remove_ids: + mapper.pop('id', None) + + # Set to a default value. + mapper['consentRequired'] = mapper.get('consentRequired', False) + + return clientscoperep + + def sanitize_cr(clientscoperep): """ Removes probably sensitive details from a clientscoperep representation. @@ -317,7 +341,7 @@ def sanitize_cr(clientscoperep): if 'attributes' in result: if 'saml.signing.private.key' in result['attributes']: result['attributes']['saml.signing.private.key'] = 'no_log' - return result + return normalise_cr(result) def main(): @@ -391,17 +415,10 @@ def main(): for clientscope_param in clientscope_params: new_param_value = module.params.get(clientscope_param) - # some lists in the Keycloak API are sorted, some are not. 
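On plain dictionaries, the normalisation added in the clientscope hunk above boils down to sorting mappers deterministically, dropping server-assigned IDs and filling the consentRequired default, so that equal configurations compare equal regardless of ordering. A self-contained sketch (the sample mapper names are illustrative):

```python
# Self-contained illustration of the normalisation performed by the new
# normalise_cr(): sort mappers, strip ids, default consentRequired.
def normalise(mappers):
    cleaned = []
    for mapper in sorted(mappers, key=lambda x: (x.get("name"), x.get("protocol"), x.get("protocolMapper"))):
        mapper = {k: v for k, v in mapper.items() if k != "id"}
        mapper.setdefault("consentRequired", False)
        cleaned.append(mapper)
    return cleaned

a = [{"name": "audience", "protocol": "openid-connect",
      "protocolMapper": "oidc-audience-mapper", "id": "123"},
     {"name": "username", "protocol": "openid-connect",
      "protocolMapper": "oidc-usermodel-attribute-mapper"}]
assert normalise(list(reversed(a))) == normalise(a)  # order no longer matters
```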
- if isinstance(new_param_value, list): - if clientscope_param in ['attributes']: - try: - new_param_value = sorted(new_param_value) - except TypeError: - pass # Unfortunately, the ansible argument spec checker introduces variables with null values when # they are not specified if clientscope_param == 'protocol_mappers': - new_param_value = [dict((k, v) for k, v in x.items() if x[k] is not None) for x in new_param_value] + new_param_value = [{k: v for k, v in x.items() if v is not None} for x in new_param_value] changeset[camel(clientscope_param)] = new_param_value # Prepare the desired values using the existing values (non-existence results in a dict that is save to use as a basis) @@ -445,7 +462,9 @@ def main(): # Process an update # no changes - if desired_clientscope == before_clientscope: + # remove ids for compare, problematic if desired has no ids set (not required), + # normalize for consentRequired in protocolMappers + if normalise_cr(desired_clientscope, remove_ids=True) == normalise_cr(before_clientscope, remove_ids=True): result['changed'] = False result['end_state'] = sanitize_cr(desired_clientscope) result['msg'] = "No changes required to clientscope {name}.".format(name=before_clientscope['name']) @@ -458,6 +477,13 @@ def main(): result['diff'] = dict(before=sanitize_cr(before_clientscope), after=sanitize_cr(desired_clientscope)) if module.check_mode: + # We can only compare the current clientscope with the proposed updates we have + before_norm = normalise_cr(before_clientscope, remove_ids=True) + desired_norm = normalise_cr(desired_clientscope, remove_ids=True) + if module._diff: + result['diff'] = dict(before=sanitize_cr(before_norm), + after=sanitize_cr(desired_norm)) + result['changed'] = not is_struct_included(desired_norm, before_norm) module.exit_json(**result) # do the update diff --git a/plugins/modules/keycloak_clientscope_type.py b/plugins/modules/keycloak_clientscope_type.py index 37a5d3be94..055bb053c5 100644 --- a/plugins/modules/keycloak_clientscope_type.py +++ b/plugins/modules/keycloak_clientscope_type.py @@ -40,7 +40,7 @@ options: client_id: description: - - The O(client_id) of the client. If not set the clientscop types are set as a default for the realm. + - The O(client_id) of the client. If not set the clientscope types are set as a default for the realm. 
aliases: - clientId type: str @@ -190,6 +190,15 @@ def extract_field(dictionary, field='name'): return [cs[field] for cs in dictionary] +def normalize_scopes(scopes): + scopes_copy = scopes.copy() + if isinstance(scopes_copy.get('default_clientscopes'), list): + scopes_copy['default_clientscopes'] = sorted(scopes_copy['default_clientscopes']) + if isinstance(scopes_copy.get('optional_clientscopes'), list): + scopes_copy['optional_clientscopes'] = sorted(scopes_copy['optional_clientscopes']) + return scopes_copy + + def main(): """ Module keycloak_clientscope_type @@ -244,10 +253,7 @@ def main(): }) if module._diff: - result['diff'] = dict(before=result['existing'], after=result['proposed']) - - if module.check_mode: - module.exit_json(**result) + result['diff'] = dict(before=normalize_scopes(result['existing']), after=normalize_scopes(result['proposed'])) default_clientscopes_add = clientscopes_to_add(default_clientscopes_existing, default_clientscopes_real) optional_clientscopes_add = clientscopes_to_add(optional_clientscopes_existing, optional_clientscopes_real) @@ -255,6 +261,13 @@ def main(): default_clientscopes_delete = clientscopes_to_delete(default_clientscopes_existing, default_clientscopes_real) optional_clientscopes_delete = clientscopes_to_delete(optional_clientscopes_existing, optional_clientscopes_real) + result["changed"] = any(len(x) > 0 for x in [ + default_clientscopes_add, optional_clientscopes_add, default_clientscopes_delete, optional_clientscopes_delete + ]) + + if module.check_mode: + module.exit_json(**result) + # first delete so clientscopes can change type for clientscope in default_clientscopes_delete: kc.delete_default_clientscope(clientscope['id'], realm, client_id) @@ -266,13 +279,6 @@ def main(): for clientscope in optional_clientscopes_add: kc.add_optional_clientscope(clientscope['id'], realm, client_id) - result["changed"] = ( - len(default_clientscopes_add) > 0 - or len(optional_clientscopes_add) > 0 - or len(default_clientscopes_delete) > 0 - or len(optional_clientscopes_delete) > 0 - ) - result['end_state'].update({ 'default_clientscopes': extract_field(kc.get_default_clientscopes(realm, client_id)), 'optional_clientscopes': extract_field(kc.get_optional_clientscopes(realm, client_id)) diff --git a/plugins/modules/keycloak_identity_provider.py b/plugins/modules/keycloak_identity_provider.py index 2eca3a06d2..609673653b 100644 --- a/plugins/modules/keycloak_identity_provider.py +++ b/plugins/modules/keycloak_identity_provider.py @@ -445,6 +445,15 @@ def get_identity_provider_with_mappers(kc, alias, realm): idp = kc.get_identity_provider(alias, realm) if idp is not None: idp['mappers'] = sorted(kc.get_identity_provider_mappers(alias, realm), key=lambda x: x.get('name')) + # clientSecret returned by API when using `get_identity_provider(alias, realm)` is always ********** + # to detect changes to the secret, we get the actual cleartext secret from the full realm info + if 'config' in idp: + if 'clientSecret' in idp['config']: + for idp_from_realm in kc.get_realm_by_id(realm).get('identityProviders', []): + if idp_from_realm['internalId'] == idp['internalId']: + cleartext_secret = idp_from_realm.get('config', {}).get('clientSecret') + if cleartext_secret: + idp['config']['clientSecret'] = cleartext_secret if idp is None: idp = {} return idp @@ -525,7 +534,7 @@ def main(): # special handling of mappers list to allow change detection if module.params.get('mappers') is not None: for change in module.params['mappers']: - change = dict((k, v) for k, v in 
change.items() if change[k] is not None) + change = {k: v for k, v in change.items() if v is not None} if change.get('id') is None and change.get('name') is None: module.fail_json(msg='Either `name` or `id` has to be specified on each mapper.') if before_idp == dict(): diff --git a/plugins/modules/keycloak_realm.py b/plugins/modules/keycloak_realm.py index 6128c9e4c7..9bbcdb6b1a 100644 --- a/plugins/modules/keycloak_realm.py +++ b/plugins/modules/keycloak_realm.py @@ -803,7 +803,7 @@ def main(): if module._diff: result['diff'] = dict(before=sanitize_cr(before_norm), after=sanitize_cr(desired_norm)) - result['changed'] = (before_realm != desired_realm) + result['changed'] = (before_norm != desired_norm) module.exit_json(**result) diff --git a/plugins/modules/keycloak_realm_key.py b/plugins/modules/keycloak_realm_key.py index 6e762fba9d..edc8a6068e 100644 --- a/plugins/modules/keycloak_realm_key.py +++ b/plugins/modules/keycloak_realm_key.py @@ -68,7 +68,7 @@ options: type: bool parent_id: description: - - The parent_id of the realm key. In practice the ID (name) of the realm. + - The parent_id of the realm key. In practice the name of the realm. type: str required: true provider_id: @@ -300,7 +300,7 @@ def main(): kc = KeycloakAPI(module, connection_header) - params_to_ignore = list(keycloak_argument_spec().keys()) + ["state", "force"] + params_to_ignore = list(keycloak_argument_spec().keys()) + ["state", "force", "parent_id"] # Filter and map the parameters names that apply to the role component_params = [x for x in module.params @@ -371,7 +371,7 @@ def main(): parent_id = module.params.get('parent_id') # Get a list of all Keycloak components that are of keyprovider type. - realm_keys = kc.get_components(urlencode(dict(type=provider_type, parent=parent_id)), parent_id) + realm_keys = kc.get_components(urlencode(dict(type=provider_type)), parent_id) # If this component is present get its key ID. Confusingly the key ID is # also known as the Provider ID. diff --git a/plugins/modules/keycloak_realm_keys_metadata_info.py b/plugins/modules/keycloak_realm_keys_metadata_info.py new file mode 100644 index 0000000000..ef4048b891 --- /dev/null +++ b/plugins/modules/keycloak_realm_keys_metadata_info.py @@ -0,0 +1,133 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: keycloak_realm_keys_metadata_info + +short_description: Allows obtaining Keycloak realm keys metadata via Keycloak API + +version_added: 9.3.0 + +description: + - This module allows you to get Keycloak realm keys metadata via the Keycloak REST API. + + - The names of module options are snake_cased versions of the camelCase ones found in the + Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/latest/rest-api/index.html). + +options: + realm: + type: str + description: + - They Keycloak realm to fetch keys metadata. 
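# --- Editorial sketch (not part of the patch) ---------------------------------
# Illustrates the clientSecret handling added to keycloak_identity_provider
# earlier in this patch: the per-IdP endpoint redacts config.clientSecret, so
# the cleartext value is looked up in the realm representation (matched by
# internalId) and substituted before change detection. Plain dicts stand in for
# the API responses; the helper name is made up.
def resolve_client_secret(idp, realm_rep):
    if 'clientSecret' not in idp.get('config', {}):
        return idp
    for candidate in realm_rep.get('identityProviders', []):
        if candidate.get('internalId') == idp.get('internalId'):
            cleartext = candidate.get('config', {}).get('clientSecret')
            if cleartext:
                idp['config']['clientSecret'] = cleartext
    return idp


idp = {'internalId': '4f1a', 'config': {'clientSecret': '**********'}}
realm = {'identityProviders': [{'internalId': '4f1a', 'config': {'clientSecret': 'hunter2'}}]}
assert resolve_client_secret(idp, realm)['config']['clientSecret'] == 'hunter2'
# --- end of editorial sketch ---------------------------------------------------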
+ default: 'master' + +extends_documentation_fragment: + - community.general.keycloak + - community.general.attributes + - community.general.attributes.info_module + +author: + - Thomas Bach (@thomasbach-dev) +""" + +EXAMPLES = """ +- name: Fetch Keys metadata + community.general.keycloak_realm_keys_metadata_info: + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + realm: MyCustomRealm + delegate_to: localhost + register: keycloak_keys_metadata + +- name: Write the Keycloak keys certificate into a file + ansible.builtin.copy: + dest: /tmp/keycloak.cert + content: | + {{ keys_metadata['keycloak_keys_metadata']['keys'] + | selectattr('algorithm', 'equalto', 'RS256') + | map(attribute='certificate') + | first + }} + delegate_to: localhost +""" + +RETURN = """ +msg: + description: Message as to what action was taken. + returned: always + type: str + +keys_metadata: + description: + + - Representation of the realm keys metadata (see + U(https://www.keycloak.org/docs-api/latest/rest-api/index.html#KeysMetadataRepresentation)). + + returned: always + type: dict + contains: + active: + description: A mapping (that is, a dict) from key algorithms to UUIDs. + type: dict + returned: always + keys: + description: A list of dicts providing detailed information on the keys. + type: list + elements: dict + returned: always +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( + KeycloakAPI, KeycloakError, get_token, keycloak_argument_spec) + + +def main(): + argument_spec = keycloak_argument_spec() + + meta_args = dict( + realm=dict(default="master"), + ) + argument_spec.update(meta_args) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([["token", "auth_realm", "auth_username", "auth_password"]]), + required_together=([["auth_realm", "auth_username", "auth_password"]]), + ) + + result = dict(changed=False, msg="", keys_metadata="") + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get("realm") + + keys_metadata = kc.get_realm_keys_metadata_by_id(realm=realm) + + result["keys_metadata"] = keys_metadata + result["msg"] = "Get realm keys metadata successful for ID {realm}".format( + realm=realm + ) + module.exit_json(**result) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/keycloak_user_federation.py b/plugins/modules/keycloak_user_federation.py index f87ef936ce..215aa7f4ca 100644 --- a/plugins/modules/keycloak_user_federation.py +++ b/plugins/modules/keycloak_user_federation.py @@ -85,6 +85,32 @@ options: - parentId type: str + remove_unspecified_mappers: + description: + - Remove mappers that are not specified in the configuration for this federation. + - Set to V(false) to keep mappers that are not listed in O(mappers). + type: bool + default: true + version_added: 9.4.0 + + bind_credential_update_mode: + description: + - The value of the config parameter O(config.bindCredential) is redacted in the Keycloak responses. + Comparing the redacted value with the desired value always evaluates to not equal. This means + the before and desired states are never equal if the parameter is set. 
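# --- Editorial sketch (not part of the patch) ---------------------------------
# Shows the comparison behaviour described above for the new
# bind_credential_update_mode option of keycloak_user_federation: with
# only_indirect, bindCredential is dropped from both the before and the desired
# state before deciding whether an update is needed, so the redacted value
# alone never triggers a change. Minimal made-up dicts, not real component
# representations.
from copy import deepcopy


def needs_update(before, desired, mode='always'):
    before, desired = deepcopy(before), deepcopy(desired)
    if mode == 'only_indirect':
        before.get('config', {}).pop('bindCredential', None)
        desired.get('config', {}).pop('bindCredential', None)
    return before != desired


before = {'config': {'bindCredential': ['**********'], 'usersDn': ['ou=people,dc=example,dc=org']}}
desired = {'config': {'bindCredential': ['hunter2'], 'usersDn': ['ou=people,dc=example,dc=org']}}
assert needs_update(before, desired, mode='always') is True
assert needs_update(before, desired, mode='only_indirect') is False
# --- end of editorial sketch ---------------------------------------------------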
+ - Set to V(always) to include O(config.bindCredential) in the comparison of before and desired state. + Because of the redacted value returned by Keycloak the module will always detect a change + and make an update if a O(config.bindCredential) value is set. + - Set to V(only_indirect) to exclude O(config.bindCredential) when comparing the before state with the + desired state. The value of O(config.bindCredential) will only be updated if there are other changes + to the user federation that require an update. + type: str + default: always + choices: + - always + - only_indirect + version_added: 9.5.0 + config: description: - Dict specifying the configuration options for the provider; the contents differ depending on @@ -434,6 +460,17 @@ options: - Max lifespan of cache entry in milliseconds. type: int + referral: + description: + - Specifies if LDAP referrals should be followed or ignored. Please note that enabling + referrals can slow down authentication as it allows the LDAP server to decide which other + LDAP servers to use. This could potentially include untrusted servers. + type: str + choices: + - ignore + - follow + version_added: 9.5.0 + mappers: description: - A list of dicts defining mappers associated with this Identity Provider. @@ -713,19 +750,27 @@ from ansible.module_utils.six.moves.urllib.parse import urlencode from copy import deepcopy +def normalize_kc_comp(comp): + if 'config' in comp: + # kc completely removes the parameter `krbPrincipalAttribute` if it is set to `''`; the unset kc parameter is equivalent to `''`; + # to make change detection and diff more accurate we set it again in the kc responses + if 'krbPrincipalAttribute' not in comp['config']: + comp['config']['krbPrincipalAttribute'] = [''] + + # kc stores a timestamp of the last sync in `lastSync` to time the periodic sync, it is removed to minimize diff/changes + comp['config'].pop('lastSync', None) + + def sanitize(comp): compcopy = deepcopy(comp) if 'config' in compcopy: - compcopy['config'] = dict((k, v[0]) for k, v in compcopy['config'].items()) + compcopy['config'] = {k: v[0] for k, v in compcopy['config'].items()} if 'bindCredential' in compcopy['config']: compcopy['config']['bindCredential'] = '**********' - # an empty string is valid for krbPrincipalAttribute but is filtered out in diff - if 'krbPrincipalAttribute' not in compcopy['config']: - compcopy['config']['krbPrincipalAttribute'] = '' if 'mappers' in compcopy: for mapper in compcopy['mappers']: if 'config' in mapper: - mapper['config'] = dict((k, v[0]) for k, v in mapper['config'].items()) + mapper['config'] = {k: v[0] for k, v in mapper['config'].items()} return compcopy @@ -772,6 +817,7 @@ def main(): priority=dict(type='int', default=0), rdnLDAPAttribute=dict(type='str'), readTimeout=dict(type='int'), + referral=dict(type='str', choices=['ignore', 'follow']), searchScope=dict(type='str', choices=['1', '2'], default='1'), serverPrincipal=dict(type='str'), krbPrincipalAttribute=dict(type='str'), @@ -808,6 +854,8 @@ def main(): provider_id=dict(type='str', aliases=['providerId']), provider_type=dict(type='str', aliases=['providerType'], default='org.keycloak.storage.UserStorageProvider'), parent_id=dict(type='str', aliases=['parentId']), + remove_unspecified_mappers=dict(type='bool', default=True), + bind_credential_update_mode=dict(type='str', default='always', choices=['always', 'only_indirect']), mappers=dict(type='list', elements='dict', options=mapper_spec), ) @@ -838,19 +886,26 @@ def main(): # Keycloak API expects config parameters to 
be arrays containing a single string element if config is not None: - module.params['config'] = dict((k, [str(v).lower() if not isinstance(v, str) else v]) - for k, v in config.items() if config[k] is not None) + module.params['config'] = { + k: [str(v).lower() if not isinstance(v, str) else v] + for k, v in config.items() + if config[k] is not None + } if mappers is not None: for mapper in mappers: if mapper.get('config') is not None: - mapper['config'] = dict((k, [str(v).lower() if not isinstance(v, str) else v]) - for k, v in mapper['config'].items() if mapper['config'][k] is not None) + mapper['config'] = { + k: [str(v).lower() if not isinstance(v, str) else v] + for k, v in mapper['config'].items() + if mapper['config'][k] is not None + } # Filter and map the parameters names that apply comp_params = [x for x in module.params - if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'mappers'] and - module.params.get(x) is not None] + if x not in list(keycloak_argument_spec().keys()) + + ['state', 'realm', 'mappers', 'remove_unspecified_mappers', 'bind_credential_update_mode'] + and module.params.get(x) is not None] # See if it already exists in Keycloak if cid is None: @@ -868,7 +923,9 @@ def main(): # if user federation exists, get associated mappers if cid is not None and before_comp: - before_comp['mappers'] = sorted(kc.get_components(urlencode(dict(parent=cid)), realm), key=lambda x: x.get('name')) + before_comp['mappers'] = sorted(kc.get_components(urlencode(dict(parent=cid)), realm), key=lambda x: x.get('name') or '') + + normalize_kc_comp(before_comp) # Build a proposed changeset from parameters given to this module changeset = {} @@ -877,7 +934,7 @@ def main(): new_param_value = module.params.get(param) old_value = before_comp[camel(param)] if camel(param) in before_comp else None if param == 'mappers': - new_param_value = [dict((k, v) for k, v in x.items() if x[k] is not None) for x in new_param_value] + new_param_value = [{k: v for k, v in x.items() if v is not None} for x in new_param_value] if new_param_value != old_value: changeset[camel(param)] = new_param_value @@ -886,17 +943,17 @@ def main(): if module.params['provider_id'] in ['kerberos', 'sssd']: module.fail_json(msg='Cannot configure mappers for {type} provider.'.format(type=module.params['provider_id'])) for change in module.params['mappers']: - change = dict((k, v) for k, v in change.items() if change[k] is not None) + change = {k: v for k, v in change.items() if v is not None} if change.get('id') is None and change.get('name') is None: module.fail_json(msg='Either `name` or `id` has to be specified on each mapper.') if cid is None: old_mapper = {} elif change.get('id') is not None: - old_mapper = kc.get_component(change['id'], realm) + old_mapper = next((before_mapper for before_mapper in before_comp.get('mappers', []) if before_mapper["id"] == change['id']), None) if old_mapper is None: old_mapper = {} else: - found = kc.get_components(urlencode(dict(parent=cid, name=change['name'])), realm) + found = [before_mapper for before_mapper in before_comp.get('mappers', []) if before_mapper['name'] == change['name']] if len(found) > 1: module.fail_json(msg='Found multiple mappers with name `{name}`. 
Cannot continue.'.format(name=change['name'])) if len(found) == 1: @@ -905,10 +962,16 @@ def main(): old_mapper = {} new_mapper = old_mapper.copy() new_mapper.update(change) - if new_mapper != old_mapper: - if changeset.get('mappers') is None: - changeset['mappers'] = list() - changeset['mappers'].append(new_mapper) + # changeset contains all desired mappers: those existing, to update or to create + if changeset.get('mappers') is None: + changeset['mappers'] = list() + changeset['mappers'].append(new_mapper) + changeset['mappers'] = sorted(changeset['mappers'], key=lambda x: x.get('name') or '') + + # to keep unspecified existing mappers we add them to the desired mappers list, unless they're already present + if not module.params['remove_unspecified_mappers'] and 'mappers' in before_comp: + changeset_mapper_ids = [mapper['id'] for mapper in changeset['mappers'] if 'id' in mapper] + changeset['mappers'].extend([mapper for mapper in before_comp['mappers'] if mapper['id'] not in changeset_mapper_ids]) # Prepare the desired values using the existing values (non-existence results in a dict that is save to use as a basis) desired_comp = before_comp.copy() @@ -931,50 +994,68 @@ def main(): # Process a creation result['changed'] = True - if module._diff: - result['diff'] = dict(before='', after=sanitize(desired_comp)) - if module.check_mode: + if module._diff: + result['diff'] = dict(before='', after=sanitize(desired_comp)) module.exit_json(**result) # create it - desired_comp = desired_comp.copy() - updated_mappers = desired_comp.pop('mappers', []) + desired_mappers = desired_comp.pop('mappers', []) after_comp = kc.create_component(desired_comp, realm) - cid = after_comp['id'] + updated_mappers = [] + # when creating a user federation, keycloak automatically creates default mappers + default_mappers = kc.get_components(urlencode(dict(parent=cid)), realm) - for mapper in updated_mappers: - found = kc.get_components(urlencode(dict(parent=cid, name=mapper['name'])), realm) + # create new mappers or update existing default mappers + for desired_mapper in desired_mappers: + found = [default_mapper for default_mapper in default_mappers if default_mapper['name'] == desired_mapper['name']] if len(found) > 1: - module.fail_json(msg='Found multiple mappers with name `{name}`. Cannot continue.'.format(name=mapper['name'])) + module.fail_json(msg='Found multiple mappers with name `{name}`. 
Cannot continue.'.format(name=desired_mapper['name'])) if len(found) == 1: old_mapper = found[0] else: old_mapper = {} new_mapper = old_mapper.copy() - new_mapper.update(mapper) + new_mapper.update(desired_mapper) if new_mapper.get('id') is not None: kc.update_component(new_mapper, realm) + updated_mappers.append(new_mapper) else: if new_mapper.get('parentId') is None: - new_mapper['parentId'] = after_comp['id'] - mapper = kc.create_component(new_mapper, realm) + new_mapper['parentId'] = cid + updated_mappers.append(kc.create_component(new_mapper, realm)) - after_comp['mappers'] = updated_mappers + if module.params['remove_unspecified_mappers']: + # we remove all unwanted default mappers + # we use ids so we dont accidently remove one of the previously updated default mapper + for default_mapper in default_mappers: + if not default_mapper['id'] in [x['id'] for x in updated_mappers]: + kc.delete_component(default_mapper['id'], realm) + + after_comp['mappers'] = kc.get_components(urlencode(dict(parent=cid)), realm) + normalize_kc_comp(after_comp) + if module._diff: + result['diff'] = dict(before='', after=sanitize(after_comp)) result['end_state'] = sanitize(after_comp) - - result['msg'] = "User federation {id} has been created".format(id=after_comp['id']) + result['msg'] = "User federation {id} has been created".format(id=cid) module.exit_json(**result) else: if state == 'present': # Process an update + desired_copy = deepcopy(desired_comp) + before_copy = deepcopy(before_comp) + # exclude bindCredential when checking wether an update is required, therefore + # updating it only if there are other changes + if module.params['bind_credential_update_mode'] == 'only_indirect': + desired_copy.get('config', []).pop('bindCredential', None) + before_copy.get('config', []).pop('bindCredential', None) # no changes - if desired_comp == before_comp: + if desired_copy == before_copy: result['changed'] = False result['end_state'] = sanitize(desired_comp) result['msg'] = "No changes required to user federation {id}.".format(id=cid) @@ -990,22 +1071,33 @@ def main(): module.exit_json(**result) # do the update - desired_comp = desired_comp.copy() - updated_mappers = desired_comp.pop('mappers', []) + desired_mappers = desired_comp.pop('mappers', []) kc.update_component(desired_comp, realm) - after_comp = kc.get_component(cid, realm) - for mapper in updated_mappers: + for before_mapper in before_comp.get('mappers', []): + # remove unwanted existing mappers that will not be updated + if not before_mapper['id'] in [x['id'] for x in desired_mappers if 'id' in x]: + kc.delete_component(before_mapper['id'], realm) + + for mapper in desired_mappers: + if mapper in before_comp.get('mappers', []): + continue if mapper.get('id') is not None: kc.update_component(mapper, realm) else: if mapper.get('parentId') is None: mapper['parentId'] = desired_comp['id'] - mapper = kc.create_component(mapper, realm) - - after_comp['mappers'] = updated_mappers - result['end_state'] = sanitize(after_comp) + kc.create_component(mapper, realm) + after_comp = kc.get_component(cid, realm) + after_comp['mappers'] = sorted(kc.get_components(urlencode(dict(parent=cid)), realm), key=lambda x: x.get('name') or '') + normalize_kc_comp(after_comp) + after_comp_sanitized = sanitize(after_comp) + before_comp_sanitized = sanitize(before_comp) + result['end_state'] = after_comp_sanitized + if module._diff: + result['diff'] = dict(before=before_comp_sanitized, after=after_comp_sanitized) + result['changed'] = before_comp_sanitized != 
after_comp_sanitized result['msg'] = "User federation {id} has been updated".format(id=cid) module.exit_json(**result) diff --git a/plugins/modules/keycloak_userprofile.py b/plugins/modules/keycloak_userprofile.py new file mode 100644 index 0000000000..57e1c42e96 --- /dev/null +++ b/plugins/modules/keycloak_userprofile.py @@ -0,0 +1,738 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: keycloak_userprofile + +short_description: Allows managing Keycloak User Profiles + +description: + - This module allows you to create, update, or delete Keycloak User Profiles via Keycloak API. You can also customize the "Unmanaged Attributes" with it. + + - The names of module options are snake_cased versions of the camelCase ones found in the + Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/24.0.5/rest-api/index.html). + For compatibility reasons, the module also accepts the camelCase versions of the options. + +version_added: "9.4.0" + +attributes: + check_mode: + support: full + diff_mode: + support: full + +options: + state: + description: + - State of the User Profile provider. + - On V(present), the User Profile provider will be created if it does not yet exist, or updated with + the parameters you provide. + - On V(absent), the User Profile provider will be removed if it exists. + default: 'present' + type: str + choices: + - present + - absent + + parent_id: + description: + - The parent ID of the realm key. In practice the ID (name) of the realm. + aliases: + - parentId + - realm + type: str + required: true + + provider_id: + description: + - The name of the provider ID for the key (supported value is V(declarative-user-profile)). + aliases: + - providerId + choices: ['declarative-user-profile'] + default: 'declarative-user-profile' + type: str + + provider_type: + description: + - Component type for User Profile (only supported value is V(org.keycloak.userprofile.UserProfileProvider)). + aliases: + - providerType + choices: ['org.keycloak.userprofile.UserProfileProvider'] + default: org.keycloak.userprofile.UserProfileProvider + type: str + + config: + description: + - The configuration of the User Profile Provider. + type: dict + required: false + suboptions: + kc_user_profile_config: + description: + - Define a declarative User Profile. See EXAMPLES for more context. + aliases: + - kcUserProfileConfig + type: list + elements: dict + suboptions: + attributes: + description: + - A list of attributes to be included in the User Profile. + type: list + elements: dict + suboptions: + name: + description: + - The name of the attribute. + type: str + required: true + + display_name: + description: + - The display name of the attribute. + aliases: + - displayName + type: str + required: true + + validations: + description: + - The validations to be applied to the attribute. + type: dict + suboptions: + length: + description: + - The length validation for the attribute. + type: dict + suboptions: + min: + description: + - The minimum length of the attribute. + type: int + max: + description: + - The maximum length of the attribute. + type: int + required: true + + email: + description: + - The email validation for the attribute. 
+ type: dict + + username_prohibited_characters: + description: + - The prohibited characters validation for the username attribute. + type: dict + aliases: + - usernameProhibitedCharacters + + up_username_not_idn_homograph: + description: + - The validation to prevent IDN homograph attacks in usernames. + type: dict + aliases: + - upUsernameNotIdnHomograph + + person_name_prohibited_characters: + description: + - The prohibited characters validation for person name attributes. + type: dict + aliases: + - personNameProhibitedCharacters + + uri: + description: + - The URI validation for the attribute. + type: dict + + pattern: + description: + - The pattern validation for the attribute using regular expressions. + type: dict + + options: + description: + - Validation to ensure the attribute matches one of the provided options. + type: dict + + annotations: + description: + - Annotations for the attribute. + type: dict + + group: + description: + - Specifies the User Profile group where this attribute will be added. + type: str + + permissions: + description: + - The permissions for viewing and editing the attribute. + type: dict + suboptions: + view: + description: + - The roles that can view the attribute. + - Supported values are V(admin) and V(user). + type: list + elements: str + default: + - admin + - user + + edit: + description: + - The roles that can edit the attribute. + - Supported values are V(admin) and V(user). + type: list + elements: str + default: + - admin + - user + + multivalued: + description: + - Whether the attribute can have multiple values. + type: bool + default: false + + required: + description: + - The roles that require this attribute. + type: dict + suboptions: + roles: + description: + - The roles for which this attribute is required. + - Supported values are V(admin) and V(user). + type: list + elements: str + default: + - user + + groups: + description: + - A list of attribute groups to be included in the User Profile. + type: list + elements: dict + suboptions: + name: + description: + - The name of the group. + type: str + required: true + + display_header: + description: + - The display header for the group. + aliases: + - displayHeader + type: str + required: true + + display_description: + description: + - The display description for the group. + aliases: + - displayDescription + type: str + required: false + + annotations: + description: + - The annotations included in the group. + type: dict + required: false + + unmanaged_attribute_policy: + description: + - Policy for unmanaged attributes. + aliases: + - unmanagedAttributePolicy + type: str + choices: + - ENABLED + - ADMIN_EDIT + - ADMIN_VIEW + +notes: + - Currently, only a single V(declarative-user-profile) entry is supported for O(provider_id) (design of the Keyckoak API). + However, there can be multiple O(config.kc_user_profile_config[].attributes[]) entries. 
+ +extends_documentation_fragment: + - community.general.keycloak + - community.general.attributes + +author: + - Eike Waldt (@yeoldegrove) +''' + +EXAMPLES = ''' +- name: Create a Declarative User Profile with default settings + community.general.keycloak_userprofile: + state: present + parent_id: master + config: + kc_user_profile_config: + - attributes: + - name: username + displayName: ${username} + validations: + length: + min: 3 + max: 255 + username_prohibited_characters: {} + up_username_not_idn_homograph: {} + annotations: {} + permissions: + view: + - admin + - user + edit: [] + multivalued: false + - name: email + displayName: ${email} + validations: + email: {} + length: + max: 255 + annotations: {} + required: + roles: + - user + permissions: + view: + - admin + - user + edit: [] + multivalued: false + - name: firstName + displayName: ${firstName} + validations: + length: + max: 255 + person_name_prohibited_characters: {} + annotations: {} + required: + roles: + - user + permissions: + view: + - admin + - user + edit: [] + multivalued: false + - name: lastName + displayName: ${lastName} + validations: + length: + max: 255 + person_name_prohibited_characters: {} + annotations: {} + required: + roles: + - user + permissions: + view: + - admin + - user + edit: [] + multivalued: false + groups: + - name: user-metadata + displayHeader: User metadata + displayDescription: Attributes, which refer to user metadata + annotations: {} + +- name: Delete a Keycloak User Profile Provider + keycloak_userprofile: + state: absent + parent_id: master + +# Unmanaged attributes are user attributes not explicitly defined in the User Profile +# configuration. By default, unmanaged attributes are "Disabled" and are not +# available from any context such as registration, account, and the +# administration console. By setting "Enabled", unmanaged attributes are fully +# recognized by the server and accessible through all contexts, useful if you are +# starting migrating an existing realm to the declarative User Profile +# and you don't have yet all user attributes defined in the User Profile configuration. +- name: Enable Unmanaged Attributes + community.general.keycloak_userprofile: + state: present + parent_id: master + config: + kc_user_profile_config: + - unmanagedAttributePolicy: ENABLED + +# By setting "Only administrators can write", unmanaged attributes can be managed +# only through the administration console and API, useful if you have already +# defined any custom attribute that can be managed by users but you are unsure +# about adding other attributes that should only be managed by administrators. +- name: Enable ADMIN_EDIT on Unmanaged Attributes + community.general.keycloak_userprofile: + state: present + parent_id: master + config: + kc_user_profile_config: + - unmanagedAttributePolicy: ADMIN_EDIT + +# By setting `Only administrators can view`, unmanaged attributes are read-only +# and only available through the administration console and API. +- name: Enable ADMIN_VIEW on Unmanaged Attributes + community.general.keycloak_userprofile: + state: present + parent_id: master + config: + kc_user_profile_config: + - unmanagedAttributePolicy: ADMIN_VIEW +''' + +RETURN = ''' +msg: + description: The output message generated by the module. + returned: always + type: str + sample: UserProfileProvider created successfully +data: + description: The data returned by the Keycloak API. 
+ returned: when state is present + type: dict + sample: {...} +''' + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.parse import urlencode +from copy import deepcopy +import json + + +def remove_null_values(data): + if isinstance(data, dict): + # Recursively remove null values from dictionaries + return {k: remove_null_values(v) for k, v in data.items() if v is not None} + elif isinstance(data, list): + # Recursively remove null values from lists + return [remove_null_values(item) for item in data if item is not None] + else: + # Return the data if it's neither a dictionary nor a list + return data + + +def camel_recursive(data): + if isinstance(data, dict): + # Convert keys to camelCase and apply recursively + return {camel(k): camel_recursive(v) for k, v in data.items()} + elif isinstance(data, list): + # Apply camelCase conversion to each item in the list + return [camel_recursive(item) for item in data] + else: + # Return the data as is if it's not a dict or list + return data + + +def main(): + argument_spec = keycloak_argument_spec() + + meta_args = dict( + state=dict(type='str', choices=['present', 'absent'], default='present'), + parent_id=dict(type='str', aliases=['parentId', 'realm'], required=True), + provider_id=dict(type='str', aliases=['providerId'], default='declarative-user-profile', choices=['declarative-user-profile']), + provider_type=dict( + type='str', + aliases=['providerType'], + default='org.keycloak.userprofile.UserProfileProvider', + choices=['org.keycloak.userprofile.UserProfileProvider'] + ), + config=dict( + type='dict', + required=False, + options={ + 'kc_user_profile_config': dict( + type='list', + aliases=['kcUserProfileConfig'], + elements='dict', + options={ + 'attributes': dict( + type='list', + elements='dict', + required=False, + options={ + 'name': dict(type='str', required=True), + 'display_name': dict(type='str', aliases=['displayName'], required=True), + 'validations': dict( + type='dict', + options={ + 'length': dict( + type='dict', + options={ + 'min': dict(type='int', required=False), + 'max': dict(type='int', required=True) + } + ), + 'email': dict(type='dict', required=False), + 'username_prohibited_characters': dict(type='dict', aliases=['usernameProhibitedCharacters'], required=False), + 'up_username_not_idn_homograph': dict(type='dict', aliases=['upUsernameNotIdnHomograph'], required=False), + 'person_name_prohibited_characters': dict(type='dict', aliases=['personNameProhibitedCharacters'], required=False), + 'uri': dict(type='dict', required=False), + 'pattern': dict(type='dict', required=False), + 'options': dict(type='dict', required=False) + } + ), + 'annotations': dict(type='dict'), + 'group': dict(type='str'), + 'permissions': dict( + type='dict', + options={ + 'view': dict(type='list', elements='str', default=['admin', 'user']), + 'edit': dict(type='list', elements='str', default=['admin', 'user']) + } + ), + 'multivalued': dict(type='bool', default=False), + 'required': dict( + type='dict', + options={ + 'roles': dict(type='list', elements='str', default=['user']) + } + ) + } + ), + 'groups': dict( + type='list', + elements='dict', + options={ + 'name': dict(type='str', required=True), + 'display_header': dict(type='str', aliases=['displayHeader'], required=True), + 'display_description': dict(type='str', 
aliases=['displayDescription'], required=False), + 'annotations': dict(type='dict', required=False) + } + ), + 'unmanaged_attribute_policy': dict( + type='str', + aliases=['unmanagedAttributePolicy'], + choices=['ENABLED', 'ADMIN_EDIT', 'ADMIN_VIEW'], + required=False + ) + } + ) + } + ) + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]), + required_together=([['auth_realm', 'auth_username', 'auth_password']])) + + # Initialize the result object. Only "changed" seems to have special + # meaning for Ansible. + result = dict(changed=False, msg='', end_state={}, diff=dict(before={}, after={})) + + # This will include the current state of the realm userprofile if it is already + # present. This is only used for diff-mode. + before_realm_userprofile = {} + before_realm_userprofile['config'] = {} + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + params_to_ignore = list(keycloak_argument_spec().keys()) + ["state"] + + # Filter and map the parameters names that apply to the role + component_params = [ + x + for x in module.params + if x not in params_to_ignore and module.params.get(x) is not None + ] + + # Build a proposed changeset from parameters given to this module + changeset = {} + + # Build the changeset with proper JSON serialization for kc_user_profile_config + config = module.params.get('config') + changeset['config'] = {} + + # Generate a JSON payload for Keycloak Admin API from the module + # parameters. Parameters that do not belong to the JSON payload (e.g. + # "state" or "auth_keycloal_url") have been filtered away earlier (see + # above). + # + # This loop converts Ansible module parameters (snake-case) into + # Keycloak-compatible format (camel-case). For example proider_id + # becomes providerId. It also handles some special cases, e.g. aliases. 
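# --- Editorial sketch (not part of the patch) ---------------------------------
# Standalone illustration of the snake_case -> camelCase conversion that the
# loop below performs on nested structures, comparable to the
# remove_null_values() and camel_recursive() helpers defined earlier in this
# module. The camel() function here is a local stand-in for the one imported
# from the keycloak module_utils; the sample data is made up.
def camel(words):
    parts = words.split('_')
    return parts[0] + ''.join(part.capitalize() for part in parts[1:])


def camel_keys(data):
    if isinstance(data, dict):
        return {camel(k): camel_keys(v) for k, v in data.items() if v is not None}
    if isinstance(data, list):
        return [camel_keys(item) for item in data]
    return data


attribute = {'display_name': '${email}', 'validations': {'length': {'max': 255}}, 'group': None}
assert camel_keys(attribute) == {'displayName': '${email}', 'validations': {'length': {'max': 255}}}
# --- end of editorial sketch ---------------------------------------------------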
+ for component_param in component_params: + # realm/parent_id parameter + if component_param == 'realm' or component_param == 'parent_id': + changeset['parent_id'] = module.params.get(component_param) + changeset.pop(component_param, None) + # complex parameters in config suboptions + elif component_param == 'config': + for config_param in config: + # special parameter kc_user_profile_config + if config_param in ('kcUserProfileConfig', 'kc_user_profile_config'): + config_param_org = config_param + # rename parameter to be accepted by Keycloak API + config_param = 'kc.user.profile.config' + # make sure no null values are passed to Keycloak API + kc_user_profile_config = remove_null_values(config[config_param_org]) + changeset[camel(component_param)][config_param] = [] + if len(kc_user_profile_config) > 0: + # convert aliases to camelCase + kc_user_profile_config = camel_recursive(kc_user_profile_config) + # rename validations to be accepted by Keycloak API + if 'attributes' in kc_user_profile_config[0]: + for attribute in kc_user_profile_config[0]['attributes']: + if 'validations' in attribute: + if 'usernameProhibitedCharacters' in attribute['validations']: + attribute['validations']['username-prohibited-characters'] = ( + attribute['validations'].pop('usernameProhibitedCharacters') + ) + if 'upUsernameNotIdnHomograph' in attribute['validations']: + attribute['validations']['up-username-not-idn-homograph'] = ( + attribute['validations'].pop('upUsernameNotIdnHomograph') + ) + if 'personNameProhibitedCharacters' in attribute['validations']: + attribute['validations']['person-name-prohibited-characters'] = ( + attribute['validations'].pop('personNameProhibitedCharacters') + ) + changeset[camel(component_param)][config_param].append(kc_user_profile_config[0]) + # usual camelCase parameters + else: + changeset[camel(component_param)][camel(config_param)] = [] + raw_value = module.params.get(component_param)[config_param] + if isinstance(raw_value, bool): + value = str(raw_value).lower() + else: + value = raw_value # Directly use the raw value + changeset[camel(component_param)][camel(config_param)].append(value) + # usual parameters + else: + new_param_value = module.params.get(component_param) + changeset[camel(component_param)] = new_param_value + + # Make it easier to refer to current module parameters + state = module.params.get('state') + enabled = module.params.get('enabled') + parent_id = module.params.get('parent_id') + provider_type = module.params.get('provider_type') + provider_id = module.params.get('provider_id') + + # Make a deep copy of the changeset. This is use when determining + # changes to the current state. + changeset_copy = deepcopy(changeset) + + # Get a list of all Keycloak components that are of userprofile provider type. + realm_userprofiles = kc.get_components(urlencode(dict(type=provider_type)), parent_id) + + # If this component is present get its userprofile ID. Confusingly the userprofile ID is + # also known as the Provider ID. + userprofile_id = None + + # Track individual parameter changes + changes = "" + + # This tells Ansible whether the userprofile was changed (added, removed, modified) + result['changed'] = False + + # Loop through the list of components. If we encounter a component whose + # name matches the value of the name parameter then assume the userprofile is + # already present. 
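# --- Editorial sketch (not part of the patch) ---------------------------------
# Illustrates what the loop below has to deal with: Keycloak stores
# kc.user.profile.config as a single JSON-encoded string inside the component
# config, so it is deserialized before being compared against the desired
# (already structured) changeset, and serialized again before an update is
# sent. The representations here are trimmed to the bare minimum.
import json

stored = {'config': {'kc.user.profile.config': ['{"attributes": [{"name": "email"}]}']}}
desired = {'config': {'kc.user.profile.config': [{'attributes': [{'name': 'email'}]}]}}

# deserialize the stored value, then compare
stored['config']['kc.user.profile.config'][0] = json.loads(stored['config']['kc.user.profile.config'][0])
assert stored['config'] == desired['config']

# serialize again before sending the changeset back to the API
payload = {'config': {'kc.user.profile.config': [json.dumps(desired['config']['kc.user.profile.config'][0])]}}
assert json.loads(payload['config']['kc.user.profile.config'][0]) == {'attributes': [{'name': 'email'}]}
# --- end of editorial sketch ---------------------------------------------------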
+ for userprofile in realm_userprofiles: + if provider_id == "declarative-user-profile": + userprofile_id = userprofile['id'] + changeset['id'] = userprofile_id + changeset_copy['id'] = userprofile_id + + # keycloak returns kc.user.profile.config as a single JSON formatted string, so we have to deserialize it + if 'config' in userprofile and 'kc.user.profile.config' in userprofile['config']: + userprofile['config']['kc.user.profile.config'][0] = json.loads(userprofile['config']['kc.user.profile.config'][0]) + + # Compare top-level parameters + for param, value in changeset.items(): + before_realm_userprofile[param] = userprofile[param] + + if changeset_copy[param] != userprofile[param] and param != 'config': + changes += "%s: %s -> %s, " % (param, userprofile[param], changeset_copy[param]) + result['changed'] = True + + # Compare parameters under the "config" userprofile + for p, v in changeset_copy['config'].items(): + before_realm_userprofile['config'][p] = userprofile['config'][p] + if changeset_copy['config'][p] != userprofile['config'][p]: + changes += "config.%s: %s -> %s, " % (p, userprofile['config'][p], changeset_copy['config'][p]) + result['changed'] = True + + # Check all the possible states of the resource and do what is needed to + # converge current state with desired state (create, update or delete + # the userprofile). + + # keycloak expects kc.user.profile.config as a single JSON formatted string, so we have to serialize it + if 'config' in changeset and 'kc.user.profile.config' in changeset['config']: + changeset['config']['kc.user.profile.config'][0] = json.dumps(changeset['config']['kc.user.profile.config'][0]) + if userprofile_id and state == 'present': + if result['changed']: + if module._diff: + result['diff'] = dict(before=before_realm_userprofile, after=changeset_copy) + + if module.check_mode: + result['msg'] = "Userprofile %s would be changed: %s" % (provider_id, changes.strip(", ")) + else: + kc.update_component(changeset, parent_id) + result['msg'] = "Userprofile %s changed: %s" % (provider_id, changes.strip(", ")) + else: + result['msg'] = "Userprofile %s was in sync" % (provider_id) + + result['end_state'] = changeset_copy + elif userprofile_id and state == 'absent': + if module._diff: + result['diff'] = dict(before=before_realm_userprofile, after={}) + + if module.check_mode: + result['changed'] = True + result['msg'] = "Userprofile %s would be deleted" % (provider_id) + else: + kc.delete_component(userprofile_id, parent_id) + result['changed'] = True + result['msg'] = "Userprofile %s deleted" % (provider_id) + + result['end_state'] = {} + elif not userprofile_id and state == 'present': + if module._diff: + result['diff'] = dict(before={}, after=changeset_copy) + + if module.check_mode: + result['changed'] = True + result['msg'] = "Userprofile %s would be created" % (provider_id) + else: + kc.create_component(changeset, parent_id) + result['changed'] = True + result['msg'] = "Userprofile %s created" % (provider_id) + + result['end_state'] = changeset_copy + elif not userprofile_id and state == 'absent': + result['changed'] = False + result['msg'] = "Userprofile %s not present" % (provider_id) + result['end_state'] = {} + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/launchd.py b/plugins/modules/launchd.py index e5942ea7cf..a6427bdb2f 100644 --- a/plugins/modules/launchd.py +++ b/plugins/modules/launchd.py @@ -514,7 +514,8 @@ def main(): result['status']['current_pid'] != result['status']['previous_pid']): 
result['changed'] = True if module.check_mode: - result['changed'] = True + if result['status']['current_state'] != action: + result['changed'] = True module.exit_json(**result) diff --git a/plugins/modules/linode.py b/plugins/modules/linode.py index 9e04ac63da..9b0dabdff2 100644 --- a/plugins/modules/linode.py +++ b/plugins/modules/linode.py @@ -670,7 +670,7 @@ def main(): backupwindow=backupwindow, ) - kwargs = dict((k, v) for k, v in check_items.items() if v is not None) + kwargs = {k: v for k, v in check_items.items() if v is not None} # setup the auth try: diff --git a/plugins/modules/locale_gen.py b/plugins/modules/locale_gen.py index 0dd76c9ab4..8886cdc9cd 100644 --- a/plugins/modules/locale_gen.py +++ b/plugins/modules/locale_gen.py @@ -25,9 +25,11 @@ attributes: support: none options: name: - type: str + type: list + elements: str description: - - Name and encoding of the locale, such as "en_GB.UTF-8". + - Name and encoding of the locales, such as V(en_GB.UTF-8). + - Before community.general 9.3.0, this was a string. Using a string still works. required: true state: type: str @@ -44,6 +46,13 @@ EXAMPLES = ''' community.general.locale_gen: name: de_CH.UTF-8 state: present + +- name: Ensure multiple locales exist + community.general.locale_gen: + name: + - en_GB.UTF-8 + - nl_NL.UTF-8 + state: present ''' import os @@ -74,11 +83,12 @@ class LocaleGen(StateModuleHelper): output_params = ["name"] module = dict( argument_spec=dict( - name=dict(type='str', required=True), + name=dict(type="list", elements="str", required=True), state=dict(type='str', default='present', choices=['absent', 'present']), ), supports_check_mode=True, ) + use_old_vardict = False def __init_module__(self): self.vars.set("ubuntu_mode", False) @@ -90,9 +100,7 @@ class LocaleGen(StateModuleHelper): self.LOCALE_SUPPORTED, self.LOCALE_GEN )) - if not self.is_available(): - self.do_raise("The locale you've entered is not available on your system.") - + self.assert_available() self.vars.set("is_present", self.is_present(), output=False) self.vars.set("state_tracking", self._state_name(self.vars.is_present), output=False, change=True) @@ -103,8 +111,8 @@ class LocaleGen(StateModuleHelper): def _state_name(present): return "present" if present else "absent" - def is_available(self): - """Check if the given locale is available on the system. This is done by + def assert_available(self): + """Check if the given locales are available on the system. 
This is done by checking either : * if the locale is present in /etc/locales.gen * or if the locale is present in /usr/share/i18n/SUPPORTED""" @@ -120,18 +128,35 @@ class LocaleGen(StateModuleHelper): res = [re_compiled.match(line) for line in lines] if self.verbosity >= 4: self.vars.available_lines = lines - if any(r.group("locale") == self.vars.name for r in res if r): - return True + + locales_not_found = [] + for locale in self.vars.name: + # Check if the locale is not found in any of the matches + if not any(match and match.group("locale") == locale for match in res): + locales_not_found.append(locale) + # locale may be installed but not listed in the file, for example C.UTF-8 in some systems - return self.is_present() + locales_not_found = self.locale_get_not_present(locales_not_found) + + if locales_not_found: + self.do_raise("The following locales you've entered are not available on your system: {0}".format(', '.join(locales_not_found))) def is_present(self): + return not self.locale_get_not_present(self.vars.name) + + def locale_get_not_present(self, locales): runner = locale_runner(self.module) with runner() as ctx: rc, out, err = ctx.run() if self.verbosity >= 4: self.vars.locale_run_info = ctx.run_info - return any(self.fix_case(self.vars.name) == self.fix_case(line) for line in out.splitlines()) + + not_found = [] + for locale in locales: + if not any(self.fix_case(locale) == self.fix_case(line) for line in out.splitlines()): + not_found.append(locale) + + return not_found def fix_case(self, name): """locale -a might return the encoding in either lower or upper case. @@ -140,39 +165,50 @@ class LocaleGen(StateModuleHelper): name = name.replace(s, r) return name - def set_locale(self, name, enabled=True): + def set_locale(self, names, enabled=True): """ Sets the state of the locale. Defaults to enabled. """ - search_string = r'#?\s*%s (?P.+)' % re.escape(name) - if enabled: - new_string = r'%s \g' % (name) - else: - new_string = r'# %s \g' % (name) - re_search = re.compile(search_string) - with open("/etc/locale.gen", "r") as fr: - lines = [re_search.sub(new_string, line) for line in fr] - with open("/etc/locale.gen", "w") as fw: - fw.write("".join(lines)) + with open("/etc/locale.gen", 'r') as fr: + lines = fr.readlines() - def apply_change(self, targetState, name): + locale_regexes = [] + + for name in names: + search_string = r'^#?\s*%s (?P.+)' % re.escape(name) + if enabled: + new_string = r'%s \g' % (name) + else: + new_string = r'# %s \g' % (name) + re_search = re.compile(search_string) + locale_regexes.append([re_search, new_string]) + + for i in range(len(lines)): + for [search, replace] in locale_regexes: + lines[i] = search.sub(replace, lines[i]) + + # Write the modified content back to the file + with open("/etc/locale.gen", 'w') as fw: + fw.writelines(lines) + + def apply_change(self, targetState, names): """Create or remove locale. Keyword arguments: targetState -- Desired state, either present or absent. - name -- Name including encoding such as de_CH.UTF-8. + names -- Names list including encoding such as de_CH.UTF-8. """ - self.set_locale(name, enabled=(targetState == "present")) + self.set_locale(names, enabled=(targetState == "present")) runner = locale_gen_runner(self.module) with runner() as ctx: ctx.run() - def apply_change_ubuntu(self, targetState, name): + def apply_change_ubuntu(self, targetState, names): """Create or remove locale. Keyword arguments: targetState -- Desired state, either present or absent. 
- name -- Name including encoding such as de_CH.UTF-8. + names -- Name list including encoding such as de_CH.UTF-8. """ runner = locale_gen_runner(self.module) @@ -188,7 +224,7 @@ class LocaleGen(StateModuleHelper): with open("/var/lib/locales/supported.d/local", "w") as fw: for line in content: locale, charset = line.split(' ') - if locale != name: + if locale not in names: fw.write(line) # Purge locales and regenerate. # Please provide a patch if you know how to avoid regenerating the locales to keep! diff --git a/plugins/modules/lxc_container.py b/plugins/modules/lxc_container.py index 7ded041e93..2d768eaafd 100644 --- a/plugins/modules/lxc_container.py +++ b/plugins/modules/lxc_container.py @@ -683,11 +683,11 @@ class LxcContainerManagement(object): variables.pop(v, None) false_values = BOOLEANS_FALSE.union([None, '']) - result = dict( - (v, self.module.params[k]) + result = { + v: self.module.params[k] for k, v in variables.items() if self.module.params[k] not in false_values - ) + } return result def _config(self): diff --git a/plugins/modules/lxd_container.py b/plugins/modules/lxd_container.py index b82e2be9b7..5c5d8a4d8d 100644 --- a/plugins/modules/lxd_container.py +++ b/plugins/modules/lxd_container.py @@ -400,7 +400,7 @@ EXAMPLES = ''' protocol: simplestreams type: image mode: pull - server: https://images.linuxcontainers.org + server: [...] # URL to the image server alias: debian/11 timeout: 600 ''' @@ -616,8 +616,15 @@ class LXDContainerManagement(object): def _instance_ipv4_addresses(self, ignore_devices=None): ignore_devices = ['lo'] if ignore_devices is None else ignore_devices data = (self._get_instance_state_json() or {}).get('metadata', None) or {} - network = dict((k, v) for k, v in (data.get('network', None) or {}).items() if k not in ignore_devices) - addresses = dict((k, [a['address'] for a in v['addresses'] if a['family'] == 'inet']) for k, v in network.items()) + network = { + k: v + for k, v in (data.get('network') or {}).items() + if k not in ignore_devices + } + addresses = { + k: [a['address'] for a in v['addresses'] if a['family'] == 'inet'] + for k, v in network.items() + } return addresses @staticmethod @@ -748,19 +755,22 @@ class LXDContainerManagement(object): def run(self): """Run the main method.""" + def adjust_content(content): + return content if not isinstance(content, dict) else { + k: v for k, v in content.items() if not (self.ignore_volatile_options and k.startswith('volatile.')) + } + try: if self.trust_password is not None: self.client.authenticate(self.trust_password) self.ignore_volatile_options = self.module.params.get('ignore_volatile_options') self.old_instance_json = self._get_instance_json() - self.old_sections = dict( - (section, content) if not isinstance(content, dict) - else (section, dict((k, v) for k, v in content.items() - if not (self.ignore_volatile_options and k.startswith('volatile.')))) - for section, content in (self.old_instance_json.get('metadata', None) or {}).items() + self.old_sections = { + section: adjust_content(content) + for section, content in (self.old_instance_json.get('metadata') or {}).items() if section in set(CONFIG_PARAMS) - set(CONFIG_CREATION_PARAMS) - ) + } self.diff['before']['instance'] = self.old_sections # preliminary, will be overwritten in _apply_instance_configs() if called diff --git a/plugins/modules/manageiq_provider.py b/plugins/modules/manageiq_provider.py index e6ded9ea7a..35c73a38b3 100644 --- a/plugins/modules/manageiq_provider.py +++ b/plugins/modules/manageiq_provider.py @@ -304,22 +304,7 
@@ EXAMPLES = ''' security_protocol: 'ssl-with-validation-custom-ca' certificate_authority: | -----BEGIN CERTIFICATE----- - FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu - c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw - MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw - ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S - ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm - AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw - Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa - z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ - ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ - AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG - SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI - QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA - aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 - gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA - qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o - XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + FAKECERTsdKgAwI... -----END CERTIFICATE----- metrics: auth_key: 'topSecret' @@ -330,22 +315,7 @@ EXAMPLES = ''' security_protocol: 'ssl-with-validation-custom-ca' certificate_authority: | -----BEGIN CERTIFICATE----- - FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu - c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw - MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw - ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S - ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm - AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw - Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa - z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ - ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ - AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG - SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI - QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA - aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 - gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA - qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o - XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + FAKECERTsdKgAwI... 
-----END CERTIFICATE----- manageiq_connection: url: 'https://127.0.0.1:80' @@ -367,22 +337,7 @@ EXAMPLES = ''' security_protocol: 'ssl-with-validation-custom-ca' certificate_authority: | -----BEGIN CERTIFICATE----- - FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu - c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw - MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw - ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S - ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm - AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw - Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa - z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ - ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ - AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG - SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI - QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA - aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 - gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA - qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o - XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + FAKECERTsdKgAwI... -----END CERTIFICATE----- metrics: auth_key: 'topSecret' @@ -392,22 +347,7 @@ EXAMPLES = ''' security_protocol: 'ssl-with-validation-custom-ca' certificate_authority: | -----BEGIN CERTIFICATE----- - FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu - c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw - MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw - ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S - ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm - AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw - Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa - z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ - ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ - AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG - SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI - QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA - aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 - gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA - qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o - XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + FAKECERTsdKgAwI... 
-----END CERTIFICATE----- manageiq_connection: url: 'https://127.0.0.1' @@ -455,22 +395,7 @@ EXAMPLES = ''' validate_certs: true certificate_authority: | -----BEGIN CERTIFICATE----- - FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu - c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw - MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw - ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S - ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm - AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw - Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa - z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ - ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ - AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG - SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI - QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA - aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 - gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA - qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o - XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + FAKECERTsdKgAwI... -----END CERTIFICATE----- metrics: hostname: 'metrics.example.com' @@ -480,22 +405,7 @@ EXAMPLES = ''' validate_certs: true certificate_authority: | -----BEGIN CERTIFICATE----- - FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu - c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw - MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw - ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S - ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm - AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw - Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa - z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ - ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ - AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG - SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI - QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA - aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 - gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA - qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o - XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + FAKECERTsdKgAwI... 
-----END CERTIFICATE----- manageiq_connection: url: 'https://127.0.0.1' @@ -551,22 +461,7 @@ EXAMPLES = ''' validate_certs: 'true' certificate_authority: | -----BEGIN CERTIFICATE----- - FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu - c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw - MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw - ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S - ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm - AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw - Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa - z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ - ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ - AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG - SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI - QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA - aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 - gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA - qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o - XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + FAKECERTsdKgAwI... -----END CERTIFICATE----- ssh_keypair: hostname: director.example.com @@ -590,22 +485,7 @@ EXAMPLES = ''' validate_certs: 'true' certificate_authority: | -----BEGIN CERTIFICATE----- - FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu - c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw - MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw - ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S - ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm - AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw - Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa - z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ - ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ - AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG - SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI - QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA - aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 - gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA - qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o - XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + FAKECERTsdKgAwI... -----END CERTIFICATE----- metrics: role: amqp @@ -715,7 +595,7 @@ def delete_nulls(h): if isinstance(h, list): return [delete_nulls(i) for i in h] if isinstance(h, dict): - return dict((k, delete_nulls(v)) for k, v in h.items() if v is not None) + return {k: delete_nulls(v) for k, v in h.items() if v is not None} return h diff --git a/plugins/modules/maven_artifact.py b/plugins/modules/maven_artifact.py index 0dc020c37a..e239b4a164 100644 --- a/plugins/modules/maven_artifact.py +++ b/plugins/modules/maven_artifact.py @@ -11,7 +11,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: maven_artifact short_description: Downloads an Artifact from a Maven Repository @@ -22,7 +22,7 @@ description: author: "Chris Schmidt (@chrisisbeef)" requirements: - lxml - - boto if using a S3 repository (s3://...) 
+ - boto if using a S3 repository (V(s3://...)) attributes: check_mode: support: none @@ -32,52 +32,52 @@ options: group_id: type: str description: - - The Maven groupId coordinate + - The Maven groupId coordinate. required: true artifact_id: type: str description: - - The maven artifactId coordinate + - The maven artifactId coordinate. required: true version: type: str description: - - The maven version coordinate + - The maven version coordinate. - Mutually exclusive with O(version_by_spec). version_by_spec: type: str description: - The maven dependency version ranges. - See supported version ranges on U(https://cwiki.apache.org/confluence/display/MAVENOLD/Dependency+Mediation+and+Conflict+Resolution) - - The range type "(,1.0],[1.2,)" and "(,1.1),(1.1,)" is not supported. + - The range type V((,1.0],[1.2,\)) and V((,1.1\),(1.1,\)) is not supported. - Mutually exclusive with O(version). version_added: '0.2.0' classifier: type: str description: - - The maven classifier coordinate + - The maven classifier coordinate. default: '' extension: type: str description: - - The maven type/extension coordinate + - The maven type/extension coordinate. default: jar repository_url: type: str description: - The URL of the Maven Repository to download from. - - Use s3://... if the repository is hosted on Amazon S3, added in version 2.2. - - Use file://... if the repository is local, added in version 2.6 + - Use V(s3://...) if the repository is hosted on Amazon S3. + - Use V(file://...) if the repository is local. default: https://repo1.maven.org/maven2 username: type: str description: - - The username to authenticate as to the Maven Repository. Use AWS secret key of the repository is hosted on S3 + - The username to authenticate as to the Maven Repository. Use AWS secret key of the repository is hosted on S3. aliases: [ "aws_secret_key" ] password: type: str description: - - The password to authenticate with to the Maven Repository. Use AWS secret access key of the repository is hosted on S3 + - The password to authenticate with to the Maven Repository. Use AWS secret access key of the repository is hosted on S3. aliases: [ "aws_secret_access_key" ] headers: description: @@ -95,19 +95,19 @@ options: dest: type: path description: - - The path where the artifact should be written to - - If file mode or ownerships are specified and destination path already exists, they affect the downloaded file + - The path where the artifact should be written to. + - If file mode or ownerships are specified and destination path already exists, they affect the downloaded file. required: true state: type: str description: - - The desired state of the artifact + - The desired state of the artifact. default: present choices: [present,absent] timeout: type: int description: - - Specifies a timeout in seconds for the connection attempt + - Specifies a timeout in seconds for the connection attempt. default: 10 validate_certs: description: diff --git a/plugins/modules/memset_dns_reload.py b/plugins/modules/memset_dns_reload.py index 668c8c0bf3..8cff51ade1 100644 --- a/plugins/modules/memset_dns_reload.py +++ b/plugins/modules/memset_dns_reload.py @@ -178,9 +178,7 @@ def main(): ) # populate the dict with the user-provided vars. 
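# Editor's illustrative sketch (not part of the patch): the manageiq_provider hunk
# above rewrites delete_nulls() as a dict comprehension; the same
# dict()-generator -> {k: v, ...} conversion is applied in the lxc_container,
# lxd_container and one_service hunks. A standalone copy of the helper, run on
# invented sample data, shows the behaviour is unchanged:
def delete_nulls(h):
    # Recursively drop dict keys whose value is None; lists are walked too.
    if isinstance(h, list):
        return [delete_nulls(i) for i in h]
    if isinstance(h, dict):
        return {k: delete_nulls(v) for k, v in h.items() if v is not None}
    return h

payload = {"hostname": "cf.example.com", "port": None,
           "endpoints": [{"role": "metrics", "auth_key": None}]}
assert delete_nulls(payload) == {"hostname": "cf.example.com",
                                 "endpoints": [{"role": "metrics"}]}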
- args = dict() - for key, arg in module.params.items(): - args[key] = arg + args = dict(module.params) retvals = reload_dns(args) diff --git a/plugins/modules/memset_memstore_info.py b/plugins/modules/memset_memstore_info.py index c00ef15eb4..5dfd1f956a 100644 --- a/plugins/modules/memset_memstore_info.py +++ b/plugins/modules/memset_memstore_info.py @@ -163,9 +163,7 @@ def main(): ) # populate the dict with the user-provided vars. - args = dict() - for key, arg in module.params.items(): - args[key] = arg + args = dict(module.params) retvals = get_facts(args) diff --git a/plugins/modules/memset_server_info.py b/plugins/modules/memset_server_info.py index 78ea99df31..40862ae944 100644 --- a/plugins/modules/memset_server_info.py +++ b/plugins/modules/memset_server_info.py @@ -288,9 +288,7 @@ def main(): ) # populate the dict with the user-provided vars. - args = dict() - for key, arg in module.params.items(): - args[key] = arg + args = dict(module.params) retvals = get_facts(args) diff --git a/plugins/modules/memset_zone.py b/plugins/modules/memset_zone.py index f520d54460..e405ad3e86 100644 --- a/plugins/modules/memset_zone.py +++ b/plugins/modules/memset_zone.py @@ -300,9 +300,7 @@ def main(): ) # populate the dict with the user-provided vars. - args = dict() - for key, arg in module.params.items(): - args[key] = arg + args = dict(module.params) args['check_mode'] = module.check_mode # validate some API-specific limitations. diff --git a/plugins/modules/memset_zone_domain.py b/plugins/modules/memset_zone_domain.py index e07ac1ff02..7443e6c256 100644 --- a/plugins/modules/memset_zone_domain.py +++ b/plugins/modules/memset_zone_domain.py @@ -244,9 +244,7 @@ def main(): ) # populate the dict with the user-provided vars. - args = dict() - for key, arg in module.params.items(): - args[key] = arg + args = dict(module.params) args['check_mode'] = module.check_mode # validate some API-specific limitations. diff --git a/plugins/modules/memset_zone_record.py b/plugins/modules/memset_zone_record.py index 8406d93d21..349240b84e 100644 --- a/plugins/modules/memset_zone_record.py +++ b/plugins/modules/memset_zone_record.py @@ -181,6 +181,7 @@ def api_validation(args=None): https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create) ''' failed_validation = False + error = None # priority can only be integer 0 > 999 if not 0 <= args['priority'] <= 999: @@ -373,9 +374,7 @@ def main(): ) # populate the dict with the user-provided vars. - args = dict() - for key, arg in module.params.items(): - args[key] = arg + args = dict(module.params) args['check_mode'] = module.check_mode # perform some Memset API-specific validation diff --git a/plugins/modules/mksysb.py b/plugins/modules/mksysb.py index 8272dbf7de..d1f49ca82e 100644 --- a/plugins/modules/mksysb.py +++ b/plugins/modules/mksysb.py @@ -10,15 +10,20 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = """ --- author: Kairo Araujo (@kairoaraujo) module: mksysb short_description: Generates AIX mksysb rootvg backups description: - - This module manages a basic AIX mksysb (image) of rootvg. +- This module manages a basic AIX mksysb (image) of rootvg. +seealso: +- name: C(mksysb) command manual page + description: Manual page for the command. 
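# Editor's note (illustration only): the memset_* hunks above replace a manual
# key-by-key copy of module.params with a single dict(module.params) call. A
# minimal demo with an invented params mapping shows the two forms build the
# same shallow copy:
module_params = {"api_key": "dummy", "name": "example.com", "ttl": 300}

args_loop = dict()
for key, arg in module_params.items():
    args_loop[key] = arg

args_copy = dict(module_params)

assert args_loop == args_copy
assert args_copy is not module_params               # a new dict is created ...
assert args_copy["name"] is module_params["name"]   # ... but values are shared (shallow copy)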
+ link: https://www.ibm.com/docs/en/aix/7.3?topic=m-mksysb-command + extends_documentation_fragment: - - community.general.attributes +- community.general.attributes attributes: check_mode: support: full @@ -27,72 +32,73 @@ attributes: options: backup_crypt_files: description: - - Backup encrypted files. + - Backup encrypted files. type: bool default: true backup_dmapi_fs: description: - - Back up DMAPI filesystem files. + - Back up DMAPI filesystem files. type: bool default: true create_map_files: description: - - Creates a new MAP files. + - Creates a new MAP files. type: bool default: false exclude_files: description: - - Excludes files using C(/etc/rootvg.exclude). + - Excludes files using C(/etc/rootvg.exclude). type: bool default: false exclude_wpar_files: description: - - Excludes WPAR files. + - Excludes WPAR files. type: bool default: false extended_attrs: description: - - Backup extended attributes. + - Backup extended attributes. type: bool default: true name: type: str description: - - Backup name + - Backup name required: true new_image_data: description: - - Creates a new file data. + - Creates a new file data. type: bool default: true software_packing: description: - - Exclude files from packing option listed in - C(/etc/exclude_packing.rootvg). + - Exclude files from packing option listed in C(/etc/exclude_packing.rootvg). type: bool default: false storage_path: type: str description: - - Storage path where the mksysb will stored. + - Storage path where the mksysb will stored. required: true use_snapshot: description: - - Creates backup using snapshots. + - Creates backup using snapshots. type: bool default: false -''' +""" -EXAMPLES = ''' +EXAMPLES = """ +--- - name: Running a backup image mksysb community.general.mksysb: name: myserver storage_path: /repository/images exclude_files: true exclude_wpar_files: true -''' +""" -RETURN = ''' +RETURN = """ +--- changed: description: Return changed for mksysb actions as true or false. returned: always @@ -101,7 +107,7 @@ msg: description: Return message regarding the action. returned: always type: str -''' +""" import os @@ -138,6 +144,7 @@ class MkSysB(ModuleHelper): backup_dmapi_fs=cmd_runner_fmt.as_bool("-A"), combined_path=cmd_runner_fmt.as_func(cmd_runner_fmt.unpack_args(lambda p, n: ["%s/%s" % (p, n)])), ) + use_old_vardict = False def __init_module__(self): if not os.path.isdir(self.vars.storage_path): diff --git a/plugins/modules/modprobe.py b/plugins/modules/modprobe.py index f271b3946f..3d6a7c2410 100644 --- a/plugins/modules/modprobe.py +++ b/plugins/modules/modprobe.py @@ -46,6 +46,7 @@ options: type: str choices: [ disabled, absent, present ] default: disabled + version_added: 7.0.0 description: - Persistency between reboots for configured module. - This option creates files in C(/etc/modules-load.d/) and C(/etc/modprobe.d/) that make your module configuration persistent during reboots. 
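# Editor's sketch, not the collection's cmd_runner API: rough stand-ins for the
# two argument formatters visible in the mksysb hunk above (the same as_bool
# style is used for npm's new --force flag further below). A boolean option
# toggles a command-line flag, and the (storage_path, name) pair is collapsed
# into the single positional argument the mksysb command expects.
def as_bool(flag):
    return lambda value: [flag] if value else []

def combined_path(path, name):
    return ["%s/%s" % (path, name)]

assert as_bool("-A")(True) == ["-A"]
assert as_bool("-A")(False) == []
assert combined_path("/repository/images", "myserver") == ["/repository/images/myserver"]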
@@ -163,8 +164,9 @@ class Modprobe(object): def create_module_file(self): file_path = os.path.join(MODULES_LOAD_LOCATION, self.name + '.conf') - with open(file_path, 'w') as file: - file.write(self.name + '\n') + if not self.check_mode: + with open(file_path, 'w') as file: + file.write(self.name + '\n') @property def module_options_file_content(self): @@ -175,8 +177,9 @@ class Modprobe(object): def create_module_options_file(self): new_file_path = os.path.join(PARAMETERS_FILES_LOCATION, self.name + '.conf') - with open(new_file_path, 'w') as file: - file.write(self.module_options_file_content) + if not self.check_mode: + with open(new_file_path, 'w') as file: + file.write(self.module_options_file_content) def disable_old_params(self): @@ -190,7 +193,7 @@ class Modprobe(object): file_content[index] = '#' + line content_changed = True - if content_changed: + if not self.check_mode and content_changed: with open(modprobe_file, 'w') as file: file.write('\n'.join(file_content)) @@ -206,7 +209,7 @@ class Modprobe(object): file_content[index] = '#' + line content_changed = True - if content_changed: + if not self.check_mode and content_changed: with open(module_file, 'w') as file: file.write('\n'.join(file_content)) diff --git a/plugins/modules/nagios.py b/plugins/modules/nagios.py index 783aa88e24..0f1f0b7c50 100644 --- a/plugins/modules/nagios.py +++ b/plugins/modules/nagios.py @@ -39,8 +39,6 @@ options: action: description: - Action to take. - - servicegroup options were added in 2.0. - - delete_downtime options were added in 2.2. - The V(acknowledge) and V(forced_check) actions were added in community.general 1.2.0. required: true choices: [ "downtime", "delete_downtime", "enable_alerts", "disable_alerts", "silence", "unsilence", diff --git a/plugins/modules/nmcli.py b/plugins/modules/nmcli.py index 6f0884da92..e2803432a9 100644 --- a/plugins/modules/nmcli.py +++ b/plugins/modules/nmcli.py @@ -34,9 +34,11 @@ options: state: description: - Whether the device should exist or not, taking action if the state is different from what is stated. + - Using O(state=present) to create connection will automatically bring connection up. + - Using O(state=up) and O(state=down) will not modify connection with other parameters. These states have been added in community.general 9.5.0. type: str required: true - choices: [ absent, present ] + choices: [ absent, present, up, down ] autoconnect: description: - Whether the connection should start on boot. @@ -48,6 +50,13 @@ options: - The name used to call the connection. Pattern is [-][-]. type: str required: true + conn_reload: + description: + - Whether the connection should be reloaded if it was modified. + type: bool + required: false + default: false + version_added: 9.5.0 ifname: description: - The interface to bind the connection to. @@ -1309,6 +1318,25 @@ EXAMPLES = r''' type: ethernet state: present + - name: Change the property of a setting e.g. 
MTU and reload connection + community.general.nmcli: + conn_name: my-eth1 + mtu: 1500 + type: ethernet + state: present + conn_reload: true + + - name: Disable connection + community.general.nmcli: + conn_name: my-eth1 + state: down + + - name: Reload and enable connection + community.general.nmcli: + conn_name: my-eth1 + state: up + reload: true + - name: Add second ip4 address community.general.nmcli: conn_name: my-eth1 @@ -1581,6 +1609,7 @@ class Nmcli(object): self.ignore_unsupported_suboptions = module.params['ignore_unsupported_suboptions'] self.autoconnect = module.params['autoconnect'] self.conn_name = module.params['conn_name'] + self.conn_reload = module.params['conn_reload'] self.slave_type = module.params['slave_type'] self.master = module.params['master'] self.ifname = module.params['ifname'] @@ -1944,7 +1973,7 @@ class Nmcli(object): convert_func = self.list_to_string if callable(convert_func): - options[setting] = convert_func(options[setting]) + options[setting] = convert_func(value) return options @@ -2165,6 +2194,10 @@ class Nmcli(object): cmd = [self.nmcli_bin, 'con', 'up', self.conn_name] return self.execute_command(cmd) + def reload_connection(self): + cmd = [self.nmcli_bin, 'con', 'reload'] + return self.execute_command(cmd) + def connection_update(self, nmcli_command): if nmcli_command == 'create': cmd = [self.nmcli_bin, 'con', 'add', 'type'] @@ -2431,8 +2464,9 @@ def main(): argument_spec=dict( ignore_unsupported_suboptions=dict(type='bool', default=False), autoconnect=dict(type='bool', default=True), - state=dict(type='str', required=True, choices=['absent', 'present']), + state=dict(type='str', required=True, choices=['absent', 'present', 'up', 'down']), conn_name=dict(type='str', required=True), + conn_reload=dict(type='bool', default=False), master=dict(type='str'), slave_type=dict(type='str', choices=['bond', 'bridge', 'team', 'ovs-port']), ifname=dict(type='str'), @@ -2639,6 +2673,8 @@ def main(): if module.check_mode: module.exit_json(changed=True, **result) (rc, out, err) = nmcli.modify_connection() + if nmcli.conn_reload: + (rc, out, err) = nmcli.reload_connection() else: result['Exists'] = 'Connections already exist and no changes made' if module.check_mode: @@ -2650,6 +2686,27 @@ def main(): (rc, out, err) = nmcli.create_connection() if rc is not None and rc != 0: module.fail_json(name=nmcli.conn_name, msg=err, rc=rc) + + elif nmcli.state == 'up': + if nmcli.connection_exists(): + if module.check_mode: + module.exit_json(changed=True) + if nmcli.conn_reload: + (rc, out, err) = nmcli.reload_connection() + (rc, out, err) = nmcli.up_connection() + if rc != 0: + module.fail_json(name=('No Connection named %s exists' % nmcli.conn_name), msg=err, rc=rc) + + elif nmcli.state == 'down': + if nmcli.connection_exists(): + if module.check_mode: + module.exit_json(changed=True) + if nmcli.conn_reload: + (rc, out, err) = nmcli.reload_connection() + (rc, out, err) = nmcli.down_connection() + if rc != 0: + module.fail_json(name=('No Connection named %s exists' % nmcli.conn_name), msg=err, rc=rc) + except NmcliModuleError as e: module.fail_json(name=nmcli.conn_name, msg=str(e)) diff --git a/plugins/modules/npm.py b/plugins/modules/npm.py index e6dc0b772a..a906b2c127 100644 --- a/plugins/modules/npm.py +++ b/plugins/modules/npm.py @@ -96,6 +96,12 @@ options: type: bool default: false version_added: 2.5.0 + force: + description: + - Use the C(--force) flag when installing. 
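# Editor's simplified sketch of the control flow the nmcli hunks above add for
# the new state=up / state=down values and the conn_reload option. The command
# lists mirror the hunks; everything else (the fake runner) is invented.
def run_nmcli(args):
    print("would run:", " ".join(args))   # placeholder for module.run_command()
    return 0, "", ""

def apply_state(conn_name, state, conn_reload=False, nmcli_bin="nmcli"):
    if conn_reload:
        run_nmcli([nmcli_bin, "con", "reload"])
    if state == "up":
        return run_nmcli([nmcli_bin, "con", "up", conn_name])
    if state == "down":
        return run_nmcli([nmcli_bin, "con", "down", conn_name])
    raise ValueError("only 'up' and 'down' are handled in this sketch")

apply_state("my-eth1", "up", conn_reload=True)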
+ type: bool + default: false + version_added: 9.5.0 requirements: - npm installed in bin path (recommended /usr/local/bin) ''' @@ -117,6 +123,11 @@ EXAMPLES = r''' name: coffee-script global: true +- name: Force Install "coffee-script" node.js package. + community.general.npm: + name: coffee-script + force: true + - name: Remove the globally package "coffee-script". community.general.npm: name: coffee-script @@ -167,6 +178,7 @@ class Npm(object): self.state = kwargs['state'] self.no_optional = kwargs['no_optional'] self.no_bin_links = kwargs['no_bin_links'] + self.force = kwargs['force'] if kwargs['executable']: self.executable = kwargs['executable'].split(' ') @@ -191,6 +203,7 @@ class Npm(object): registry=cmd_runner_fmt.as_opt_val('--registry'), no_optional=cmd_runner_fmt.as_bool('--no-optional'), no_bin_links=cmd_runner_fmt.as_bool('--no-bin-links'), + force=cmd_runner_fmt.as_bool('--force'), ) ) @@ -212,7 +225,7 @@ class Npm(object): params['name_version'] = self.name_version if add_package_name else None with self.runner( - "exec_args global_ production ignore_scripts unsafe_perm name_version registry no_optional no_bin_links", + "exec_args global_ production ignore_scripts unsafe_perm name_version registry no_optional no_bin_links force", check_rc=check_rc, cwd=cwd ) as ctx: rc, out, err = ctx.run(**params) @@ -289,6 +302,7 @@ def main(): ci=dict(default=False, type='bool'), no_optional=dict(default=False, type='bool'), no_bin_links=dict(default=False, type='bool'), + force=dict(default=False, type='bool'), ) arg_spec['global'] = dict(default=False, type='bool') module = AnsibleModule( @@ -318,7 +332,8 @@ def main(): unsafe_perm=module.params['unsafe_perm'], state=state, no_optional=module.params['no_optional'], - no_bin_links=module.params['no_bin_links']) + no_bin_links=module.params['no_bin_links'], + force=module.params['force']) changed = False if module.params['ci']: diff --git a/plugins/modules/nsupdate.py b/plugins/modules/nsupdate.py index 63750165ca..c9a6ba2133 100644 --- a/plugins/modules/nsupdate.py +++ b/plugins/modules/nsupdate.py @@ -370,7 +370,8 @@ class RecordManager(object): except (socket_error, dns.exception.Timeout) as e: self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e))) - entries_to_remove = [n.to_text() for n in lookup.answer[0].items if n.to_text() not in self.value] + lookup_result = lookup.answer[0] if lookup.answer else lookup.authority[0] + entries_to_remove = [n.to_text() for n in lookup_result.items if n.to_text() not in self.value] else: update.delete(self.module.params['record'], self.module.params['type']) diff --git a/plugins/modules/one_host.py b/plugins/modules/one_host.py index eea1121733..6188f3d0f7 100644 --- a/plugins/modules/one_host.py +++ b/plugins/modules/one_host.py @@ -152,16 +152,19 @@ class HostModule(OpenNebulaModule): def allocate_host(self): """ Creates a host entry in OpenNebula + self.one.host.allocate returns ID of a host Returns: True on success, fails otherwise. 
""" - if not self.one.host.allocate(self.get_parameter('name'), - self.get_parameter('vmm_mad_name'), - self.get_parameter('im_mad_name'), - self.get_parameter('cluster_id')): - self.fail(msg="could not allocate host") - else: + try: + self.one.host.allocate(self.get_parameter('name'), + self.get_parameter('vmm_mad_name'), + self.get_parameter('im_mad_name'), + self.get_parameter('cluster_id')) self.result['changed'] = True + except Exception as e: + self.fail(msg="Could not allocate host, ERROR: " + str(e)) + return True def wait_for_host_state(self, host, target_states): @@ -221,11 +224,13 @@ class HostModule(OpenNebulaModule): if current_state == HOST_ABSENT: self.fail(msg='absent host cannot be put in disabled state') elif current_state in [HOST_STATES.MONITORED, HOST_STATES.OFFLINE]: - if one.host.status(host.ID, HOST_STATUS.DISABLED): - self.wait_for_host_state(host, [HOST_STATES.DISABLED]) + # returns host ID integer + try: + one.host.status(host.ID, HOST_STATUS.DISABLED) result['changed'] = True - else: - self.fail(msg="could not disable host") + except Exception as e: + self.fail(msg="Could not disable host, ERROR: " + str(e)) + self.wait_for_host_state(host, [HOST_STATES.DISABLED]) elif current_state in [HOST_STATES.DISABLED]: pass else: @@ -235,11 +240,13 @@ class HostModule(OpenNebulaModule): if current_state == HOST_ABSENT: self.fail(msg='absent host cannot be placed in offline state') elif current_state in [HOST_STATES.MONITORED, HOST_STATES.DISABLED]: - if one.host.status(host.ID, HOST_STATUS.OFFLINE): - self.wait_for_host_state(host, [HOST_STATES.OFFLINE]) + # returns host ID integer + try: + one.host.status(host.ID, HOST_STATUS.OFFLINE) result['changed'] = True - else: - self.fail(msg="could not set host offline") + except Exception as e: + self.fail(msg="Could not set host offline, ERROR: " + str(e)) + self.wait_for_host_state(host, [HOST_STATES.OFFLINE]) elif current_state in [HOST_STATES.OFFLINE]: pass else: @@ -247,10 +254,12 @@ class HostModule(OpenNebulaModule): elif desired_state == 'absent': if current_state != HOST_ABSENT: - if one.host.delete(host.ID): + # returns host ID integer + try: + one.host.delete(host.ID) result['changed'] = True - else: - self.fail(msg="could not delete host from cluster") + except Exception as e: + self.fail(msg="Could not delete host from cluster, ERROR: " + str(e)) # if we reach this point we can assume that the host was taken to the desired state @@ -268,17 +277,21 @@ class HostModule(OpenNebulaModule): if self.requires_template_update(host.TEMPLATE, desired_template_changes): # setup the root element so that pyone will generate XML instead of attribute vector desired_template_changes = {"TEMPLATE": desired_template_changes} - if one.host.update(host.ID, desired_template_changes, 1): # merge the template + # merge the template, returns host ID integer + try: + one.host.update(host.ID, desired_template_changes, 1) result['changed'] = True - else: - self.fail(msg="failed to update the host template") + except Exception as e: + self.fail(msg="Failed to update the host template, ERROR: " + str(e)) # the cluster if host.CLUSTER_ID != self.get_parameter('cluster_id'): - if one.cluster.addhost(self.get_parameter('cluster_id'), host.ID): + # returns cluster id in int + try: + one.cluster.addhost(self.get_parameter('cluster_id'), host.ID) result['changed'] = True - else: - self.fail(msg="failed to update the host cluster") + except Exception as e: + self.fail(msg="Failed to update the host cluster, ERROR: " + str(e)) # return self.exit() diff 
--git a/plugins/modules/one_image.py b/plugins/modules/one_image.py index a0081a0fe0..5877142cdf 100644 --- a/plugins/modules/one_image.py +++ b/plugins/modules/one_image.py @@ -17,6 +17,7 @@ description: requirements: - pyone extends_documentation_fragment: + - community.general.opennebula - community.general.attributes attributes: check_mode: @@ -24,23 +25,6 @@ attributes: diff_mode: support: none options: - api_url: - description: - - URL of the OpenNebula RPC server. - - It is recommended to use HTTPS so that the username/password are not - - transferred over the network unencrypted. - - If not set then the value of the E(ONE_URL) environment variable is used. - type: str - api_username: - description: - - Name of the user to login into the OpenNebula RPC server. If not set - - then the value of the E(ONE_USERNAME) environment variable is used. - type: str - api_password: - description: - - Password of the user to login into OpenNebula RPC server. If not set - - then the value of the E(ONE_PASSWORD) environment variable is used. - type: str id: description: - A O(id) of the image you would like to manage. @@ -67,6 +51,11 @@ options: - A name that will be assigned to the existing or new image. - In the case of cloning, by default O(new_name) will take the name of the origin image with the prefix 'Copy of'. type: str + persistent: + description: + - Whether the image should be persistent or non-persistent. + type: bool + version_added: 9.5.0 author: - "Milan Ilic (@ilicmilan)" ''' @@ -92,6 +81,11 @@ EXAMPLES = ''' id: 37 enabled: false +- name: Make the IMAGE persistent + community.general.one_image: + id: 37 + persistent: true + - name: Enable the IMAGE by name community.general.one_image: name: bar-image @@ -114,300 +108,448 @@ RETURN = ''' id: description: image id type: int - returned: success + returned: when O(state=present), O(state=cloned), or O(state=renamed) sample: 153 name: description: image name type: str - returned: success + returned: when O(state=present), O(state=cloned), or O(state=renamed) sample: app1 group_id: description: image's group id type: int - returned: success + returned: when O(state=present), O(state=cloned), or O(state=renamed) sample: 1 group_name: description: image's group name type: str - returned: success + returned: when O(state=present), O(state=cloned), or O(state=renamed) sample: one-users owner_id: description: image's owner id type: int - returned: success + returned: when O(state=present), O(state=cloned), or O(state=renamed) sample: 143 owner_name: description: image's owner name type: str - returned: success + returned: when O(state=present), O(state=cloned), or O(state=renamed) sample: ansible-test state: description: state of image instance type: str - returned: success + returned: when O(state=present), O(state=cloned), or O(state=renamed) sample: READY used: description: is image in use type: bool - returned: success + returned: when O(state=present), O(state=cloned), or O(state=renamed) sample: true running_vms: description: count of running vms that use this image type: int - returned: success + returned: when O(state=present), O(state=cloned), or O(state=renamed) sample: 7 +permissions: + description: The image's permissions. + type: dict + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 + contains: + owner_u: + description: The image's owner USAGE permissions. + type: str + sample: 1 + owner_m: + description: The image's owner MANAGE permissions. 
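# Editor's note (illustration, not the module code): the one_host.py hunks
# further up switch from testing the XML-RPC return value to try/except,
# because pyone raises on failure instead of returning False. A generic wrapper
# with invented stand-ins shows the shape of that change:
def call_one(fail, description, func, *args):
    # 'fail' stands in for self.fail(...); 'func' for a pyone call such as
    # one.host.allocate or one.host.status.
    try:
        func(*args)
        return True          # the module marks result['changed'] = True here
    except Exception as e:   # pyone surfaces API errors as exceptions
        fail("Could not %s, ERROR: %s" % (description, e))

def fake_fail(msg):
    raise SystemExit(msg)

def fake_allocate(name):
    return 42                # pyone returns the new host ID on success

assert call_one(fake_fail, "allocate host", fake_allocate, "node1") is True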
+ type: str + sample: 0 + owner_a: + description: The image's owner ADMIN permissions. + type: str + sample: 0 + group_u: + description: The image's group USAGE permissions. + type: str + sample: 0 + group_m: + description: The image's group MANAGE permissions. + type: str + sample: 0 + group_a: + description: The image's group ADMIN permissions. + type: str + sample: 0 + other_u: + description: The image's other users USAGE permissions. + type: str + sample: 0 + other_m: + description: The image's other users MANAGE permissions. + type: str + sample: 0 + other_a: + description: The image's other users ADMIN permissions + type: str + sample: 0 + sample: + owner_u: 1 + owner_m: 0 + owner_a: 0 + group_u: 0 + group_m: 0 + group_a: 0 + other_u: 0 + other_m: 0 + other_a: 0 +type: + description: The image's type. + type: str + sample: 0 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +disk_type: + description: The image's format type. + type: str + sample: 0 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +persistent: + description: The image's persistence status (1 means true, 0 means false). + type: int + sample: 1 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +source: + description: The image's source. + type: str + sample: /var/lib/one//datastores/100/somerandomstringxd + returned: when O(state=present), O(state=cloned), or O(state=renamed) +path: + description: The image's filesystem path. + type: str + sample: /var/tmp/hello.qcow2 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +fstype: + description: The image's filesystem type. + type: str + sample: ext4 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +size: + description: The image's size in MegaBytes. + type: int + sample: 10000 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +cloning_ops: + description: The image's cloning operations per second. + type: int + sample: 0 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +cloning_id: + description: The image's cloning ID. + type: int + sample: -1 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +target_snapshot: + description: The image's target snapshot. + type: int + sample: 1 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +datastore_id: + description: The image's datastore ID. + type: int + sample: 100 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +datastore: + description: The image's datastore name. + type: int + sample: image_datastore + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +vms: + description: The image's list of vm ID's. + type: list + elements: int + returned: when O(state=present), O(state=cloned), or O(state=renamed) + sample: + - 1 + - 2 + - 3 + version_added: 9.5.0 +clones: + description: The image's list of clones ID's. + type: list + elements: int + returned: when O(state=present), O(state=cloned), or O(state=renamed) + sample: + - 1 + - 2 + - 3 + version_added: 9.5.0 +app_clones: + description: The image's list of app_clones ID's. 
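# Editor's aside (not part of the patch): the refactored one_image code below
# deliberately tests "if id is not None:" instead of "if id:", as its comments
# explain, because image ID 0 is valid but falsy. Tiny demo of the pitfall:
requested_id = 0

if requested_id:
    looked_up_by = "id (truthiness test)"
else:
    looked_up_by = "name"              # wrong branch: ID 0 is silently ignored

assert looked_up_by == "name"          # the buggy pattern misroutes ID 0
assert requested_id is not None        # the explicit None check handles ID 0 correctly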
+ type: list + elements: int + returned: when O(state=present), O(state=cloned), or O(state=renamed) + sample: + - 1 + - 2 + - 3 + version_added: 9.5.0 +snapshots: + description: The image's list of snapshots. + type: list + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 + sample: + - date: 123123 + parent: 1 + size: 10228 + allow_orphans: 1 + children: 0 + active: 1 + name: SampleName ''' -try: - import pyone - HAS_PYONE = True -except ImportError: - HAS_PYONE = False -from ansible.module_utils.basic import AnsibleModule -import os - - -def get_image(module, client, predicate): - # Filter -2 means fetch all images user can Use - pool = client.imagepool.info(-2, -1, -1, -1) - - for image in pool.IMAGE: - if predicate(image): - return image - - return None - - -def get_image_by_name(module, client, image_name): - return get_image(module, client, lambda image: (image.NAME == image_name)) - - -def get_image_by_id(module, client, image_id): - return get_image(module, client, lambda image: (image.ID == image_id)) - - -def get_image_instance(module, client, requested_id, requested_name): - if requested_id: - return get_image_by_id(module, client, requested_id) - else: - return get_image_by_name(module, client, requested_name) +from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS'] -def get_image_info(image): - info = { - 'id': image.ID, - 'name': image.NAME, - 'state': IMAGE_STATES[image.STATE], - 'running_vms': image.RUNNING_VMS, - 'used': bool(image.RUNNING_VMS), - 'user_name': image.UNAME, - 'user_id': image.UID, - 'group_name': image.GNAME, - 'group_id': image.GID, - } +class ImageModule(OpenNebulaModule): + def __init__(self): + argument_spec = dict( + id=dict(type='int', required=False), + name=dict(type='str', required=False), + state=dict(type='str', choices=['present', 'absent', 'cloned', 'renamed'], default='present'), + enabled=dict(type='bool', required=False), + new_name=dict(type='str', required=False), + persistent=dict(type='bool', required=False), + ) + required_if = [ + ['state', 'renamed', ['id']] + ] + mutually_exclusive = [ + ['id', 'name'], + ] - return info + OpenNebulaModule.__init__(self, + argument_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + required_if=required_if) + def run(self, one, module, result): + params = module.params + id = params.get('id') + name = params.get('name') + desired_state = params.get('state') + enabled = params.get('enabled') + new_name = params.get('new_name') + persistent = params.get('persistent') -def wait_for_state(module, client, image_id, wait_timeout, state_predicate): - import time - start_time = time.time() + self.result = {} + + image = self.get_image_instance(id, name) + if not image and desired_state != 'absent': + # Using 'if id:' doesn't work properly when id=0 + if id is not None: + module.fail_json(msg="There is no image with id=" + str(id)) + elif name is not None: + module.fail_json(msg="There is no image with name=" + name) + + if desired_state == 'absent': + self.result = self.delete_image(image) + else: + if persistent is not None: + self.result = self.change_persistence(image, persistent) + if enabled is not None: + self.result = self.enable_image(image, enabled) + if desired_state == "cloned": + self.result = self.clone_image(image, new_name) + elif desired_state == 
"renamed": + self.result = self.rename_image(image, new_name) + + self.exit() + + def get_image(self, predicate): + # Filter -2 means fetch all images user can Use + pool = self.one.imagepool.info(-2, -1, -1, -1) + + for image in pool.IMAGE: + if predicate(image): + return image + + return None + + def get_image_by_name(self, image_name): + return self.get_image(lambda image: (image.NAME == image_name)) + + def get_image_by_id(self, image_id): + return self.get_image(lambda image: (image.ID == image_id)) + + def get_image_instance(self, requested_id, requested_name): + # Using 'if requested_id:' doesn't work properly when requested_id=0 + if requested_id is not None: + return self.get_image_by_id(requested_id) + else: + return self.get_image_by_name(requested_name) + + def wait_for_ready(self, image_id, wait_timeout=60): + import time + start_time = time.time() + + while (time.time() - start_time) < wait_timeout: + image = self.one.image.info(image_id) + state = image.STATE + + if state in [IMAGE_STATES.index('ERROR')]: + self.module.fail_json(msg="Got an ERROR state: " + image.TEMPLATE['ERROR']) + + if state in [IMAGE_STATES.index('READY')]: + return True + + time.sleep(1) + self.module.fail_json(msg="Wait timeout has expired!") + + def wait_for_delete(self, image_id, wait_timeout=60): + import time + start_time = time.time() + + while (time.time() - start_time) < wait_timeout: + # It might be already deleted by the time this function is called + try: + image = self.one.image.info(image_id) + except Exception: + check_image = self.get_image_instance(image_id) + if not check_image: + return True + + state = image.STATE + + if state in [IMAGE_STATES.index('DELETE')]: + return True + + time.sleep(1) + + self.module.fail_json(msg="Wait timeout has expired!") + + def enable_image(self, image, enable): + image = self.one.image.info(image.ID) + changed = False - while (time.time() - start_time) < wait_timeout: - image = client.image.info(image_id) state = image.STATE - if state_predicate(state): - return image + if state not in [IMAGE_STATES.index('READY'), IMAGE_STATES.index('DISABLED'), IMAGE_STATES.index('ERROR')]: + if enable: + self.module.fail_json(msg="Cannot enable " + IMAGE_STATES[state] + " image!") + else: + self.module.fail_json(msg="Cannot disable " + IMAGE_STATES[state] + " image!") - time.sleep(1) + if ((enable and state != IMAGE_STATES.index('READY')) or + (not enable and state != IMAGE_STATES.index('DISABLED'))): + changed = True - module.fail_json(msg="Wait timeout has expired!") + if changed and not self.module.check_mode: + self.one.image.enable(image.ID, enable) + result = self.get_image_info(image) + result['changed'] = changed -def wait_for_ready(module, client, image_id, wait_timeout=60): - return wait_for_state(module, client, image_id, wait_timeout, lambda state: (state in [IMAGE_STATES.index('READY')])) - - -def wait_for_delete(module, client, image_id, wait_timeout=60): - return wait_for_state(module, client, image_id, wait_timeout, lambda state: (state in [IMAGE_STATES.index('DELETE')])) - - -def enable_image(module, client, image, enable): - image = client.image.info(image.ID) - changed = False - - state = image.STATE - - if state not in [IMAGE_STATES.index('READY'), IMAGE_STATES.index('DISABLED'), IMAGE_STATES.index('ERROR')]: - if enable: - module.fail_json(msg="Cannot enable " + IMAGE_STATES[state] + " image!") - else: - module.fail_json(msg="Cannot disable " + IMAGE_STATES[state] + " image!") - - if ((enable and state != IMAGE_STATES.index('READY')) or - (not 
enable and state != IMAGE_STATES.index('DISABLED'))): - changed = True - - if changed and not module.check_mode: - client.image.enable(image.ID, enable) - - result = get_image_info(image) - result['changed'] = changed - - return result - - -def clone_image(module, client, image, new_name): - if new_name is None: - new_name = "Copy of " + image.NAME - - tmp_image = get_image_by_name(module, client, new_name) - if tmp_image: - result = get_image_info(tmp_image) - result['changed'] = False return result - if image.STATE == IMAGE_STATES.index('DISABLED'): - module.fail_json(msg="Cannot clone DISABLED image") + def change_persistence(self, image, enable): + image = self.one.image.info(image.ID) + changed = False - if not module.check_mode: - new_id = client.image.clone(image.ID, new_name) - wait_for_ready(module, client, new_id) - image = client.image.info(new_id) + state = image.STATE - result = get_image_info(image) - result['changed'] = True + if state not in [IMAGE_STATES.index('READY'), IMAGE_STATES.index('DISABLED'), IMAGE_STATES.index('ERROR')]: + if enable: + self.module.fail_json(msg="Cannot enable persistence for " + IMAGE_STATES[state] + " image!") + else: + self.module.fail_json(msg="Cannot disable persistence for " + IMAGE_STATES[state] + " image!") - return result + if ((enable and state != IMAGE_STATES.index('READY')) or + (not enable and state != IMAGE_STATES.index('DISABLED'))): + changed = True + if changed and not self.module.check_mode: + self.one.image.persistent(image.ID, enable) -def rename_image(module, client, image, new_name): - if new_name is None: - module.fail_json(msg="'new_name' option has to be specified when the state is 'renamed'") + result = self.get_image_info(image) + result['changed'] = changed - if new_name == image.NAME: - result = get_image_info(image) - result['changed'] = False return result - tmp_image = get_image_by_name(module, client, new_name) - if tmp_image: - module.fail_json(msg="Name '" + new_name + "' is already taken by IMAGE with id=" + str(tmp_image.ID)) + def clone_image(self, image, new_name): + if new_name is None: + new_name = "Copy of " + image.NAME - if not module.check_mode: - client.image.rename(image.ID, new_name) + tmp_image = self.get_image_by_name(new_name) + if tmp_image: + result = self.get_image_info(image) + result['changed'] = False + return result - result = get_image_info(image) - result['changed'] = True - return result + if image.STATE == IMAGE_STATES.index('DISABLED'): + self.module.fail_json(msg="Cannot clone DISABLED image") + if not self.module.check_mode: + new_id = self.one.image.clone(image.ID, new_name) + self.wait_for_ready(new_id) + image = self.one.image.info(new_id) -def delete_image(module, client, image): + result = self.get_image_info(image) + result['changed'] = True - if not image: - return {'changed': False} + return result - if image.RUNNING_VMS > 0: - module.fail_json(msg="Cannot delete image. 
There are " + str(image.RUNNING_VMS) + " VMs using it.") + def rename_image(self, image, new_name): + if new_name is None: + self.module.fail_json(msg="'new_name' option has to be specified when the state is 'renamed'") - if not module.check_mode: - client.image.delete(image.ID) - wait_for_delete(module, client, image.ID) + if new_name == image.NAME: + result = self.get_image_info(image) + result['changed'] = False + return result - return {'changed': True} + tmp_image = self.get_image_by_name(new_name) + if tmp_image: + self.module.fail_json(msg="Name '" + new_name + "' is already taken by IMAGE with id=" + str(tmp_image.ID)) + if not self.module.check_mode: + self.one.image.rename(image.ID, new_name) -def get_connection_info(module): + result = self.get_image_info(image) + result['changed'] = True + return result - url = module.params.get('api_url') - username = module.params.get('api_username') - password = module.params.get('api_password') + def delete_image(self, image): + if not image: + return {'changed': False} - if not url: - url = os.environ.get('ONE_URL') + if image.RUNNING_VMS > 0: + self.module.fail_json(msg="Cannot delete image. There are " + str(image.RUNNING_VMS) + " VMs using it.") - if not username: - username = os.environ.get('ONE_USERNAME') + if not self.module.check_mode: + self.one.image.delete(image.ID) + self.wait_for_delete(image.ID) - if not password: - password = os.environ.get('ONE_PASSWORD') - - if not (url and username and password): - module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified") - from collections import namedtuple - - auth_params = namedtuple('auth', ('url', 'username', 'password')) - - return auth_params(url=url, username=username, password=password) + return {'changed': True} def main(): - fields = { - "api_url": {"required": False, "type": "str"}, - "api_username": {"required": False, "type": "str"}, - "api_password": {"required": False, "type": "str", "no_log": True}, - "id": {"required": False, "type": "int"}, - "name": {"required": False, "type": "str"}, - "state": { - "default": "present", - "choices": ['present', 'absent', 'cloned', 'renamed'], - "type": "str" - }, - "enabled": {"required": False, "type": "bool"}, - "new_name": {"required": False, "type": "str"}, - } - - module = AnsibleModule(argument_spec=fields, - mutually_exclusive=[['id', 'name']], - supports_check_mode=True) - - if not HAS_PYONE: - module.fail_json(msg='This module requires pyone to work!') - - auth = get_connection_info(module) - params = module.params - id = params.get('id') - name = params.get('name') - state = params.get('state') - enabled = params.get('enabled') - new_name = params.get('new_name') - client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password) - - result = {} - - if not id and state == 'renamed': - module.fail_json(msg="Option 'id' is required when the state is 'renamed'") - - image = get_image_instance(module, client, id, name) - if not image and state != 'absent': - if id: - module.fail_json(msg="There is no image with id=" + str(id)) - else: - module.fail_json(msg="There is no image with name=" + name) - - if state == 'absent': - result = delete_image(module, client, image) - else: - result = get_image_info(image) - changed = False - result['changed'] = False - - if enabled is not None: - result = enable_image(module, client, image, enabled) - if state == "cloned": - result = clone_image(module, client, image, new_name) - elif state == "renamed": - result = 
rename_image(module, client, image, new_name) - - changed = changed or result['changed'] - result['changed'] = changed - - module.exit_json(**result) + ImageModule().run_module() if __name__ == '__main__': diff --git a/plugins/modules/one_image_info.py b/plugins/modules/one_image_info.py index c9d7c4035f..4bc48dfda1 100644 --- a/plugins/modules/one_image_info.py +++ b/plugins/modules/one_image_info.py @@ -17,29 +17,14 @@ description: requirements: - pyone extends_documentation_fragment: + - community.general.opennebula - community.general.attributes - community.general.attributes.info_module options: - api_url: - description: - - URL of the OpenNebula RPC server. - - It is recommended to use HTTPS so that the username/password are not - - transferred over the network unencrypted. - - If not set then the value of the E(ONE_URL) environment variable is used. - type: str - api_username: - description: - - Name of the user to login into the OpenNebula RPC server. If not set - - then the value of the E(ONE_USERNAME) environment variable is used. - type: str - api_password: - description: - - Password of the user to login into OpenNebula RPC server. If not set - - then the value of the E(ONE_PASSWORD) environment variable is used. - type: str ids: description: - A list of images ids whose facts you want to gather. + - Module can use integers too. aliases: ['id'] type: list elements: str @@ -66,9 +51,16 @@ EXAMPLES = ''' msg: result - name: Gather facts about an image using ID + community.general.one_image_info: + ids: 123 + +- name: Gather facts about an image using list of ID community.general.one_image_info: ids: - 123 + - 456 + - 789 + - 0 - name: Gather facts about an image using the name community.general.one_image_info: @@ -93,182 +85,285 @@ images: returned: success contains: id: - description: image id + description: The image's id. type: int sample: 153 name: - description: image name + description: The image's name. type: str sample: app1 group_id: - description: image's group id + description: The image's group id type: int sample: 1 group_name: - description: image's group name + description: The image's group name. type: str sample: one-users owner_id: - description: image's owner id + description: The image's owner id. type: int sample: 143 owner_name: - description: image's owner name + description: The image's owner name. type: str sample: ansible-test state: - description: state of image instance + description: The image's state. type: str sample: READY used: - description: is image in use + description: The image's usage status. type: bool sample: true running_vms: - description: count of running vms that use this image + description: The image's count of running vms that use this image. type: int sample: 7 + permissions: + description: The image's permissions. + type: dict + version_added: 9.5.0 + contains: + owner_u: + description: The image's owner USAGE permissions. + type: str + sample: 1 + owner_m: + description: The image's owner MANAGE permissions. + type: str + sample: 0 + owner_a: + description: The image's owner ADMIN permissions. + type: str + sample: 0 + group_u: + description: The image's group USAGE permissions. + type: str + sample: 0 + group_m: + description: The image's group MANAGE permissions. + type: str + sample: 0 + group_a: + description: The image's group ADMIN permissions. + type: str + sample: 0 + other_u: + description: The image's other users USAGE permissions. 
+ type: str + sample: 0 + other_m: + description: The image's other users MANAGE permissions. + type: str + sample: 0 + other_a: + description: The image's other users ADMIN permissions + type: str + sample: 0 + sample: + owner_u: 1 + owner_m: 0 + owner_a: 0 + group_u: 0 + group_m: 0 + group_a: 0 + other_u: 0 + other_m: 0 + other_a: 0 + type: + description: The image's type. + type: int + sample: 0 + version_added: 9.5.0 + disk_type: + description: The image's format type. + type: int + sample: 0 + version_added: 9.5.0 + persistent: + description: The image's persistence status (1 means true, 0 means false). + type: int + sample: 1 + version_added: 9.5.0 + source: + description: The image's source. + type: str + sample: /var/lib/one//datastores/100/somerandomstringxd + version_added: 9.5.0 + path: + description: The image's filesystem path. + type: str + sample: /var/tmp/hello.qcow2 + version_added: 9.5.0 + fstype: + description: The image's filesystem type. + type: str + sample: ext4 + version_added: 9.5.0 + size: + description: The image's size in MegaBytes. + type: int + sample: 10000 + version_added: 9.5.0 + cloning_ops: + description: The image's cloning operations per second. + type: int + sample: 0 + version_added: 9.5.0 + cloning_id: + description: The image's cloning ID. + type: int + sample: -1 + version_added: 9.5.0 + target_snapshot: + description: The image's target snapshot. + type: int + sample: 1 + version_added: 9.5.0 + datastore_id: + description: The image's datastore ID. + type: int + sample: 100 + version_added: 9.5.0 + datastore: + description: The image's datastore name. + type: int + sample: image_datastore + version_added: 9.5.0 + vms: + description: The image's list of vm ID's. + type: list + elements: int + version_added: 9.5.0 + sample: + - 1 + - 2 + - 3 + clones: + description: The image's list of clones ID's. + type: list + elements: int + version_added: 9.5.0 + sample: + - 1 + - 2 + - 3 + app_clones: + description: The image's list of app_clones ID's. + type: list + elements: int + version_added: 9.5.0 + sample: + - 1 + - 2 + - 3 + snapshots: + description: The image's list of snapshots. 
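# Editor's illustration of the name-matching convention implemented by
# get_images_by_name() in the one_image_info hunks just below: a leading '~'
# switches to regex matching, and '~*' makes the regex case-insensitive.
# The image names used here are invented.
import re

def match_names(name_pattern, names):
    if name_pattern.startswith('~'):
        if name_pattern[1] == '*':
            pattern = re.compile(name_pattern[2:], re.IGNORECASE)
        else:
            pattern = re.compile(name_pattern[1:])
        return [n for n in names if pattern.match(n)]
    # plain (exact) name: first match only, as in the module
    return [n for n in names if n == name_pattern][:1]

names = ['app1-image', 'APP2-IMAGE', 'bar-image']
assert match_names('bar-image', names) == ['bar-image']
assert match_names('~app', names) == ['app1-image']
assert match_names('~*app', names) == ['app1-image', 'APP2-IMAGE']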
+ type: list + version_added: 9.5.0 + sample: + - date: 123123 + parent: 1 + size: 10228 + allow_orphans: 1 + children: 0 + active: 1 + name: SampleName ''' -try: - import pyone - HAS_PYONE = True -except ImportError: - HAS_PYONE = False -from ansible.module_utils.basic import AnsibleModule -import os - - -def get_all_images(client): - pool = client.imagepool.info(-2, -1, -1, -1) - # Filter -2 means fetch all images user can Use - - return pool +from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS'] -def get_image_info(image): - info = { - 'id': image.ID, - 'name': image.NAME, - 'state': IMAGE_STATES[image.STATE], - 'running_vms': image.RUNNING_VMS, - 'used': bool(image.RUNNING_VMS), - 'user_name': image.UNAME, - 'user_id': image.UID, - 'group_name': image.GNAME, - 'group_id': image.GID, - } - return info +class ImageInfoModule(OpenNebulaModule): + def __init__(self): + argument_spec = dict( + ids=dict(type='list', aliases=['id'], elements='str', required=False), + name=dict(type='str', required=False), + ) + mutually_exclusive = [ + ['ids', 'name'], + ] + OpenNebulaModule.__init__(self, + argument_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive) -def get_images_by_ids(module, client, ids): - images = [] - pool = get_all_images(client) + def run(self, one, module, result): + params = module.params + ids = params.get('ids') + name = params.get('name') - for image in pool.IMAGE: - if str(image.ID) in ids: - images.append(image) - ids.remove(str(image.ID)) - if len(ids) == 0: + if ids: + images = self.get_images_by_ids(ids) + elif name: + images = self.get_images_by_name(name) + else: + images = self.get_all_images().IMAGE + + self.result = { + 'images': [self.get_image_info(image) for image in images] + } + + self.exit() + + def get_all_images(self): + pool = self.one.imagepool.info(-2, -1, -1, -1) + # Filter -2 means fetch all images user can Use + + return pool + + def get_images_by_ids(self, ids): + images = [] + pool = self.get_all_images() + + for image in pool.IMAGE: + if str(image.ID) in ids: + images.append(image) + ids.remove(str(image.ID)) + if len(ids) == 0: + break + + if len(ids) > 0: + self.module.fail_json(msg='There is no IMAGE(s) with id(s)=' + ', '.join('{id}'.format(id=str(image_id)) for image_id in ids)) + + return images + + def get_images_by_name(self, name_pattern): + images = [] + pattern = None + + pool = self.get_all_images() + + if name_pattern.startswith('~'): + import re + if name_pattern[1] == '*': + pattern = re.compile(name_pattern[2:], re.IGNORECASE) + else: + pattern = re.compile(name_pattern[1:]) + + for image in pool.IMAGE: + if pattern is not None: + if pattern.match(image.NAME): + images.append(image) + elif name_pattern == image.NAME: + images.append(image) break - if len(ids) > 0: - module.fail_json(msg='There is no IMAGE(s) with id(s)=' + ', '.join('{id}'.format(id=str(image_id)) for image_id in ids)) + # if the specific name is indicated + if pattern is None and len(images) == 0: + self.module.fail_json(msg="There is no IMAGE with name=" + name_pattern) - return images - - -def get_images_by_name(module, client, name_pattern): - - images = [] - pattern = None - - pool = get_all_images(client) - - if name_pattern.startswith('~'): - import re - if name_pattern[1] == '*': - pattern = re.compile(name_pattern[2:], re.IGNORECASE) - else: - pattern = 
re.compile(name_pattern[1:]) - - for image in pool.IMAGE: - if pattern is not None: - if pattern.match(image.NAME): - images.append(image) - elif name_pattern == image.NAME: - images.append(image) - break - - # if the specific name is indicated - if pattern is None and len(images) == 0: - module.fail_json(msg="There is no IMAGE with name=" + name_pattern) - - return images - - -def get_connection_info(module): - - url = module.params.get('api_url') - username = module.params.get('api_username') - password = module.params.get('api_password') - - if not url: - url = os.environ.get('ONE_URL') - - if not username: - username = os.environ.get('ONE_USERNAME') - - if not password: - password = os.environ.get('ONE_PASSWORD') - - if not (url and username and password): - module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified") - from collections import namedtuple - - auth_params = namedtuple('auth', ('url', 'username', 'password')) - - return auth_params(url=url, username=username, password=password) + return images def main(): - fields = { - "api_url": {"required": False, "type": "str"}, - "api_username": {"required": False, "type": "str"}, - "api_password": {"required": False, "type": "str", "no_log": True}, - "ids": {"required": False, "aliases": ['id'], "type": "list", "elements": "str"}, - "name": {"required": False, "type": "str"}, - } - - module = AnsibleModule(argument_spec=fields, - mutually_exclusive=[['ids', 'name']], - supports_check_mode=True) - - if not HAS_PYONE: - module.fail_json(msg='This module requires pyone to work!') - - auth = get_connection_info(module) - params = module.params - ids = params.get('ids') - name = params.get('name') - client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password) - - if ids: - images = get_images_by_ids(module, client, ids) - elif name: - images = get_images_by_name(module, client, name) - else: - images = get_all_images(client).IMAGE - - result = { - 'images': [get_image_info(image) for image in images], - } - - module.exit_json(**result) + ImageInfoModule().run_module() if __name__ == '__main__': diff --git a/plugins/modules/one_service.py b/plugins/modules/one_service.py index 81b42c0ecc..25ead72c1d 100644 --- a/plugins/modules/one_service.py +++ b/plugins/modules/one_service.py @@ -339,7 +339,7 @@ def get_service_info(module, auth, service): def create_service(module, auth, template_id, service_name, custom_attrs, unique, wait, wait_timeout): # make sure that the values in custom_attrs dict are strings - custom_attrs_with_str = dict((k, str(v)) for k, v in custom_attrs.items()) + custom_attrs_with_str = {k: str(v) for k, v in custom_attrs.items()} data = { "action": { @@ -522,7 +522,7 @@ def create_service_and_operation(module, auth, template_id, service_name, owner_ if unique: service = get_service_by_name(module, auth, service_name) - if not service: + if not service or service["TEMPLATE"]["BODY"]["state"] == "DONE": if not module.check_mode: service = create_service(module, auth, template_id, service_name, custom_attrs, unique, wait, wait_timeout) changed = True @@ -637,7 +637,6 @@ def get_service_id_by_name(module, auth, service_name): def get_connection_info(module): - url = module.params.get('api_url') username = module.params.get('api_username') password = module.params.get('api_password') diff --git a/plugins/modules/one_template.py b/plugins/modules/one_template.py index 06460fee57..1fcc81c540 100644 --- a/plugins/modules/one_template.py +++ 
b/plugins/modules/one_template.py @@ -1,7 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- # -# Copyright (c) 2021, Georg Gadinger +# Copyright (c) 2021, Jyrki Gadinger # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -60,7 +60,7 @@ extends_documentation_fragment: - community.general.attributes author: - - "Georg Gadinger (@nilsding)" + - "Jyrki Gadinger (@nilsding)" ''' EXAMPLES = ''' diff --git a/plugins/modules/one_vm.py b/plugins/modules/one_vm.py index 8ee9c85609..2f4ee25354 100644 --- a/plugins/modules/one_vm.py +++ b/plugins/modules/one_vm.py @@ -1559,11 +1559,11 @@ def main(): one_client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password) if attributes: - attributes = dict((key.upper(), value) for key, value in attributes.items()) + attributes = {key.upper(): value for key, value in attributes.items()} check_attributes(module, attributes) if count_attributes: - count_attributes = dict((key.upper(), value) for key, value in count_attributes.items()) + count_attributes = {key.upper(): value for key, value in count_attributes.items()} if not attributes: import copy module.warn('When you pass `count_attributes` without `attributes` option when deploying, `attributes` option will have same values implicitly.') diff --git a/plugins/modules/one_vnet.py b/plugins/modules/one_vnet.py new file mode 100644 index 0000000000..2dcf20de5f --- /dev/null +++ b/plugins/modules/one_vnet.py @@ -0,0 +1,437 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2024, Alexander Bakanovskii +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: one_vnet +short_description: Manages OpenNebula virtual networks +version_added: 9.4.0 +author: "Alexander Bakanovskii (@abakanovskii)" +requirements: + - pyone +description: + - Manages virtual networks in OpenNebula. +attributes: + check_mode: + support: partial + details: + - Note that check mode always returns C(changed=true) for existing networks, even if the network would not actually change. + diff_mode: + support: none +options: + id: + description: + - A O(id) of the network you would like to manage. + - If not set then a new network will be created with the given O(name). + type: int + name: + description: + - A O(name) of the network you would like to manage. If a network with + the given name does not exist it will be created, otherwise it will be + managed by this module. + type: str + template: + description: + - A string containing the network template contents. + type: str + state: + description: + - V(present) - state that is used to manage the network. + - V(absent) - delete the network. 
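# Sketch of the credential fallback that the removed get_connection_info()
# helper performed and that the shared community.general.opennebula doc
# fragment / OpenNebulaModule base class is now expected to cover: explicit
# module parameters win, otherwise the ONE_URL / ONE_USERNAME / ONE_PASSWORD
# environment variables are used. The function name and return shape here are
# illustrative only; the environment variable names come from the removed code.
import os


def resolve_one_auth(params):
    url = params.get('api_url') or os.environ.get('ONE_URL')
    username = params.get('api_username') or os.environ.get('ONE_USERNAME')
    password = params.get('api_password') or os.environ.get('ONE_PASSWORD')
    if not (url and username and password):
        raise ValueError('api_url, api_username and api_password (or the ONE_* '
                         'environment variables) must all be provided')
    return url, username, password


# pyone sessions are then built as "username:password", as in the removed code:
# client = pyone.OneServer(url, session=username + ':' + password)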
+ choices: ["present", "absent"] + default: present + type: str + +extends_documentation_fragment: + - community.general.opennebula + - community.general.attributes +''' + +EXAMPLES = ''' +- name: Make sure the network is present by ID + community.general.one_vnet: + id: 0 + state: present + register: result + +- name: Make sure the network is present by name + community.general.one_vnet: + name: opennebula-bridge + state: present + register: result + +- name: Create a new or update an existing network + community.general.one_vnet: + name: bridge-network + template: | + VN_MAD = "bridge" + BRIDGE = "br0" + BRIDGE_TYPE = "linux" + AR=[ + TYPE = "IP4", + IP = 192.0.2.50, + SIZE = "20" + ] + DNS = 192.0.2.1 + GATEWAY = 192.0.2.1 + +- name: Delete the network by ID + community.general.one_vnet: + id: 0 + state: absent +''' + +RETURN = ''' +id: + description: The network id. + type: int + returned: when O(state=present) + sample: 153 +name: + description: The network name. + type: str + returned: when O(state=present) + sample: app1 +template: + description: The parsed network template. + type: dict + returned: when O(state=present) + sample: + BRIDGE: onebr.1000 + BRIDGE_TYPE: linux + DESCRIPTION: sampletext + PHYDEV: eth0 + SECURITY_GROUPS: 0 + VLAN_ID: 1000 + VN_MAD: 802.1Q +user_id: + description: The network's user name. + type: int + returned: when O(state=present) + sample: 1 +user_name: + description: The network's user id. + type: str + returned: when O(state=present) + sample: oneadmin +group_id: + description: The network's group id. + type: int + returned: when O(state=present) + sample: 1 +group_name: + description: The network's group name. + type: str + returned: when O(state=present) + sample: one-users +owner_id: + description: The network's owner id. + type: int + returned: when O(state=present) + sample: 143 +owner_name: + description: The network's owner name. + type: str + returned: when O(state=present) + sample: ansible-test +permissions: + description: The network's permissions. + type: dict + returned: when O(state=present) + contains: + owner_u: + description: The network's owner USAGE permissions. + type: str + sample: 1 + owner_m: + description: The network's owner MANAGE permissions. + type: str + sample: 0 + owner_a: + description: The network's owner ADMIN permissions. + type: str + sample: 0 + group_u: + description: The network's group USAGE permissions. + type: str + sample: 0 + group_m: + description: The network's group MANAGE permissions. + type: str + sample: 0 + group_a: + description: The network's group ADMIN permissions. + type: str + sample: 0 + other_u: + description: The network's other users USAGE permissions. + type: str + sample: 0 + other_m: + description: The network's other users MANAGE permissions. + type: str + sample: 0 + other_a: + description: The network's other users ADMIN permissions + type: str + sample: 0 + sample: + owner_u: 1 + owner_m: 0 + owner_a: 0 + group_u: 0 + group_m: 0 + group_a: 0 + other_u: 0 + other_m: 0 + other_a: 0 +clusters: + description: The network's clusters. + type: list + returned: when O(state=present) + sample: [0, 100] +bridge: + description: The network's bridge interface. + type: str + returned: when O(state=present) + sample: br0 +bridge_type: + description: The network's bridge type. + type: str + returned: when O(state=present) + sample: linux +parent_network_id: + description: The network's parent network id. 
+ type: int + returned: when O(state=present) + sample: 1 +vn_mad: + description: The network's VN_MAD. + type: str + returned: when O(state=present) + sample: bridge +phydev: + description: The network's physical device (NIC). + type: str + returned: when O(state=present) + sample: eth0 +vlan_id: + description: The network's VLAN tag. + type: int + returned: when O(state=present) + sample: 1000 +outer_vlan_id: + description: The network's outer VLAN tag. + type: int + returned: when O(state=present) + sample: 1000 +vrouters: + description: The network's list of virtual routers IDs. + type: list + returned: when O(state=present) + sample: [0, 1] +ar_pool: + description: The network's list of ar_pool. + type: list + returned: when O(state=present) + sample: + - ar_id: 0 + ip: 192.0.2.1 + mac: 6c:1e:46:01:cd:d1 + size: 20 + type: IP4 + - ar_id: 1 + allocated: 0 + ip: 198.51.100.1 + mac: 5d:9b:c0:9e:f6:e5 + size: 20 + type: IP4 +''' + + +from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule + + +class NetworksModule(OpenNebulaModule): + + def __init__(self): + argument_spec = dict( + id=dict(type='int', required=False), + name=dict(type='str', required=False), + state=dict(type='str', choices=['present', 'absent'], default='present'), + template=dict(type='str', required=False), + ) + + mutually_exclusive = [ + ['id', 'name'] + ] + + required_one_of = [('id', 'name')] + + required_if = [ + ['state', 'present', ['template']] + ] + + OpenNebulaModule.__init__(self, + argument_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + required_one_of=required_one_of, + required_if=required_if) + + def run(self, one, module, result): + params = module.params + id = params.get('id') + name = params.get('name') + desired_state = params.get('state') + template_data = params.get('template') + + self.result = {} + + template = self.get_template_instance(id, name) + needs_creation = False + if not template and desired_state != 'absent': + if id: + module.fail_json(msg="There is no template with id=" + str(id)) + else: + needs_creation = True + + if desired_state == 'absent': + self.result = self.delete_template(template) + else: + if needs_creation: + self.result = self.create_template(name, template_data) + else: + self.result = self.update_template(template, template_data) + + self.exit() + + def get_template(self, predicate): + # -2 means "Resources belonging to all users" + # the other two parameters are used for pagination, -1 for both essentially means "return all" + pool = self.one.vnpool.info(-2, -1, -1) + + for template in pool.VNET: + if predicate(template): + return template + + return None + + def get_template_by_id(self, template_id): + return self.get_template(lambda template: (template.ID == template_id)) + + def get_template_by_name(self, name): + return self.get_template(lambda template: (template.NAME == name)) + + def get_template_instance(self, requested_id, requested_name): + if requested_id: + return self.get_template_by_id(requested_id) + else: + return self.get_template_by_name(requested_name) + + def get_networks_ar_pool(self, template): + ar_pool = [] + template_pool = template.AR_POOL.AR + for ar in range(len(template_pool)): + template_param = template_pool[ar] + ar_pool.append({ + # These params will always be present + 'ar_id': template_param.AR_ID, + 'mac': template_param.MAC, + 'size': template_param.SIZE, + 'type': template_param.TYPE, + # These are optional so firstly check for presence + # and if not 
present set value to Null + 'allocated': getattr(template_param, 'ALLOCATED', 'Null'), + 'ip': getattr(template_param, 'IP', 'Null'), + 'global_prefix': getattr(template_param, 'GLOBAL_PREFIX', 'Null'), + 'parent_network_ar_id': getattr(template_param, 'PARENT_NETWORK_AR_ID', 'Null'), + 'ula_prefix': getattr(template_param, 'ULA_PREFIX', 'Null'), + 'vn_mad': getattr(template_param, 'VN_MAD', 'Null'), + }) + return ar_pool + + def get_template_info(self, template): + info = { + 'id': template.ID, + 'name': template.NAME, + 'template': template.TEMPLATE, + 'user_name': template.UNAME, + 'user_id': template.UID, + 'group_name': template.GNAME, + 'group_id': template.GID, + 'permissions': { + 'owner_u': template.PERMISSIONS.OWNER_U, + 'owner_m': template.PERMISSIONS.OWNER_M, + 'owner_a': template.PERMISSIONS.OWNER_A, + 'group_u': template.PERMISSIONS.GROUP_U, + 'group_m': template.PERMISSIONS.GROUP_M, + 'group_a': template.PERMISSIONS.GROUP_A, + 'other_u': template.PERMISSIONS.OTHER_U, + 'other_m': template.PERMISSIONS.OTHER_M, + 'other_a': template.PERMISSIONS.OTHER_A + }, + 'clusters': template.CLUSTERS.ID, + 'bridge': template.BRIDGE, + 'bride_type': template.BRIDGE_TYPE, + 'parent_network_id': template.PARENT_NETWORK_ID, + 'vn_mad': template.VN_MAD, + 'phydev': template.PHYDEV, + 'vlan_id': template.VLAN_ID, + 'outer_vlan_id': template.OUTER_VLAN_ID, + 'used_leases': template.USED_LEASES, + 'vrouters': template.VROUTERS.ID, + 'ar_pool': self.get_networks_ar_pool(template) + } + + return info + + def create_template(self, name, template_data): + if not self.module.check_mode: + # -1 means that network won't be added to any cluster which happens by default + self.one.vn.allocate("NAME = \"" + name + "\"\n" + template_data, -1) + + result = self.get_template_info(self.get_template_by_name(name)) + result['changed'] = True + + return result + + def update_template(self, template, template_data): + if not self.module.check_mode: + # 0 = replace the whole template + self.one.vn.update(template.ID, template_data, 0) + + result = self.get_template_info(self.get_template_by_id(template.ID)) + if self.module.check_mode: + # Unfortunately it is not easy to detect if the template would have changed, therefore always report a change here. + result['changed'] = True + else: + # if the previous parsed template data is not equal to the updated one, this has changed + result['changed'] = template.TEMPLATE != result['template'] + + return result + + def delete_template(self, template): + if not template: + return {'changed': False} + + if not self.module.check_mode: + self.one.vn.delete(template.ID) + + return {'changed': True} + + +def main(): + NetworksModule().run_module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/open_iscsi.py b/plugins/modules/open_iscsi.py index 163042cc42..df8a694a7e 100644 --- a/plugins/modules/open_iscsi.py +++ b/plugins/modules/open_iscsi.py @@ -45,6 +45,7 @@ options: login: description: - Whether the target node should be connected. + - When O(target) is omitted, will login to all available. 
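# The update_template() method above decides 'changed' by comparing the parsed
# TEMPLATE before and after vn.update(), and always reports a change in check
# mode because raw template text cannot be compared reliably (quoting, ordering
# and whitespace differ). A rough, illustrative comparison that parses simple
# KEY = "value" lines only; vector attributes such as AR = [ ... ] are skipped
# and would need real handling.
def parse_flat_template(text):
    attrs = {}
    depth = 0
    for line in text.splitlines():
        depth += line.count('[') - line.count(']')
        if depth == 0 and '[' not in line and '=' in line:
            key, _, value = line.partition('=')
            attrs[key.strip()] = value.strip().strip('"')
    return attrs


def would_change(current_text, desired_text):
    return parse_flat_template(current_text) != parse_flat_template(desired_text)


print(would_change('BRIDGE = "br0"\nVN_MAD = "bridge"',
                   'VN_MAD="bridge"\nBRIDGE = "br0"'))  # False: same attributes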
type: bool aliases: [ state ] node_auth: @@ -101,7 +102,6 @@ options: type: bool default: false version_added: 4.1.0 - ''' EXAMPLES = r''' @@ -117,8 +117,7 @@ EXAMPLES = r''' discover: true ip: 10.1.2.3 -# NOTE: Only works if exactly one target is exported to the initiator -- name: Discover targets on portal and login to the one available +- name: Discover targets on portal and login to the ones available community.general.open_iscsi: portal: '{{ iscsi_target }}' login: true @@ -227,7 +226,7 @@ def target_loggedon(module, target, portal=None, port=None): module.fail_json(cmd=cmd, rc=rc, msg=err) -def target_login(module, target, portal=None, port=None): +def target_login(module, target, check_rc, portal=None, port=None): node_auth = module.params['node_auth'] node_user = module.params['node_user'] node_pass = module.params['node_pass'] @@ -240,21 +239,22 @@ def target_login(module, target, portal=None, port=None): ('node.session.auth.password', node_pass)] for (name, value) in params: cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--op=update', '--name', name, '--value', value] - module.run_command(cmd, check_rc=True) + module.run_command(cmd, check_rc=check_rc) if node_user_in: params = [('node.session.auth.username_in', node_user_in), ('node.session.auth.password_in', node_pass_in)] for (name, value) in params: cmd = '%s --mode node --targetname %s --op=update --name %s --value %s' % (iscsiadm_cmd, target, name, value) - module.run_command(cmd, check_rc=True) + module.run_command(cmd, check_rc=check_rc) cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--login'] if portal is not None and port is not None: cmd.append('--portal') cmd.append('%s:%s' % (portal, port)) - module.run_command(cmd, check_rc=True) + rc, out, err = module.run_command(cmd, check_rc=check_rc) + return rc def target_logout(module, target): @@ -339,7 +339,10 @@ def main(): ), required_together=[['node_user', 'node_pass'], ['node_user_in', 'node_pass_in']], - required_if=[('discover', True, ['portal'])], + required_if=[ + ('discover', True, ['portal']), + ('auto_node_startup', True, ['target']), + ('auto_portal_startup', True, ['target'])], supports_check_mode=True, ) @@ -369,6 +372,8 @@ def main(): # return json dict result = {'changed': False} + login_to_all_nodes = False + check_rc = True if discover: if check: @@ -385,9 +390,10 @@ def main(): if login is not None or automatic is not None: if target is None: if len(nodes) > 1: - module.fail_json(msg="Need to specify a target") - else: - target = nodes[0] + # Disable strict return code checking if there are multiple targets + # That will allow to skip target where we have no rights to login + login_to_all_nodes = True + check_rc = False else: # check given target is in cache check_target = False @@ -402,26 +408,54 @@ def main(): result['nodes'] = nodes if login is not None: - loggedon = target_loggedon(module, target, portal, port) - if (login and loggedon) or (not login and not loggedon): - result['changed'] |= False - if login: - result['devicenodes'] = target_device_node(target) - elif not check: - if login: - target_login(module, target, portal, port) - # give udev some time - time.sleep(1) - result['devicenodes'] = target_device_node(target) - else: - target_logout(module, target) - result['changed'] |= True - result['connection_changed'] = True + if login_to_all_nodes: + result['devicenodes'] = [] + for index_target in nodes: + loggedon = target_loggedon(module, index_target, portal, port) + if (login and loggedon) or (not login 
and not loggedon): + result['changed'] |= False + if login: + result['devicenodes'] += target_device_node(index_target) + elif not check: + if login: + login_result = target_login(module, index_target, check_rc, portal, port) + # give udev some time + time.sleep(1) + result['devicenodes'] += target_device_node(index_target) + else: + target_logout(module, index_target) + # Check if there are multiple targets on a single portal and + # do not mark the task changed if host could not login to one of them + if len(nodes) > 1 and login_result == 24: + result['changed'] |= False + result['connection_changed'] = False + else: + result['changed'] |= True + result['connection_changed'] = True + else: + result['changed'] |= True + result['connection_changed'] = True else: - result['changed'] |= True - result['connection_changed'] = True + loggedon = target_loggedon(module, target, portal, port) + if (login and loggedon) or (not login and not loggedon): + result['changed'] |= False + if login: + result['devicenodes'] = target_device_node(target) + elif not check: + if login: + target_login(module, target, portal, port) + # give udev some time + time.sleep(1) + result['devicenodes'] = target_device_node(target) + else: + target_logout(module, target) + result['changed'] |= True + result['connection_changed'] = True + else: + result['changed'] |= True + result['connection_changed'] = True - if automatic is not None: + if automatic is not None and not login_to_all_nodes: isauto = target_isauto(module, target) if (automatic and isauto) or (not automatic and not isauto): result['changed'] |= False @@ -437,7 +471,7 @@ def main(): result['changed'] |= True result['automatic_changed'] = True - if automatic_portal is not None: + if automatic_portal is not None and not login_to_all_nodes: isauto = target_isauto(module, target, portal, port) if (automatic_portal and isauto) or (not automatic_portal and not isauto): result['changed'] |= False diff --git a/plugins/modules/openbsd_pkg.py b/plugins/modules/openbsd_pkg.py index c831136110..69ac7bff8e 100644 --- a/plugins/modules/openbsd_pkg.py +++ b/plugins/modules/openbsd_pkg.py @@ -24,7 +24,10 @@ attributes: check_mode: support: full diff_mode: - support: none + support: partial + version_added: 9.1.0 + details: + - Only works when check mode is not enabled. options: name: description: @@ -159,6 +162,20 @@ def execute_command(cmd, module): return module.run_command(cmd_args, environ_update={'TERM': 'dumb'}) +def get_all_installed(module): + """ + Get all installed packaged. Used to support diff mode + """ + command = 'pkg_info -Iq' + + rc, stdout, stderr = execute_command(command, module) + + if stderr: + module.fail_json(msg="failed in get_all_installed(): %s" % stderr) + + return stdout + + # Function used to find out if a package is currently installed. def get_package_state(names, pkg_spec, module): info_cmd = 'pkg_info -Iq' @@ -573,10 +590,13 @@ def main(): result['name'] = name result['state'] = state result['build'] = build + result['diff'] = {} # The data structure used to keep track of package information. 
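# Standalone sketch of the tolerant multi-target login introduced for
# open_iscsi above: when no explicit target is given, attempt a login on every
# discovered node and, if there is more than one node, do not treat iscsiadm
# return code 24 on a single target as a change or a failure. The meaning
# attached to code 24 is taken from the diff above, not asserted independently,
# and the iscsiadm invocation is simplified for illustration.
import subprocess


def login_all(nodes, portal, port):
    changed = False
    for target in nodes:
        cmd = ['iscsiadm', '--mode', 'node', '--targetname', target,
               '--portal', '%s:%s' % (portal, port), '--login']
        rc = subprocess.call(cmd)
        if rc == 0:
            changed = True
        elif len(nodes) > 1 and rc == 24:
            continue  # skip targets this initiator is not allowed to log in to
        else:
            raise RuntimeError('iscsiadm failed for %s with rc=%d' % (target, rc))
    return changed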
pkg_spec = {} + new_package_list = original_package_list = get_all_installed(module) + if build is True: if not os.path.isdir(ports_dir): module.fail_json(msg="the ports source directory %s does not exist" % (ports_dir)) @@ -661,6 +681,10 @@ def main(): result['changed'] = combined_changed + if result['changed'] and not module.check_mode: + new_package_list = get_all_installed(module) + result['diff'] = dict(before=original_package_list, after=new_package_list) + module.exit_json(**result) diff --git a/plugins/modules/pacemaker_cluster.py b/plugins/modules/pacemaker_cluster.py index 60d8656ac3..af8bb5ff56 100644 --- a/plugins/modules/pacemaker_cluster.py +++ b/plugins/modules/pacemaker_cluster.py @@ -8,71 +8,59 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: pacemaker_cluster short_description: Manage pacemaker clusters author: - Mathieu Bultel (@matbu) description: - - This module can manage a pacemaker cluster and nodes from Ansible using - the pacemaker cli. + - This module can manage a pacemaker cluster and nodes from Ansible using the pacemaker CLI. extends_documentation_fragment: - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - state: - description: - - Indicate desired state of the cluster - choices: [ cleanup, offline, online, restart ] - type: str - node: - description: - - Specify which node of the cluster you want to manage. None == the - cluster status itself, 'all' == check the status of all nodes. - type: str - timeout: - description: - - Timeout when the module should considered that the action has failed - default: 300 - type: int - force: - description: - - Force the change of the cluster state - type: bool - default: true -''' -EXAMPLES = ''' ---- + state: + description: + - Indicate desired state of the cluster. + choices: [cleanup, offline, online, restart] + type: str + node: + description: + - Specify which node of the cluster you want to manage. V(null) == the cluster status itself, V(all) == check the status of all nodes. + type: str + timeout: + description: + - Timeout when the module should considered that the action has failed. + default: 300 + type: int + force: + description: + - Force the change of the cluster state. + type: bool + default: true +""" + +EXAMPLES = r""" - name: Set cluster Online hosts: localhost gather_facts: false tasks: - - name: Get cluster state - community.general.pacemaker_cluster: - state: online -''' + - name: Get cluster state + community.general.pacemaker_cluster: + state: online +""" -RETURN = ''' -changed: - description: true if the cluster state has changed - type: bool - returned: always +RETURN = r""" out: - description: The output of the current state of the cluster. It return a - list of the nodes state. - type: str - sample: 'out: [[" overcloud-controller-0", " Online"]]}' - returned: always -rc: - description: exit code of the module - type: bool - returned: always -''' + description: The output of the current state of the cluster. It returns a list of the nodes state. 
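# Minimal sketch of the diff support added to openbsd_pkg above: snapshot the
# installed package list with `pkg_info -Iq` before and after making changes
# and expose both snapshots as a before/after diff. Only the pkg_info call
# already used by the module is assumed; apply_changes() is a placeholder for
# whatever actually installs or removes packages.
import subprocess


def installed_packages():
    return subprocess.check_output(['pkg_info', '-Iq'], text=True)


def run_with_diff(apply_changes):
    before = installed_packages()
    changed = apply_changes()
    after = installed_packages() if changed else before
    return {'changed': changed, 'diff': {'before': before, 'after': after}}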
+ type: str + sample: 'out: [[" overcloud-controller-0", " Online"]]}' + returned: always +""" import time diff --git a/plugins/modules/packet_device.py b/plugins/modules/packet_device.py index 519a7031e1..13dbbb9ff3 100644 --- a/plugins/modules/packet_device.py +++ b/plugins/modules/packet_device.py @@ -10,26 +10,23 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: packet_device short_description: Manage a bare metal server in the Packet Host description: - - Manage a bare metal server in the Packet Host (a "device" in the API terms). - - When the machine is created it can optionally wait for public IP address, or for active state. - - This module has a dependency on packet >= 1.0. - - API is documented at U(https://www.packet.net/developers/api/devices). - - + - Manage a bare metal server in the Packet Host (a "device" in the API terms). + - When the machine is created it can optionally wait for public IP address, or for active state. + - This module has a dependency on packet >= 1.0. + - API is documented at U(https://www.packet.net/developers/api/devices). author: - - Tomas Karasek (@t0mk) - - Matt Baldwin (@baldwinSPC) - - Thibaud Morel l'Horset (@teebes) + - Tomas Karasek (@t0mk) + - Matt Baldwin (@baldwinSPC) + - Thibaud Morel l'Horset (@teebes) extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: check_mode: @@ -45,7 +42,7 @@ options: count: description: - - The number of devices to create. Count number can be included in hostname via the %d string formatter. + - The number of devices to create. Count number can be included in hostname using the C(%d) string formatter. default: 1 type: int @@ -122,7 +119,7 @@ options: user_data: description: - - Userdata blob made available to the machine + - Userdata blob made available to the machine. type: str wait_for_public_IPv: @@ -130,7 +127,7 @@ options: - Whether to wait for the instance to be assigned a public IPv4/IPv6 address. - If set to 4, it will wait until IPv4 is assigned to the instance. - If set to 6, wait until public IPv6 is assigned to the instance. - choices: [4,6] + choices: [4, 6] type: int wait_timeout: @@ -156,11 +153,10 @@ options: requirements: - - "packet-python >= 1.35" + - "packet-python >= 1.35" +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # All the examples assume that you have your Packet API token in environment variable PACKET_API_TOKEN. # You can also pass it to the auth_token parameter of the module instead. @@ -169,13 +165,13 @@ EXAMPLES = ''' - name: Create 1 device hosts: localhost tasks: - - community.general.packet_device: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - hostnames: myserver - tags: ci-xyz - operating_system: ubuntu_16_04 - plan: baremetal_0 - facility: sjc1 + - community.general.packet_device: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + hostnames: myserver + tags: ci-xyz + operating_system: ubuntu_16_04 + plan: baremetal_0 + facility: sjc1 # Create the same device and wait until it is in state "active", (when it's # ready for other API operations). 
Fail if the device is not "active" in @@ -184,64 +180,64 @@ EXAMPLES = ''' - name: Create device and wait up to 10 minutes for active state hosts: localhost tasks: - - community.general.packet_device: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - hostnames: myserver - operating_system: ubuntu_16_04 - plan: baremetal_0 - facility: sjc1 - state: active - wait_timeout: 600 + - community.general.packet_device: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + hostnames: myserver + operating_system: ubuntu_16_04 + plan: baremetal_0 + facility: sjc1 + state: active + wait_timeout: 600 - name: Create 3 ubuntu devices called server-01, server-02 and server-03 hosts: localhost tasks: - - community.general.packet_device: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - hostnames: server-%02d - count: 3 - operating_system: ubuntu_16_04 - plan: baremetal_0 - facility: sjc1 + - community.general.packet_device: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + hostnames: server-%02d + count: 3 + operating_system: ubuntu_16_04 + plan: baremetal_0 + facility: sjc1 - name: Create 3 coreos devices with userdata, wait until they get IPs and then wait for SSH hosts: localhost tasks: - - name: Create 3 devices and register their facts - community.general.packet_device: - hostnames: [coreos-one, coreos-two, coreos-three] - operating_system: coreos_stable - plan: baremetal_0 - facility: ewr1 - locked: true - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - wait_for_public_IPv: 4 - user_data: | - #cloud-config - ssh_authorized_keys: - - {{ lookup('file', 'my_packet_sshkey') }} - coreos: - etcd: - discovery: https://discovery.etcd.io/6a28e078895c5ec737174db2419bb2f3 - addr: $private_ipv4:4001 - peer-addr: $private_ipv4:7001 - fleet: - public-ip: $private_ipv4 - units: - - name: etcd.service - command: start - - name: fleet.service - command: start - register: newhosts + - name: Create 3 devices and register their facts + community.general.packet_device: + hostnames: [coreos-one, coreos-two, coreos-three] + operating_system: coreos_stable + plan: baremetal_0 + facility: ewr1 + locked: true + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + wait_for_public_IPv: 4 + user_data: | + #cloud-config + ssh_authorized_keys: + - {{ lookup('file', 'my_packet_sshkey') }} + coreos: + etcd: + discovery: https://discovery.etcd.io/6a28e078895c5ec737174db2419bb2f3 + addr: $private_ipv4:4001 + peer-addr: $private_ipv4:7001 + fleet: + public-ip: $private_ipv4 + units: + - name: etcd.service + command: start + - name: fleet.service + command: start + register: newhosts - - name: Wait for ssh - ansible.builtin.wait_for: - delay: 1 - host: "{{ item.public_ipv4 }}" - port: 22 - state: started - timeout: 500 - with_items: "{{ newhosts.devices }}" + - name: Wait for ssh + ansible.builtin.wait_for: + delay: 1 + host: "{{ item.public_ipv4 }}" + port: 22 + state: started + timeout: 500 + with_items: "{{ newhosts.devices }}" # Other states of devices @@ -249,38 +245,38 @@ EXAMPLES = ''' - name: Remove 3 devices by uuid hosts: localhost tasks: - - community.general.packet_device: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - state: absent - device_ids: - - 1fb4faf8-a638-4ac7-8f47-86fe514c30d8 - - 2eb4faf8-a638-4ac7-8f47-86fe514c3043 - - 6bb4faf8-a638-4ac7-8f47-86fe514c301f -''' + - community.general.packet_device: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + state: absent + device_ids: + - 1fb4faf8-a638-4ac7-8f47-86fe514c30d8 + - 2eb4faf8-a638-4ac7-8f47-86fe514c3043 + - 6bb4faf8-a638-4ac7-8f47-86fe514c301f 
+""" -RETURN = ''' +RETURN = r""" changed: - description: True if a device was altered in any way (created, modified or removed) - type: bool - sample: true - returned: success + description: True if a device was altered in any way (created, modified or removed). + type: bool + sample: true + returned: success devices: - description: Information about each device that was processed - type: list - sample: - - { - "hostname": "my-server.com", - "id": "2a5122b9-c323-4d5c-b53c-9ad3f54273e7", - "public_ipv4": "147.229.15.12", - "private-ipv4": "10.0.15.12", - "tags": [], - "locked": false, - "state": "provisioning", - "public_ipv6": "2604:1380:2:5200::3" - } - returned: success -''' # NOQA + description: Information about each device that was processed + type: list + sample: + - { + "hostname": "my-server.com", + "id": "2a5122b9-c323-4d5c-b53c-9ad3f54273e7", + "public_ipv4": "147.229.15.12", + "private-ipv4": "10.0.15.12", + "tags": [], + "locked": false, + "state": "provisioning", + "public_ipv6": "2604:1380:2:5200::3" + } + returned: success +""" import os diff --git a/plugins/modules/packet_ip_subnet.py b/plugins/modules/packet_ip_subnet.py index 530cfe3a79..c2c9fcead4 100644 --- a/plugins/modules/packet_ip_subnet.py +++ b/plugins/modules/packet_ip_subnet.py @@ -10,26 +10,24 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: packet_ip_subnet short_description: Assign IP subnet to a bare metal server description: - - Assign or unassign IPv4 or IPv6 subnets to or from a device in the Packet host. - - IPv4 subnets must come from already reserved block. - - IPv6 subnets must come from publicly routable /56 block from your project. - - See U(https://support.packet.com/kb/articles/elastic-ips) for more info on IP block reservation. - + - Assign or unassign IPv4 or IPv6 subnets to or from a device in the Packet host. + - IPv4 subnets must come from already reserved block. + - IPv6 subnets must come from publicly routable /56 block from your project. + - See U(https://support.packet.com/kb/articles/elastic-ips) for more info on IP block reservation. version_added: '0.2.0' author: - - Tomas Karasek (@t0mk) - - Nurfet Becirevic (@nurfet-becirevic) + - Tomas Karasek (@t0mk) + - Nurfet Becirevic (@nurfet-becirevic) extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: check_mode: @@ -77,7 +75,8 @@ options: state: description: - Desired state of the IP subnet on the specified device. - - With O(state=present), you must specify either O(hostname) or O(device_id). Subnet with given CIDR will then be assigned to the specified device. + - With O(state=present), you must specify either O(hostname) or O(device_id). Subnet with given CIDR will then be assigned to the specified + device. - With O(state=absent), you can specify either O(hostname) or O(device_id). The subnet will be removed from specified devices. - If you leave both O(hostname) and O(device_id) empty, the subnet will be removed from any device it's assigned to. choices: ['present', 'absent'] @@ -85,10 +84,10 @@ options: type: str requirements: - - "packet-python >= 1.35" -''' + - "packet-python >= 1.35" +""" -EXAMPLES = ''' +EXAMPLES = r""" # All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. # You can also pass it to the auth_token parameter of the module instead. 
@@ -96,33 +95,33 @@ EXAMPLES = ''' hosts: localhost tasks: - - packet_device: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - hostnames: myserver - operating_system: ubuntu_16_04 - plan: baremetal_0 - facility: sjc1 - state: active + - packet_device: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + hostnames: myserver + operating_system: ubuntu_16_04 + plan: baremetal_0 + facility: sjc1 + state: active # Pick an IPv4 address from a block allocated to your project. - - community.general.packet_ip_subnet: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - hostname: myserver - cidr: "147.75.201.78/32" + - community.general.packet_ip_subnet: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + hostname: myserver + cidr: "147.75.201.78/32" # Release IP address 147.75.201.78 - name: Unassign IP address from any device in your project hosts: localhost tasks: - - community.general.packet_ip_subnet: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - cidr: "147.75.201.78/32" - state: absent -''' + - community.general.packet_ip_subnet: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + cidr: "147.75.201.78/32" + state: absent +""" -RETURN = ''' +RETURN = r""" changed: description: True if an IP address assignments were altered in any way (created or removed). type: bool @@ -140,7 +139,7 @@ subnet: sample: address: 147.75.90.241 address_family: 4 - assigned_to: { href : /devices/61f9aa5e-0530-47f5-97c2-113828e61ed0 } + assigned_to: {href: /devices/61f9aa5e-0530-47f5-97c2-113828e61ed0} cidr: 31 created_at: '2017-08-07T15:15:30Z' enabled: true @@ -153,7 +152,7 @@ subnet: network: 147.75.90.240 public: true returned: success -''' +""" import uuid diff --git a/plugins/modules/packet_project.py b/plugins/modules/packet_project.py index d8c991dba2..f6acdec152 100644 --- a/plugins/modules/packet_project.py +++ b/plugins/modules/packet_project.py @@ -10,24 +10,22 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: packet_project short_description: Create/delete a project in Packet host description: - - Create/delete a project in Packet host. - - API is documented at U(https://www.packet.com/developers/api/#projects). - + - Create/delete a project in Packet host. + - API is documented at U(https://www.packet.com/developers/api/#projects). version_added: '0.2.0' author: - - Tomas Karasek (@t0mk) - - Nurfet Becirevic (@nurfet-becirevic) + - Tomas Karasek (@t0mk) + - Nurfet Becirevic (@nurfet-becirevic) extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: check_mode: @@ -55,9 +53,9 @@ options: type: str name: - description: - - Name for/of the project. - type: str + description: + - Name for/of the project. + type: str org_id: description: @@ -76,11 +74,10 @@ options: type: str requirements: - - "packet-python >= 1.40" + - "packet-python >= 1.40" +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. # You can also pass the api token in module param auth_token. @@ -110,9 +107,9 @@ EXAMPLES = ''' community.general.packet_project: name: "newer project" payment_method: "the other visa" -''' +""" -RETURN = ''' +RETURN = r""" changed: description: True if a project was created or removed. type: bool @@ -128,7 +125,7 @@ id: description: UUID of addressed project. 
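# packet_ip_subnet (above) expects the assigned CIDR to come out of a block
# already reserved for the project (a public /56 in the IPv6 case). A purely
# illustrative stdlib check that a candidate CIDR sits inside a reserved block;
# the module itself relies on the Packet API for this, the helper is not part
# of it.
import ipaddress


def cidr_in_reserved_block(cidr, reserved_block):
    return ipaddress.ip_network(cidr, strict=False).subnet_of(
        ipaddress.ip_network(reserved_block, strict=False))


print(cidr_in_reserved_block('147.75.201.78/32', '147.75.201.0/24'))  # True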
type: str returned: success -''' +""" from ansible.module_utils.basic import AnsibleModule, env_fallback from ansible.module_utils.common.text.converters import to_native diff --git a/plugins/modules/packet_sshkey.py b/plugins/modules/packet_sshkey.py index 6519735dcc..8172482108 100644 --- a/plugins/modules/packet_sshkey.py +++ b/plugins/modules/packet_sshkey.py @@ -8,13 +8,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: packet_sshkey short_description: Create/delete an SSH key in Packet host description: - - Create/delete an SSH key in Packet host. - - API is documented at U(https://www.packet.net/help/api/#page:ssh-keys,header:ssh-keys-ssh-keys-post). + - Create/delete an SSH key in Packet host. + - API is documented at U(https://www.packet.net/help/api/#page:ssh-keys,header:ssh-keys-ssh-keys-post). author: "Tomas Karasek (@t0mk) " extends_documentation_fragment: - community.general.attributes @@ -26,42 +25,41 @@ attributes: options: state: description: - - Indicate desired state of the target. + - Indicate desired state of the target. default: present choices: ['present', 'absent'] type: str auth_token: description: - - Packet API token. You can also supply it in environment variable E(PACKET_API_TOKEN). + - Packet API token. You can also supply it in environment variable E(PACKET_API_TOKEN). type: str label: description: - - Label for the key. If you keep it empty, it will be read from key string. + - Label for the key. If you keep it empty, it will be read from key string. type: str aliases: [name] id: description: - - UUID of the key which you want to remove. + - UUID of the key which you want to remove. type: str fingerprint: description: - - Fingerprint of the key which you want to remove. + - Fingerprint of the key which you want to remove. type: str key: description: - - Public Key string ({type} {base64 encoded key} {description}). + - Public Key string (V({type} {base64 encoded key} {description})). type: str key_file: description: - - File with the public key. + - File with the public key. type: path requirements: - packet-python +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. # You can also pass the api token in module param auth_token. @@ -84,27 +82,27 @@ EXAMPLES = ''' community.general.packet_sshkey: state: absent id: eef49903-7a09-4ca1-af67-4087c29ab5b6 -''' +""" -RETURN = ''' +RETURN = r""" changed: - description: True if a sshkey was created or removed. - type: bool - sample: true - returned: always + description: True if a sshkey was created or removed. + type: bool + sample: true + returned: always sshkeys: description: Information about sshkeys that were created/removed. 
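# packet_sshkey documents the key string as "{type} {base64 encoded key}
# {description}" and derives the label from it when 'label' is not given. An
# illustrative parser of that shape; this is not the module's own
# implementation.
def split_ssh_public_key(key_string):
    parts = key_string.strip().split(None, 2)
    key_type, key_data = parts[0], parts[1]
    comment = parts[2] if len(parts) == 3 else ''
    return key_type, key_data, comment


print(split_ssh_public_key('ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAA... user@server')[2])
# -> 'user@server'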
type: list sample: [ - { - "fingerprint": "5c:93:74:7c:ed:07:17:62:28:75:79:23:d6:08:93:46", - "id": "41d61bd8-3342-428b-a09c-e67bdd18a9b7", - "key": "ssh-dss AAAAB3NzaC1kc3MAAACBAIfNT5S0ncP4BBJBYNhNPxFF9lqVhfPeu6SM1LoCocxqDc1AT3zFRi8hjIf6TLZ2AA4FYbcAWxLMhiBxZRVldT9GdBXile78kAK5z3bKTwq152DCqpxwwbaTIggLFhsU8wrfBsPWnDuAxZ0h7mmrCjoLIE3CNLDA/NmV3iB8xMThAAAAFQCStcesSgR1adPORzBxTr7hug92LwAAAIBOProm3Gk+HWedLyE8IfofLaOeRnbBRHAOL4z0SexKkVOnQ/LGN/uDIIPGGBDYTvXgKZT+jbHeulRJ2jKgfSpGKN4JxFQ8uzVH492jEiiUJtT72Ss1dCV4PmyERVIw+f54itihV3z/t25dWgowhb0int8iC/OY3cGodlmYb3wdcQAAAIBuLbB45djZXzUkOTzzcRDIRfhaxo5WipbtEM2B1fuBt2gyrvksPpH/LK6xTjdIIb0CxPu4OCxwJG0aOz5kJoRnOWIXQGhH7VowrJhsqhIc8gN9ErbO5ea8b1L76MNcAotmBDeTUiPw01IJ8MdDxfmcsCslJKgoRKSmQpCwXQtN2g== tomk@hp2", - "label": "mynewkey33" - } + { + "fingerprint": "5c:93:74:7c:ed:07:17:62:28:75:79:23:d6:08:93:46", + "id": "41d61bd8-3342-428b-a09c-e67bdd18a9b7", + "key": "ssh-dss AAAAB3NzaC1kc3MAAACBA ... MdDxfmcsCslJKgoRKSmQpCwXQtN2g== user@server", + "label": "mynewkey33" + } ] returned: always -''' # NOQA +""" import os import uuid diff --git a/plugins/modules/packet_volume.py b/plugins/modules/packet_volume.py index 659e8d8aa3..229d63a756 100644 --- a/plugins/modules/packet_volume.py +++ b/plugins/modules/packet_volume.py @@ -9,24 +9,22 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: packet_volume short_description: Create/delete a volume in Packet host description: - - Create/delete a volume in Packet host. - - API is documented at U(https://www.packet.com/developers/api/#volumes). - + - Create/delete a volume in Packet host. + - API is documented at U(https://www.packet.com/developers/api/#volumes). version_added: '0.2.0' author: - - Tomas Karasek (@t0mk) - - Nurfet Becirevic (@nurfet-becirevic) + - Tomas Karasek (@t0mk) + - Nurfet Becirevic (@nurfet-becirevic) extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: check_mode: @@ -55,14 +53,13 @@ options: name: description: - - Selector for API-generated name of the volume + - Selector for API-generated name of the volume. type: str description: description: - User-defined description attribute for Packet volume. - - "It is used used as idempotent identifier - if volume with given - description exists, new one is not created." + - It is used used as idempotent identifier - if volume with given description exists, new one is not created. type: str id: @@ -72,7 +69,7 @@ options: plan: description: - - storage_1 for standard tier, storage_2 for premium (performance) tier. + - V(storage_1) for standard tier, V(storage_2) for premium (performance) tier. - Tiers are described at U(https://www.packet.com/cloud/storage/). choices: ['storage_1', 'storage_2'] default: 'storage_1' @@ -91,7 +88,7 @@ options: locked: description: - - Create new volume locked. + - Create new volume locked. type: bool default: false @@ -123,10 +120,9 @@ options: requirements: - "packet-python >= 1.35" +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. # You can also pass the api token in module param auth_token. 
@@ -154,25 +150,25 @@ EXAMPLES = ''' id: "{{ result_create.id }}" project_id: "{{ project_id }}" state: absent -''' +""" -RETURN = ''' +RETURN = r""" id: - description: UUID of specified volume - type: str - returned: success - sample: 53000fb2-ee46-4673-93a8-de2c2bdba33c + description: UUID of specified volume. + type: str + returned: success + sample: 53000fb2-ee46-4673-93a8-de2c2bdba33c name: - description: The API-generated name of the volume resource. - type: str - returned: if volume is attached/detached to/from some device - sample: "volume-a91dc506" + description: The API-generated name of the volume resource. + type: str + returned: if volume is attached/detached to/from some device + sample: "volume-a91dc506" description: - description: The user-defined description of the volume resource. - type: str - returned: success - sample: "Just another volume" -''' + description: The user-defined description of the volume resource. + type: str + returned: success + sample: "Just another volume" +""" import uuid diff --git a/plugins/modules/packet_volume_attachment.py b/plugins/modules/packet_volume_attachment.py index a46fef55cb..0423cc879d 100644 --- a/plugins/modules/packet_volume_attachment.py +++ b/plugins/modules/packet_volume_attachment.py @@ -10,27 +10,24 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: packet_volume_attachment short_description: Attach/detach a volume to a device in the Packet host description: - - Attach/detach a volume to a device in the Packet host. - - API is documented at U(https://www.packet.com/developers/api/volumes/). - - "This module creates the attachment route in the Packet API. In order to discover - the block devices on the server, you have to run the Attach Scripts, - as documented at U(https://help.packet.net/technical/storage/packet-block-storage-linux)." - + - Attach/detach a volume to a device in the Packet host. + - API is documented at U(https://www.packet.com/developers/api/volumes/). + - This module creates the attachment route in the Packet API. In order to discover the block devices on the server, you have to run the Attach + Scripts, as documented at U(https://help.packet.net/technical/storage/packet-block-storage-linux). version_added: '0.2.0' author: - - Tomas Karasek (@t0mk) - - Nurfet Becirevic (@nurfet-becirevic) + - Tomas Karasek (@t0mk) + - Nurfet Becirevic (@nurfet-becirevic) extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: check_mode: @@ -61,7 +58,7 @@ options: description: - Selector for the volume. - It can be a UUID, an API-generated volume name, or user-defined description string. - - 'Example values: 4a347482-b546-4f67-8300-fb5018ef0c5, volume-4a347482, "my volume"' + - 'Example values: V(4a347482-b546-4f67-8300-fb5018ef0c5), V(volume-4a347482), V(my volume).' type: str required: true @@ -69,15 +66,14 @@ options: description: - Selector for the device. - It can be a UUID of the device, or a hostname. - - 'Example values: 98a14f7a-3d27-4478-b7cf-35b5670523f3, "my device"' + - 'Example values: 98a14f7a-3d27-4478-b7cf-35b5670523f3, "my device".' type: str requirements: - "packet-python >= 1.35" +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. # You can also pass the api token in module param auth_token. 
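# packet_volume_attachment (above) takes a volume selector that may be a UUID,
# an API-generated name such as "volume-4a347482", or a free-form description.
# A hedged sketch of resolving that selector against a list of volume records;
# the 'id'/'name'/'description' field names are assumptions for this example,
# not the module's actual data structures.
import uuid


def find_volume(selector, volumes):
    try:
        uuid.UUID(selector)
        key = 'id'
    except ValueError:
        key = 'name' if selector.startswith('volume-') else 'description'
    matches = [v for v in volumes if v.get(key) == selector]
    if len(matches) != 1:
        raise LookupError('expected exactly one volume matching %r, found %d'
                          % (selector, len(matches)))
    return matches[0]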
@@ -122,19 +118,19 @@ EXAMPLES = ''' volume: "{{ volname }}" device: "{{ devname }}" state: absent -''' +""" -RETURN = ''' +RETURN = r""" volume_id: - description: UUID of volume addressed by the module call. - type: str - returned: success + description: UUID of volume addressed by the module call. + type: str + returned: success device_id: - description: UUID of device addressed by the module call. - type: str - returned: success -''' + description: UUID of device addressed by the module call. + type: str + returned: success +""" import uuid diff --git a/plugins/modules/pacman.py b/plugins/modules/pacman.py index 7f67b91039..a4a9370ae0 100644 --- a/plugins/modules/pacman.py +++ b/plugins/modules/pacman.py @@ -12,172 +12,161 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = """ ---- +DOCUMENTATION = r""" module: pacman short_description: Manage packages with I(pacman) description: - - Manage packages with the I(pacman) package manager, which is used by Arch Linux and its variants. + - Manage packages with the I(pacman) package manager, which is used by Arch Linux and its variants. author: - - Indrajit Raychaudhuri (@indrajitr) - - Aaron Bull Schaefer (@elasticdog) - - Maxime de Roucy (@tchernomax) - - Jean Raby (@jraby) + - Indrajit Raychaudhuri (@indrajitr) + - Aaron Bull Schaefer (@elasticdog) + - Maxime de Roucy (@tchernomax) + - Jean Raby (@jraby) extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: full + check_mode: + support: full + diff_mode: + support: full options: - name: - description: - - Name or list of names of the package(s) or file(s) to install, upgrade, or remove. - Cannot be used in combination with O(upgrade). - aliases: [ package, pkg ] - type: list - elements: str - - state: - description: - - Whether to install (V(present) or V(installed), V(latest)), or remove (V(absent) or V(removed)) a package. - - V(present) and V(installed) will simply ensure that a desired package is installed. - - V(latest) will update the specified package if it is not of the latest available version. - - V(absent) and V(removed) will remove the specified package. - default: present - choices: [ absent, installed, latest, present, removed ] - type: str - - force: - description: - - When removing packages, forcefully remove them, without any checks. - Same as O(extra_args="--nodeps --nodeps"). - - When combined with O(update_cache), force a refresh of all package databases. - Same as O(update_cache_extra_args="--refresh --refresh"). - default: false - type: bool - - remove_nosave: - description: - - When removing packages, do not save modified configuration files as C(.pacsave) files. - (passes C(--nosave) to pacman) - version_added: 4.6.0 - default: false - type: bool - - executable: - description: - - Path of the binary to use. This can either be C(pacman) or a pacman compatible AUR helper. - - Pacman compatibility is unfortunately ill defined, in particular, this modules makes - extensive use of the C(--print-format) directive which is known not to be implemented by - some AUR helpers (notably, C(yay)). - - Beware that AUR helpers might behave unexpectedly and are therefore not recommended. - default: pacman - type: str - version_added: 3.1.0 - - extra_args: - description: - - Additional option to pass to pacman when enforcing O(state). 
- default: '' - type: str - - update_cache: - description: - - Whether or not to refresh the master package lists. - - This can be run as part of a package installation or as a separate step. - - If not specified, it defaults to V(false). - - Please note that this option only had an influence on the module's C(changed) state - if O(name) and O(upgrade) are not specified before community.general 5.0.0. - See the examples for how to keep the old behavior. - type: bool - - update_cache_extra_args: - description: - - Additional option to pass to pacman when enforcing O(update_cache). - default: '' - type: str - - upgrade: - description: - - Whether or not to upgrade the whole system. - Cannot be used in combination with O(name). - - If not specified, it defaults to V(false). - type: bool - - upgrade_extra_args: - description: - - Additional option to pass to pacman when enforcing O(upgrade). - default: '' - type: str - - reason: - description: - - The install reason to set for the packages. - choices: [ dependency, explicit ] - type: str - version_added: 5.4.0 - - reason_for: - description: - - Set the install reason for V(all) packages or only for V(new) packages. - - In case of O(state=latest) already installed packages which will be updated to a newer version are not counted as V(new). - default: new - choices: [ all, new ] - type: str - version_added: 5.4.0 - -notes: - - When used with a C(loop:) each package will be processed individually, - it is much more efficient to pass the list directly to the O(name) option. - - To use an AUR helper (O(executable) option), a few extra setup steps might be required beforehand. - For example, a dedicated build user with permissions to install packages could be necessary. - - > - In the tests, while using C(yay) as the O(executable) option, the module failed to install AUR packages - with the error: C(error: target not found: ). -""" - -RETURN = """ -packages: + name: description: - - A list of packages that have been changed. - - Before community.general 4.5.0 this was only returned when O(upgrade=true). - In community.general 4.5.0, it was sometimes omitted when the package list is empty, - but since community.general 4.6.0 it is always returned when O(name) is specified or - O(upgrade=true). - returned: success and O(name) is specified or O(upgrade=true) + - Name or list of names of the package(s) or file(s) to install, upgrade, or remove. Cannot be used in combination with O(upgrade). + aliases: [package, pkg] type: list elements: str - sample: [ package, other-package ] -cache_updated: + state: description: - - The changed status of C(pacman -Sy). - - Useful when O(name) or O(upgrade=true) are specified next to O(update_cache=true). - returned: success, when O(update_cache=true) + - Whether to install (V(present) or V(installed), V(latest)), or remove (V(absent) or V(removed)) a package. + - V(present) and V(installed) will simply ensure that a desired package is installed. + - V(latest) will update the specified package if it is not of the latest available version. + - V(absent) and V(removed) will remove the specified package. + default: present + choices: [absent, installed, latest, present, removed] + type: str + + force: + description: + - When removing packages, forcefully remove them, without any checks. Same as O(extra_args="--nodeps --nodeps"). + - When combined with O(update_cache), force a refresh of all package databases. Same as O(update_cache_extra_args="--refresh --refresh"). 
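# The 'executable' option above notes that the pacman module relies on pacman's
# --print-format directive to predict what a transaction would do without
# applying it (which is why some AUR helpers are not supported). A sketch of
# that kind of dry-run query; the exact flags and format string here are an
# assumption for illustration and are not lifted from the module source.
import subprocess


def would_install(packages):
    cmd = ['pacman', '--sync', '--print', '--print-format', '%n %v'] + list(packages)
    out = subprocess.run(cmd, capture_output=True, text=True, check=True).stdout
    return [tuple(line.split(' ', 1)) for line in out.splitlines() if line]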
+ default: false type: bool - sample: false + + remove_nosave: + description: + - When removing packages, do not save modified configuration files as C(.pacsave) files. (passes C(--nosave) to pacman). version_added: 4.6.0 + default: false + type: bool -stdout: + executable: description: - - Output from pacman. - returned: success, when needed + - Path of the binary to use. This can either be C(pacman) or a pacman compatible AUR helper. + - Pacman compatibility is unfortunately ill defined, in particular, this modules makes extensive use of the C(--print-format) directive + which is known not to be implemented by some AUR helpers (notably, C(yay)). + - Beware that AUR helpers might behave unexpectedly and are therefore not recommended. + default: pacman type: str - sample: ":: Synchronizing package databases... core is up to date :: Starting full system upgrade..." - version_added: 4.1.0 + version_added: 3.1.0 -stderr: + extra_args: description: - - Error output from pacman. - returned: success, when needed + - Additional option to pass to pacman when enforcing O(state). + default: '' type: str - sample: "warning: libtool: local (2.4.6+44+gb9b44533-14) is newer than core (2.4.6+42+gb88cebd5-15)\nwarning ..." - version_added: 4.1.0 + + update_cache: + description: + - Whether or not to refresh the master package lists. + - This can be run as part of a package installation or as a separate step. + - If not specified, it defaults to V(false). + - Please note that this option only had an influence on the module's C(changed) state if O(name) and O(upgrade) are not specified before + community.general 5.0.0. See the examples for how to keep the old behavior. + type: bool + + update_cache_extra_args: + description: + - Additional option to pass to pacman when enforcing O(update_cache). + default: '' + type: str + + upgrade: + description: + - Whether or not to upgrade the whole system. Cannot be used in combination with O(name). + - If not specified, it defaults to V(false). + type: bool + + upgrade_extra_args: + description: + - Additional option to pass to pacman when enforcing O(upgrade). + default: '' + type: str + + reason: + description: + - The install reason to set for the packages. + choices: [dependency, explicit] + type: str + version_added: 5.4.0 + + reason_for: + description: + - Set the install reason for V(all) packages or only for V(new) packages. + - In case of O(state=latest) already installed packages which will be updated to a newer version are not counted as V(new). + default: new + choices: [all, new] + type: str + version_added: 5.4.0 + +notes: + - When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly to the O(name) + option. + - To use an AUR helper (O(executable) option), a few extra setup steps might be required beforehand. For example, a dedicated build user with + permissions to install packages could be necessary. + - 'In the tests, while using C(yay) as the O(executable) option, the module failed to install AUR packages with the error: C(error: target not + found: ).' """ -EXAMPLES = """ +RETURN = r""" +packages: + description: + - A list of packages that have been changed. + - Before community.general 4.5.0 this was only returned when O(upgrade=true). In community.general 4.5.0, it was sometimes omitted when the + package list is empty, but since community.general 4.6.0 it is always returned when O(name) is specified or O(upgrade=true). 
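Illustrative only, not part of the diff: a sketch of the cache refresh and full-system upgrade described above, plus passing a list directly to O(name) (as the notes recommend instead of a loop) and reading back the RV(packages) return value. Package names are placeholders.

# Hedged sketch; package names are placeholders.
- name: Refresh the package databases and upgrade the whole system
  community.general.pacman:
    update_cache: true
    upgrade: true

- name: Install several packages in one task rather than looping
  community.general.pacman:
    name:
      - git
      - htop
      - rsync
    state: latest
  register: pacman_result

- name: Report which packages changed
  ansible.builtin.debug:
    var: pacman_result.packages
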
+ returned: success and O(name) is specified or O(upgrade=true) + type: list + elements: str + sample: [package, other-package] + +cache_updated: + description: + - The changed status of C(pacman -Sy). + - Useful when O(name) or O(upgrade=true) are specified next to O(update_cache=true). + returned: success, when O(update_cache=true) + type: bool + sample: false + version_added: 4.6.0 + +stdout: + description: + - Output from pacman. + returned: success, when needed + type: str + sample: ":: Synchronizing package databases... core is up to date :: Starting full system upgrade..." + version_added: 4.1.0 + +stderr: + description: + - Error output from pacman. + returned: success, when needed + type: str + sample: "warning: libtool: local (2.4.6+44+gb9b44533-14) is newer than core (2.4.6+42+gb88cebd5-15)\nwarning ..." + version_added: 4.1.0 +""" + +EXAMPLES = r""" - name: Install package foo from repo community.general.pacman: name: foo @@ -367,8 +356,9 @@ class Pacman(object): self.install_packages(pkgs) self.success() - # This shouldn't happen... - self.fail("This is a bug") + # This happens if an empty list has been provided for name + self.add_exit_infos(msg='Nothing to do') + self.success() def install_packages(self, pkgs): pkgs_to_install = [] diff --git a/plugins/modules/pacman_key.py b/plugins/modules/pacman_key.py index 4b7b2639ec..f98fb6f8a3 100644 --- a/plugins/modules/pacman_key.py +++ b/plugins/modules/pacman_key.py @@ -8,84 +8,83 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: pacman_key author: - - George Rawlinson (@grawlinson) + - George Rawlinson (@grawlinson) version_added: "3.2.0" short_description: Manage pacman's list of trusted keys description: - - Add or remove gpg keys from the pacman keyring. + - Add or remove gpg keys from the pacman keyring. notes: - - Use full-length key ID (40 characters). - - Keys will be verified when using O(data), O(file), or O(url) unless O(verify) is overridden. - - Keys will be locally signed after being imported into the keyring. - - If the key ID exists in the keyring, the key will not be added unless O(force_update) is specified. - - O(data), O(file), O(url), and O(keyserver) are mutually exclusive. + - Use full-length key ID (40 characters). + - Keys will be verified when using O(data), O(file), or O(url) unless O(verify) is overridden. + - Keys will be locally signed after being imported into the keyring. + - If the key ID exists in the keyring, the key will not be added unless O(force_update) is specified. + - O(data), O(file), O(url), and O(keyserver) are mutually exclusive. requirements: - - gpg - - pacman-key + - gpg + - pacman-key extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - id: - description: - - The 40 character identifier of the key. - - Including this allows check mode to correctly report the changed state. - - Do not specify a subkey ID, instead specify the primary key ID. - required: true - type: str - data: - description: - - The keyfile contents to add to the keyring. - - Must be of C(PGP PUBLIC KEY BLOCK) type. - type: str - file: - description: - - The path to a keyfile on the remote server to add to the keyring. - - Remote file must be of C(PGP PUBLIC KEY BLOCK) type. 
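A hedged sketch of the pacman_key options being reformatted here; the key ID and URL are placeholders. By default the downloaded key is verified against O(id) before it is locally signed and imported.

# Hedged sketch; key ID and URL are placeholders.
- name: Import and locally sign a packager key fetched over HTTPS
  community.general.pacman_key:
    id: 0123456789ABCDEF0123456789ABCDEF01234567
    url: https://example.com/keys/packager.asc
    state: present

- name: Remove the same key from the keyring
  community.general.pacman_key:
    id: 0123456789ABCDEF0123456789ABCDEF01234567
    state: absent
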
- type: path - url: - description: - - The URL to retrieve keyfile from. - - Remote file must be of C(PGP PUBLIC KEY BLOCK) type. - type: str - keyserver: - description: - - The keyserver used to retrieve key from. - type: str - verify: - description: - - Whether or not to verify the keyfile's key ID against specified key ID. - type: bool - default: true - force_update: - description: - - This forces the key to be updated if it already exists in the keyring. - type: bool - default: false - keyring: - description: - - The full path to the keyring folder on the remote server. - - If not specified, module will use pacman's default (V(/etc/pacman.d/gnupg)). - - Useful if the remote system requires an alternative gnupg directory. - type: path - default: /etc/pacman.d/gnupg - state: - description: - - Ensures that the key is present (added) or absent (revoked). - default: present - choices: [ absent, present ] - type: str -''' + id: + description: + - The 40 character identifier of the key. + - Including this allows check mode to correctly report the changed state. + - Do not specify a subkey ID, instead specify the primary key ID. + required: true + type: str + data: + description: + - The keyfile contents to add to the keyring. + - Must be of C(PGP PUBLIC KEY BLOCK) type. + type: str + file: + description: + - The path to a keyfile on the remote server to add to the keyring. + - Remote file must be of C(PGP PUBLIC KEY BLOCK) type. + type: path + url: + description: + - The URL to retrieve keyfile from. + - Remote file must be of C(PGP PUBLIC KEY BLOCK) type. + type: str + keyserver: + description: + - The keyserver used to retrieve key from. + type: str + verify: + description: + - Whether or not to verify the keyfile's key ID against specified key ID. + type: bool + default: true + force_update: + description: + - This forces the key to be updated if it already exists in the keyring. + type: bool + default: false + keyring: + description: + - The full path to the keyring folder on the remote server. + - If not specified, module will use pacman's default (V(/etc/pacman.d/gnupg)). + - Useful if the remote system requires an alternative gnupg directory. + type: path + default: /etc/pacman.d/gnupg + state: + description: + - Ensures that the key is present (added) or absent (revoked). + default: present + choices: [absent, present] + type: str +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Import a key via local file community.general.pacman_key: id: 01234567890ABCDE01234567890ABCDE12345678 @@ -119,9 +118,9 @@ EXAMPLES = ''' community.general.pacman_key: id: 01234567890ABCDE01234567890ABCDE12345678 state: absent -''' +""" -RETURN = r''' # ''' +RETURN = r""" # """ import os.path import tempfile diff --git a/plugins/modules/pagerduty.py b/plugins/modules/pagerduty.py index 853bd6d797..8d83374c34 100644 --- a/plugins/modules/pagerduty.py +++ b/plugins/modules/pagerduty.py @@ -9,84 +9,82 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' - +DOCUMENTATION = r""" module: pagerduty short_description: Create PagerDuty maintenance windows description: - - This module will let you create PagerDuty maintenance windows + - This module will let you create PagerDuty maintenance windows. 
author: - - "Andrew Newdigate (@suprememoocow)" - - "Dylan Silva (@thaumos)" - - "Justin Johns (!UNKNOWN)" - - "Bruce Pennypacker (@bpennypacker)" + - "Andrew Newdigate (@suprememoocow)" + - "Dylan Silva (@thaumos)" + - "Justin Johns (!UNKNOWN)" + - "Bruce Pennypacker (@bpennypacker)" requirements: - - PagerDuty API access + - PagerDuty API access extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - state: - type: str - description: - - Create a maintenance window or get a list of ongoing windows. - required: true - choices: [ "running", "started", "ongoing", "absent" ] - name: - type: str - description: - - PagerDuty unique subdomain. Obsolete. It is not used with PagerDuty REST v2 API. - user: - type: str - description: - - PagerDuty user ID. Obsolete. Please, use O(token) for authorization. - token: - type: str - description: - - A pagerduty token, generated on the pagerduty site. It is used for authorization. - required: true - requester_id: - type: str - description: - - ID of user making the request. Only needed when creating a maintenance_window. - service: - type: list - elements: str - description: - - A comma separated list of PagerDuty service IDs. - aliases: [ services ] - window_id: - type: str - description: - - ID of maintenance window. Only needed when absent a maintenance_window. - hours: - type: str - description: - - Length of maintenance window in hours. - default: '1' - minutes: - type: str - description: - - Maintenance window in minutes (this is added to the hours). - default: '0' - desc: - type: str - description: - - Short description of maintenance window. - default: Created by Ansible - validate_certs: - description: - - If V(false), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - type: bool - default: true -''' + state: + type: str + description: + - Create a maintenance window or get a list of ongoing windows. + required: true + choices: ["running", "started", "ongoing", "absent"] + name: + type: str + description: + - PagerDuty unique subdomain. Obsolete. It is not used with PagerDuty REST v2 API. + user: + type: str + description: + - PagerDuty user ID. Obsolete. Please, use O(token) for authorization. + token: + type: str + description: + - A pagerduty token, generated on the pagerduty site. It is used for authorization. + required: true + requester_id: + type: str + description: + - ID of user making the request. Only needed when creating a maintenance_window. + service: + type: list + elements: str + description: + - A comma separated list of PagerDuty service IDs. + aliases: [services] + window_id: + type: str + description: + - ID of maintenance window. Only needed when absent a maintenance_window. + hours: + type: str + description: + - Length of maintenance window in hours. + default: '1' + minutes: + type: str + description: + - Maintenance window in minutes (this is added to the hours). + default: '0' + desc: + type: str + description: + - Short description of maintenance window. + default: Created by Ansible + validate_certs: + description: + - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates. 
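Not part of the diff: a minimal sketch of the maintenance-window workflow these options describe; the token, requester ID and service ID are placeholders.

# Hedged sketch; token, requester and service IDs are placeholders.
- name: Open a two-hour maintenance window for one service
  community.general.pagerduty:
    token: "{{ pagerduty_token }}"
    requester_id: PABC123
    service: PDEF456
    hours: '2'
    desc: Scheduled kernel updates
    state: running

- name: List ongoing maintenance windows
  community.general.pagerduty:
    token: "{{ pagerduty_token }}"
    state: ongoing
  register: pd_windows
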
+ type: bool + default: true +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: List ongoing maintenance windows using a token community.general.pagerduty: name: companyabc @@ -143,7 +141,7 @@ EXAMPLES = ''' token: yourtoken state: absent window_id: "{{ pd_window.result.maintenance_windows[0].id }}" -''' +""" import datetime import json diff --git a/plugins/modules/pagerduty_alert.py b/plugins/modules/pagerduty_alert.py index 3c0327e5ab..050dcd17e9 100644 --- a/plugins/modules/pagerduty_alert.py +++ b/plugins/modules/pagerduty_alert.py @@ -8,150 +8,149 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' - +DOCUMENTATION = r""" module: pagerduty_alert short_description: Trigger, acknowledge or resolve PagerDuty incidents description: - - This module will let you trigger, acknowledge or resolve a PagerDuty incident by sending events + - This module will let you trigger, acknowledge or resolve a PagerDuty incident by sending events. author: - - "Amanpreet Singh (@ApsOps)" - - "Xiao Shen (@xshen1)" + - "Amanpreet Singh (@ApsOps)" + - "Xiao Shen (@xshen1)" requirements: - - PagerDuty API access + - PagerDuty API access extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - name: - type: str - description: - - PagerDuty unique subdomain. Obsolete. It is not used with PagerDuty REST v2 API. - api_key: - type: str - description: - - The pagerduty API key (readonly access), generated on the pagerduty site. - - Required if O(api_version=v1). - integration_key: - type: str - description: - - The GUID of one of your 'Generic API' services. - - This is the 'integration key' listed on a 'Integrations' tab of PagerDuty service. - service_id: - type: str - description: - - ID of PagerDuty service when incidents will be triggered, acknowledged or resolved. - - Required if O(api_version=v1). - service_key: - type: str - description: - - The GUID of one of your 'Generic API' services. Obsolete. Please use O(integration_key). - state: - type: str - description: - - Type of event to be sent. - required: true - choices: - - 'triggered' - - 'acknowledged' - - 'resolved' - api_version: - type: str - description: - - The API version we want to use to run the module. - - V1 is more limited with option we can provide to trigger incident. - - V2 has more variables for example, O(severity), O(source), O(custom_details), etc. - default: 'v1' - choices: - - 'v1' - - 'v2' - version_added: 7.4.0 - client: - type: str - description: - - The name of the monitoring client that is triggering this event. - required: false - client_url: - type: str - description: - - The URL of the monitoring client that is triggering this event. - required: false - component: - type: str - description: - - Component of the source machine that is responsible for the event, for example C(mysql) or C(eth0). - required: false - version_added: 7.4.0 - custom_details: - type: dict - description: - - Additional details about the event and affected system. - - A dictionary with custom keys and values. - required: false - version_added: 7.4.0 - desc: - type: str - description: - - For O(state=triggered) - Required. Short description of the problem that led to this trigger. This field (or a truncated version) - will be used when generating phone calls, SMS messages and alert emails. 
It will also appear on the incidents tables in the PagerDuty UI. - The maximum length is 1024 characters. - - For O(state=acknowledged) or O(state=resolved) - Text that will appear in the incident's log associated with this event. - required: false - default: Created via Ansible - incident_class: - type: str - description: - - The class/type of the event, for example C(ping failure) or C(cpu load). - required: false - version_added: 7.4.0 - incident_key: - type: str - description: - - Identifies the incident to which this O(state) should be applied. - - For O(state=triggered) - If there's no open (i.e. unresolved) incident with this key, a new one will be created. If there's already an - open incident with a matching key, this event will be appended to that incident's log. The event key provides an easy way to 'de-dup' - problem reports. If no O(incident_key) is provided, then it will be generated by PagerDuty. - - For O(state=acknowledged) or O(state=resolved) - This should be the incident_key you received back when the incident was first opened by a - trigger event. Acknowledge events referencing resolved or nonexistent incidents will be discarded. - required: false - link_url: - type: str - description: - - Relevant link url to the alert. For example, the website or the job link. - required: false - version_added: 7.4.0 - link_text: - type: str - description: - - A short description of the link_url. - required: false - version_added: 7.4.0 - source: - type: str - description: - - The unique location of the affected system, preferably a hostname or FQDN. - - Required in case of O(state=trigger) and O(api_version=v2). - required: false - version_added: 7.4.0 - severity: - type: str - description: - - The perceived severity of the status the event is describing with respect to the affected system. - - Required in case of O(state=trigger) and O(api_version=v2). - default: 'critical' - choices: - - 'critical' - - 'warning' - - 'error' - - 'info' - version_added: 7.4.0 -''' + name: + type: str + description: + - PagerDuty unique subdomain. Obsolete. It is not used with PagerDuty REST v2 API. + api_key: + type: str + description: + - The pagerduty API key (readonly access), generated on the pagerduty site. + - Required if O(api_version=v1). + integration_key: + type: str + description: + - The GUID of one of your 'Generic API' services. + - This is the 'integration key' listed on a 'Integrations' tab of PagerDuty service. + service_id: + type: str + description: + - ID of PagerDuty service when incidents will be triggered, acknowledged or resolved. + - Required if O(api_version=v1). + service_key: + type: str + description: + - The GUID of one of your 'Generic API' services. Obsolete. Please use O(integration_key). + state: + type: str + description: + - Type of event to be sent. + required: true + choices: + - 'triggered' + - 'acknowledged' + - 'resolved' + api_version: + type: str + description: + - The API version we want to use to run the module. + - V1 is more limited with option we can provide to trigger incident. + - V2 has more variables for example, O(severity), O(source), O(custom_details) and so on. + default: 'v1' + choices: + - 'v1' + - 'v2' + version_added: 7.4.0 + client: + type: str + description: + - The name of the monitoring client that is triggering this event. + required: false + client_url: + type: str + description: + - The URL of the monitoring client that is triggering this event. 
+ required: false + component: + type: str + description: + - Component of the source machine that is responsible for the event, for example C(mysql) or C(eth0). + required: false + version_added: 7.4.0 + custom_details: + type: dict + description: + - Additional details about the event and affected system. + - A dictionary with custom keys and values. + required: false + version_added: 7.4.0 + desc: + type: str + description: + - For O(state=triggered) - Required. Short description of the problem that led to this trigger. This field (or a truncated version) will + be used when generating phone calls, SMS messages and alert emails. It will also appear on the incidents tables in the PagerDuty UI. The + maximum length is 1024 characters. + - For O(state=acknowledged) or O(state=resolved) - Text that will appear in the incident's log associated with this event. + required: false + default: Created via Ansible + incident_class: + type: str + description: + - The class/type of the event, for example C(ping failure) or C(cpu load). + required: false + version_added: 7.4.0 + incident_key: + type: str + description: + - Identifies the incident to which this O(state) should be applied. + - For O(state=triggered) - If there is no open (in other words unresolved) incident with this key, a new one will be created. If there is already an + open incident with a matching key, this event will be appended to that incident's log. The event key provides an easy way to 'de-dup' + problem reports. If no O(incident_key) is provided, then it will be generated by PagerDuty. + - For O(state=acknowledged) or O(state=resolved) - This should be the incident_key you received back when the incident was first opened + by a trigger event. Acknowledge events referencing resolved or nonexistent incidents will be discarded. + required: false + link_url: + type: str + description: + - Relevant link URL to the alert. For example, the website or the job link. + required: false + version_added: 7.4.0 + link_text: + type: str + description: + - A short description of the O(link_url). + required: false + version_added: 7.4.0 + source: + type: str + description: + - The unique location of the affected system, preferably a hostname or FQDN. + - Required in case of O(state=trigger) and O(api_version=v2). + required: false + version_added: 7.4.0 + severity: + type: str + description: + - The perceived severity of the status the event is describing with respect to the affected system. + - Required in case of O(state=trigger) and O(api_version=v2). 
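An illustrative sketch (not in the diff) of an Events API v2 trigger/resolve pair using the options documented above; the integration key, host name and custom details are placeholders.

# Hedged sketch; integration key, host and details are placeholders.
- name: Trigger an alert through the Events API v2
  community.general.pagerduty_alert:
    integration_key: "{{ pd_integration_key }}"
    api_version: v2
    state: triggered
    desc: Disk usage above 90 percent on db01
    source: db01.example.com
    severity: warning
    incident_key: db01-disk-usage
    custom_details:
      mountpoint: /var
      usage_percent: 92

- name: Resolve the alert once the condition clears
  community.general.pagerduty_alert:
    integration_key: "{{ pd_integration_key }}"
    api_version: v2
    state: resolved
    incident_key: db01-disk-usage
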
+ default: 'critical' + choices: + - 'critical' + - 'warning' + - 'error' + - 'info' + version_added: 7.4.0 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Trigger an incident with just the basic options community.general.pagerduty_alert: name: companyabc @@ -226,7 +225,7 @@ EXAMPLES = ''' integration_key: xxx incident_key: somekey state: resolved -''' +""" import json from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/pagerduty_change.py b/plugins/modules/pagerduty_change.py index acd31fb447..39353f7575 100644 --- a/plugins/modules/pagerduty_change.py +++ b/plugins/modules/pagerduty_change.py @@ -8,7 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: pagerduty_change short_description: Track a code or infrastructure change as a PagerDuty change event version_added: 1.3.0 @@ -31,8 +31,7 @@ attributes: options: integration_key: description: - - The integration key that identifies the service the change was made to. - This can be found by adding an integration to a service in PagerDuty. + - The integration key that identifies the service the change was made to. This can be found by adding an integration to a service in PagerDuty. required: true type: str summary: @@ -82,14 +81,14 @@ options: type: str validate_certs: description: - - If V(false), SSL certificates for the target URL will not be validated. - This should only be used on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates for the target URL will not be validated. This should only be used on personally controlled sites using + self-signed certificates. required: false default: true type: bool -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Track the deployment as a PagerDuty change event community.general.pagerduty_change: integration_key: abc123abc123abc123abc123abc123ab @@ -106,7 +105,7 @@ EXAMPLES = ''' environment: production link_url: https://github.com/ansible-collections/community.general/pull/1269 link_text: View changes on GitHub -''' +""" from ansible.module_utils.urls import fetch_url from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/pagerduty_user.py b/plugins/modules/pagerduty_user.py index eb8a309562..e03342c792 100644 --- a/plugins/modules/pagerduty_user.py +++ b/plugins/modules/pagerduty_user.py @@ -8,64 +8,63 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: pagerduty_user short_description: Manage a user account on PagerDuty description: - - This module manages the creation/removal of a user account on PagerDuty. + - This module manages the creation/removal of a user account on PagerDuty. version_added: '1.3.0' author: Zainab Alsaffar (@zanssa) requirements: - - pdpyras python module = 4.1.1 - - PagerDuty API Access + - pdpyras python module = 4.1.1 + - PagerDuty API Access extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - access_token: - description: - - An API access token to authenticate with the PagerDuty REST API. - required: true - type: str - pd_user: - description: - - Name of the user in PagerDuty. - required: true - type: str - pd_email: - description: - - The user's email address. 
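A hedged example of the pagerduty_user options shown in this hunk; the token, user name, email and team are placeholders.

# Hedged sketch; token, names and team are placeholders.
- name: Ensure a responder account exists and belongs to one team
  community.general.pagerduty_user:
    access_token: "{{ pd_access_token }}"
    pd_user: Jane Doe
    pd_email: jane.doe@example.com
    pd_role: responder
    pd_teams:
      - operations
    state: present
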
- - O(pd_email) is the unique identifier used and cannot be updated using this module. - required: true - type: str - pd_role: - description: - - The user's role. - choices: ['global_admin', 'manager', 'responder', 'observer', 'stakeholder', 'limited_stakeholder', 'restricted_access'] - default: 'responder' - type: str - state: - description: - - State of the user. - - On V(present), it creates a user if the user doesn't exist. - - On V(absent), it removes a user if the account exists. - choices: ['present', 'absent'] - default: 'present' - type: str - pd_teams: - description: - - The teams to which the user belongs. - - Required if O(state=present). - type: list - elements: str -''' + access_token: + description: + - An API access token to authenticate with the PagerDuty REST API. + required: true + type: str + pd_user: + description: + - Name of the user in PagerDuty. + required: true + type: str + pd_email: + description: + - The user's email address. + - O(pd_email) is the unique identifier used and cannot be updated using this module. + required: true + type: str + pd_role: + description: + - The user's role. + choices: ['global_admin', 'manager', 'responder', 'observer', 'stakeholder', 'limited_stakeholder', 'restricted_access'] + default: 'responder' + type: str + state: + description: + - State of the user. + - On V(present), it creates a user if the user does not exist. + - On V(absent), it removes a user if the account exists. + choices: ['present', 'absent'] + default: 'present' + type: str + pd_teams: + description: + - The teams to which the user belongs. + - Required if O(state=present). + type: list + elements: str +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a user account on PagerDuty community.general.pagerduty_user: access_token: 'Your_Access_token' @@ -81,9 +80,9 @@ EXAMPLES = r''' pd_user: user_full_name pd_email: user_email state: "absent" -''' +""" -RETURN = r''' # ''' +RETURN = r""" # """ from os import path from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/pam_limits.py b/plugins/modules/pam_limits.py index f97ea6602b..516b61fec1 100644 --- a/plugins/modules/pam_limits.py +++ b/plugins/modules/pam_limits.py @@ -8,11 +8,10 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: pam_limits author: -- "Sebastien Rohaut (@usawa)" + - "Sebastien Rohaut (@usawa)" short_description: Modify Linux PAM limits description: - The M(community.general.pam_limits) module modifies PAM limits. @@ -38,32 +37,32 @@ options: description: - Limit type, see C(man 5 limits.conf) for an explanation. required: true - choices: [ "hard", "soft", "-" ] + choices: ["hard", "soft", "-"] limit_item: type: str description: - The limit to be set. 
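Not part of the diff: a short sketch combining the limit_type/limit_item/value options with O(use_max), which keeps whichever value is larger; the domain and value are placeholders.

# Hedged sketch; domain and value are placeholders.
- name: Raise the soft nofile limit for the deploy user, keeping the larger value
  community.general.pam_limits:
    domain: deploy
    limit_type: soft
    limit_item: nofile
    value: '65536'
    use_max: true
    backup: true
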
required: true choices: - - "core" - - "data" - - "fsize" - - "memlock" - - "nofile" - - "rss" - - "stack" - - "cpu" - - "nproc" - - "as" - - "maxlogins" - - "maxsyslogins" - - "priority" - - "locks" - - "sigpending" - - "msgqueue" - - "nice" - - "rtprio" - - "chroot" + - "core" + - "data" + - "fsize" + - "memlock" + - "nofile" + - "rss" + - "stack" + - "cpu" + - "nproc" + - "as" + - "maxlogins" + - "maxsyslogins" + - "priority" + - "locks" + - "sigpending" + - "msgqueue" + - "nice" + - "rtprio" + - "chroot" value: type: str description: @@ -74,24 +73,21 @@ options: required: true backup: description: - - Create a backup file including the timestamp information so you can get - the original file back if you somehow clobbered it incorrectly. + - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly. required: false type: bool default: false use_min: description: - If set to V(true), the minimal value will be used or conserved. - - If the specified value is inferior to the value in the file, - file content is replaced with the new value, else content is not modified. + - If the specified value is inferior to the value in the file, file content is replaced with the new value, else content is not modified. required: false type: bool default: false use_max: description: - If set to V(true), the maximal value will be used or conserved. - - If the specified value is superior to the value in the file, - file content is replaced with the new value, else content is not modified. + - If the specified value is superior to the value in the file, file content is replaced with the new value, else content is not modified. required: false type: bool default: false @@ -109,9 +105,9 @@ options: default: '' notes: - If O(dest) file does not exist, it is created. -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Add or modify nofile soft limit for the user joe community.general.pam_limits: domain: joe @@ -141,7 +137,7 @@ EXAMPLES = r''' limit_type: hard limit_item: nofile value: 39693561 -''' +""" import os import re @@ -339,7 +335,7 @@ def main(): pass # Move tempfile to newfile - module.atomic_move(nf.name, limits_conf) + module.atomic_move(os.path.abspath(nf.name), os.path.abspath(limits_conf)) try: nf.close() diff --git a/plugins/modules/pamd.py b/plugins/modules/pamd.py index 0ad4c8787e..6502922bc1 100644 --- a/plugins/modules/pamd.py +++ b/plugins/modules/pamd.py @@ -9,15 +9,14 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: pamd author: - - Kenneth D. Evensen (@kevensen) + - Kenneth D. Evensen (@kevensen) short_description: Manage PAM Modules description: - Edit PAM service's type, control, module path and module arguments. - - In order for a PAM rule to be modified, the type, control and - module_path must match an existing rule. See man(5) pam.d for details. + - In order for a PAM rule to be modified, the type, control and module_path must match an existing rule. See man(5) pam.d for details. notes: - This module does not handle authselect profiles. extends_documentation_fragment: @@ -30,8 +29,7 @@ attributes: options: name: description: - - The name generally refers to the PAM service file to - change, for example system-auth. + - The name generally refers to the PAM service file to change, for example system-auth. 
type: str required: true type: @@ -40,12 +38,11 @@ options: - The O(type), O(control), and O(module_path) options all must match a rule to be modified. type: str required: true - choices: [ account, -account, auth, -auth, password, -password, session, -session ] + choices: [account, -account, auth, -auth, password, -password, session, -session] control: description: - The control of the PAM rule being modified. - - This may be a complicated control with brackets. If this is the case, be - sure to put "[bracketed controls]" in quotes. + - This may be a complicated control with brackets. If this is the case, be sure to put "[bracketed controls]" in quotes. - The O(type), O(control), and O(module_path) options all must match a rule to be modified. type: str required: true @@ -57,55 +54,49 @@ options: required: true new_type: description: - - The new type to assign to the new rule. + - The new type to assign to the new rule. type: str - choices: [ account, -account, auth, -auth, password, -password, session, -session ] + choices: [account, -account, auth, -auth, password, -password, session, -session] new_control: description: - - The new control to assign to the new rule. + - The new control to assign to the new rule. type: str new_module_path: description: - - The new module path to be assigned to the new rule. + - The new module path to be assigned to the new rule. type: str module_arguments: description: - - When O(state=updated), the O(module_arguments) will replace existing module_arguments. - - When O(state=args_absent) args matching those listed in O(module_arguments) will be removed. - - When O(state=args_present) any args listed in O(module_arguments) are added if - missing from the existing rule. - - Furthermore, if the module argument takes a value denoted by C(=), - the value will be changed to that specified in module_arguments. + - When O(state=updated), the O(module_arguments) will replace existing module_arguments. + - When O(state=args_absent) args matching those listed in O(module_arguments) will be removed. + - When O(state=args_present) any args listed in O(module_arguments) are added if missing from the existing rule. + - Furthermore, if the module argument takes a value denoted by C(=), the value will be changed to that specified in module_arguments. type: list elements: str state: description: - - The default of V(updated) will modify an existing rule if type, - control and module_path all match an existing rule. - - With V(before), the new rule will be inserted before a rule matching type, - control and module_path. - - Similarly, with V(after), the new rule will be inserted after an existing rulematching type, - control and module_path. - - With either V(before) or V(after) O(new_type), O(new_control), and O(new_module_path) must all be specified. - - If state is V(args_absent) or V(args_present), O(new_type), O(new_control), and O(new_module_path) will be ignored. - - State V(absent) will remove the rule. + - The default of V(updated) will modify an existing rule if type, control and module_path all match an existing rule. + - With V(before), the new rule will be inserted before a rule matching type, control and module_path. + - Similarly, with V(after), the new rule will be inserted after an existing rulematching type, control and module_path. + - With either V(before) or V(after) O(new_type), O(new_control), and O(new_module_path) must all be specified. 
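Illustrative only: an args_present sketch that matches an existing rule by type, control and module_path, as the state description above requires; the service and argument follow the module's own examples.

# Hedged sketch based on the module's documented crond example.
- name: Ensure the quiet argument is present on pam_succeed_if.so in the crond service
  community.general.pamd:
    name: crond
    type: session
    control: '[success=1 default=ignore]'
    module_path: pam_succeed_if.so
    module_arguments:
      - quiet
    state: args_present
    backup: true
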
+ - If state is V(args_absent) or V(args_present), O(new_type), O(new_control), and O(new_module_path) will be ignored. + - State V(absent) will remove the rule. type: str - choices: [ absent, before, after, args_absent, args_present, updated ] + choices: [absent, before, after, args_absent, args_present, updated] default: updated path: description: - - This is the path to the PAM service files. + - This is the path to the PAM service files. type: path default: /etc/pam.d backup: - description: - - Create a backup file including the timestamp information so you can - get the original file back if you somehow clobbered it incorrectly. - type: bool - default: false -''' + description: + - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly. + type: bool + default: false +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Update pamd rule's control in /etc/pam.d/system-auth community.general.pamd: name: system-auth @@ -133,8 +124,7 @@ EXAMPLES = r''' new_module_path: pam_faillock.so state: before -- name: Insert a new rule pam_wheel.so with argument 'use_uid' after an \ - existing rule pam_rootok.so +- name: Insert a new rule pam_wheel.so with argument 'use_uid' after an existing rule pam_rootok.so community.general.pamd: name: su type: auth @@ -193,8 +183,8 @@ EXAMPLES = r''' control: '[success=1 default=ignore]' module_path: pam_succeed_if.so module_arguments: - - crond - - quiet + - crond + - quiet state: args_present - name: Module arguments requiring commas must be listed as a Yaml list @@ -204,7 +194,7 @@ EXAMPLES = r''' control: required module_path: pam_access.so module_arguments: - - listsep=, + - listsep=, state: args_present - name: Update specific argument value in a rule @@ -226,21 +216,20 @@ EXAMPLES = r''' type: auth module_path: pam_sss.so control: 'requisite' -''' +""" -RETURN = r''' +RETURN = r""" change_count: - description: How many rules were changed. - type: int - sample: 1 - returned: success + description: How many rules were changed. + type: int + sample: 1 + returned: success backupdest: - description: - - "The file name of the backup file, if created." - returned: success - type: str -... -''' + description: + - The file name of the backup file, if created. + returned: success + type: str +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/parted.py b/plugins/modules/parted.py index b3616a8ecd..43c34ff9e5 100644 --- a/plugins/modules/parted.py +++ b/plugins/modules/parted.py @@ -9,21 +9,18 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" author: - - Fabrizio Colonna (@ColOfAbRiX) + - Fabrizio Colonna (@ColOfAbRiX) module: parted short_description: Configure block device partitions description: - - This module allows configuring block device partition using the C(parted) - command line tool. For a full description of the fields and the options - check the GNU parted manual. + - This module allows configuring block device partition using the C(parted) command line tool. For a full description of the fields and the + options check the GNU parted manual. requirements: - This module requires C(parted) version 1.8.3 and above. - Option O(align) (except V(undefined)) requires C(parted) 2.1 or above. - - If the version of C(parted) is below 3.1, it requires a Linux version running - the C(sysfs) file system C(/sys/). 
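A hedged sketch (not in the diff) of creating a GPT partition with parted and reading back the documented RV(partition_info) return value; the device and sizes are placeholders.

# Hedged sketch; device and sizes are placeholders.
- name: Create a single GPT data partition starting at 1MiB
  community.general.parted:
    device: /dev/sdb
    label: gpt
    name: data
    number: 1
    part_start: 1MiB
    part_end: "100%"
    fs_type: ext4
    state: present
  register: sdb_layout

- name: Show the reported partition table
  ansible.builtin.debug:
    var: sdb_layout.partition_info
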
+ - If the version of C(parted) is below 3.1, it requires a Linux version running the C(sysfs) file system C(/sys/). - Requires the C(resizepart) command when using the O(resize) parameter. extends_documentation_fragment: - community.general.attributes @@ -36,15 +33,14 @@ options: device: description: - The block device (disk) where to operate. - - Regular files can also be partitioned, but it is recommended to create a - loopback device using C(losetup) to easily access its partitions. + - Regular files can also be partitioned, but it is recommended to create a loopback device using C(losetup) to easily access its partitions. type: str required: true align: description: - Set alignment for newly created partitions. Use V(undefined) for parted default alignment. type: str - choices: [ cylinder, minimal, none, optimal, undefined ] + choices: [cylinder, minimal, none, optimal, undefined] default: optimal number: description: @@ -53,46 +49,40 @@ options: type: int unit: description: - - Selects the current default unit that Parted will use to display - locations and capacities on the disk and to interpret those given by the - user if they are not suffixed by an unit. + - Selects the current default unit that Parted will use to display locations and capacities on the disk and to interpret those given by + the user if they are not suffixed by an unit. - When fetching information about a disk, it is recommended to always specify a unit. type: str - choices: [ s, B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, '%', cyl, chs, compact ] + choices: [s, B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, '%', cyl, chs, compact] default: KiB label: description: - Disk label type or partition table to use. - - If O(device) already contains a different label, it will be changed to O(label) - and any previous partitions will be lost. + - If O(device) already contains a different label, it will be changed to O(label) and any previous partitions will be lost. - A O(name) must be specified for a V(gpt) partition table. type: str - choices: [ aix, amiga, bsd, dvh, gpt, loop, mac, msdos, pc98, sun ] + choices: [aix, amiga, bsd, dvh, gpt, loop, mac, msdos, pc98, sun] default: msdos part_type: description: - May be specified only with O(label=msdos) or O(label=dvh). - Neither O(part_type) nor O(name) may be used with O(label=sun). type: str - choices: [ extended, logical, primary ] + choices: [extended, logical, primary] default: primary part_start: description: - - Where the partition will start as offset from the beginning of the disk, - that is, the "distance" from the start of the disk. Negative numbers - specify distance from the end of the disk. - - The distance can be specified with all the units supported by parted - (except compat) and it is case sensitive, for example V(10GiB), V(15%). + - Where the partition will start as offset from the beginning of the disk, that is, the "distance" from the start of the disk. Negative + numbers specify distance from the end of the disk. + - The distance can be specified with all the units supported by parted (except compat) and it is case sensitive, for example V(10GiB), V(15%). - Using negative values may require setting of O(fs_type) (see notes). type: str default: 0% part_end: description: - - Where the partition will end as offset from the beginning of the disk, - that is, the "distance" from the start of the disk. Negative numbers + - Where the partition will end as offset from the beginning of the disk, that is, the "distance" from the start of the disk. 
Negative numbers specify distance from the end of the disk. - - The distance can be specified with all the units supported by parted - (except compat) and it is case sensitive, for example V(10GiB), V(15%). + - The distance can be specified with all the units supported by parted (except compat) and it is case sensitive, for example V(10GiB), V(15%). type: str default: 100% name: @@ -108,7 +98,7 @@ options: - Whether to create or delete a partition. - If set to V(info) the module will only return the device information. type: str - choices: [ absent, present, info ] + choices: [absent, present, info] default: info fs_type: description: @@ -124,18 +114,15 @@ options: version_added: '1.3.0' notes: - - When fetching information about a new disk and when the version of parted - installed on the system is before version 3.1, the module queries the kernel - through C(/sys/) to obtain disk information. In this case the units CHS and - CYL are not supported. - - Negative O(part_start) start values were rejected if O(fs_type) was not given. - This bug was fixed in parted 3.2.153. If you want to use negative O(part_start), - specify O(fs_type) as well or make sure your system contains newer parted. -''' + - When fetching information about a new disk and when the version of parted installed on the system is before version 3.1, the module queries + the kernel through C(/sys/) to obtain disk information. In this case the units CHS and CYL are not supported. + - Negative O(part_start) start values were rejected if O(fs_type) was not given. This bug was fixed in parted 3.2.153. If you want to use negative + O(part_start), specify O(fs_type) as well or make sure your system contains newer parted. +""" -RETURN = r''' +RETURN = r""" partition_info: - description: Current partition information + description: Current partition information. returned: success type: complex contains: @@ -146,7 +133,7 @@ partition_info: description: List of device partitions. type: list script: - description: parted script executed by module + description: Parted script executed by module. type: str sample: { "disk": { @@ -177,9 +164,9 @@ partition_info: }], "script": "unit KiB print " } -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a new ext4 primary partition community.general.parted: device: /dev/sdb @@ -204,7 +191,7 @@ EXAMPLES = r''' community.general.parted: device: /dev/sdb number: 2 - flags: [ lvm ] + flags: [lvm] state: present part_start: 1GiB @@ -235,7 +222,7 @@ EXAMPLES = r''' part_end: "100%" resize: true state: present -''' +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/pear.py b/plugins/modules/pear.py index 36770de6c5..ba8f5f9ca2 100644 --- a/plugins/modules/pear.py +++ b/plugins/modules/pear.py @@ -12,54 +12,53 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: pear short_description: Manage pear/pecl packages description: - - Manage PHP packages with the pear package manager. + - Manage PHP packages with the pear package manager. author: - - Jonathan Lestrelin (@jle64) + - Jonathan Lestrelin (@jle64) extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - name: - type: str - description: - - Name of the package to install, upgrade, or remove. 
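Not part of the diff: a brief sketch of the pear O(name)/O(state) options documented here; the comma-separated multi-package form mirrors the module's own examples.

# Hedged sketch using package names from the module's own examples.
- name: Keep Net_URL2 at the latest released version
  community.general.pear:
    name: Net_URL2
    state: latest

- name: Remove a pear and a pecl package in one task
  community.general.pear:
    name: Net_URL2,pecl/json_post
    state: absent
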
- required: true - aliases: [pkg] - state: - type: str - description: - - Desired state of the package. - default: "present" - choices: ["present", "installed", "latest", "absent", "removed"] - executable: - type: path - description: - - Path to the pear executable. - prompts: - description: - - List of regular expressions that can be used to detect prompts during pear package installation to answer the expected question. - - Prompts will be processed in the same order as the packages list. - - You can optionally specify an answer to any question in the list. - - If no answer is provided, the list item will only contain the regular expression. - - "To specify an answer, the item will be a dict with the regular expression as key and the answer as value C(my_regular_expression: 'an_answer')." - - You can provide a list containing items with or without answer. - - A prompt list can be shorter or longer than the packages list but will issue a warning. - - If you want to specify that a package will not need prompts in the middle of a list, V(null). - type: list - elements: raw - version_added: 0.2.0 -''' + name: + type: str + description: + - Name of the package to install, upgrade, or remove. + required: true + aliases: [pkg] + state: + type: str + description: + - Desired state of the package. + default: "present" + choices: ["present", "installed", "latest", "absent", "removed"] + executable: + type: path + description: + - Path to the pear executable. + prompts: + description: + - List of regular expressions that can be used to detect prompts during pear package installation to answer the expected question. + - Prompts will be processed in the same order as the packages list. + - You can optionally specify an answer to any question in the list. + - If no answer is provided, the list item will only contain the regular expression. + - "To specify an answer, the item will be a dict with the regular expression as key and the answer as value C(my_regular_expression: 'an_answer')." + - You can provide a list containing items with or without answer. + - A prompt list can be shorter or longer than the packages list but will issue a warning. + - If you want to specify that a package will not need prompts in the middle of a list, V(null). + type: list + elements: raw + version_added: 0.2.0 +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Install pear package community.general.pear: name: Net_URL2 @@ -75,19 +74,18 @@ EXAMPLES = r''' name: pecl/apcu state: present prompts: - - (.*)Enable internal debugging in APCu \[no\] + - (.*)Enable internal debugging in APCu \[no\] - name: Install pecl package with expected prompt and an answer community.general.pear: name: pecl/apcu state: present prompts: - - (.*)Enable internal debugging in APCu \[no\]: "yes" + - (.*)Enable internal debugging in APCu \[no\]: "yes" -- name: Install multiple pear/pecl packages at once with prompts. - Prompts will be processed on the same order as the packages order. - If there is more prompts than packages, packages without prompts will be installed without any prompt expected. - If there is more packages than prompts, additional prompts will be ignored. +- name: Install multiple pear/pecl packages at once with prompts. Prompts will be processed on the same order as the packages order. If there + is more prompts than packages, packages without prompts will be installed without any prompt expected. If there is more packages than prompts, + additional prompts will be ignored. 
community.general.pear: name: pecl/gnupg, pecl/apcu state: present @@ -95,10 +93,9 @@ EXAMPLES = r''' - I am a test prompt because gnupg doesnt asks anything - (.*)Enable internal debugging in APCu \[no\]: "yes" -- name: Install multiple pear/pecl packages at once skipping the first prompt. - Prompts will be processed on the same order as the packages order. - If there is more prompts than packages, packages without prompts will be installed without any prompt expected. - If there is more packages than prompts, additional prompts will be ignored. +- name: Install multiple pear/pecl packages at once skipping the first prompt. Prompts will be processed on the same order as the packages order. + If there is more prompts than packages, packages without prompts will be installed without any prompt expected. If there is more packages + than prompts, additional prompts will be ignored. community.general.pear: name: pecl/gnupg, pecl/apcu state: present @@ -115,7 +112,7 @@ EXAMPLES = r''' community.general.pear: name: Net_URL2,pecl/json_post state: absent -''' +""" import os diff --git a/plugins/modules/pids.py b/plugins/modules/pids.py index 590f1e85a5..99b52ef1dd 100644 --- a/plugins/modules/pids.py +++ b/plugins/modules/pids.py @@ -111,7 +111,7 @@ class PSAdapter(object): attributes['cmdline'] and compare_lower(attributes['cmdline'][0], name)) def _get_proc_attributes(self, proc, *attributes): - return dict((attribute, self._get_attribute_from_proc(proc, attribute)) for attribute in attributes) + return {attribute: self._get_attribute_from_proc(proc, attribute) for attribute in attributes} @staticmethod @abc.abstractmethod diff --git a/plugins/modules/pip_package_info.py b/plugins/modules/pip_package_info.py index 6aea178cec..f7354e3678 100644 --- a/plugins/modules/pip_package_info.py +++ b/plugins/modules/pip_package_info.py @@ -27,6 +27,7 @@ options: type: list elements: path requirements: + - pip >= 20.3b1 (necessary for the C(--format) option) - The requested pip executables must be installed on the target. author: - Matthew Jones (@matburt) diff --git a/plugins/modules/pipx.py b/plugins/modules/pipx.py index e82e4c32a2..c317ae8da8 100644 --- a/plugins/modules/pipx.py +++ b/plugins/modules/pipx.py @@ -9,130 +9,150 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: pipx short_description: Manages applications installed with pipx version_added: 3.8.0 description: - - Manage Python applications installed in isolated virtualenvs using pipx. +- Manage Python applications installed in isolated virtualenvs using pipx. extends_documentation_fragment: - - community.general.attributes +- community.general.attributes +- community.general.pipx attributes: - check_mode: - support: full - diff_mode: - support: full + check_mode: + support: full + diff_mode: + support: full options: - state: - type: str - choices: [present, absent, install, uninstall, uninstall_all, inject, upgrade, upgrade_all, reinstall, reinstall_all, latest] - default: install - description: - - Desired state for the application. - - The states V(present) and V(absent) are aliases to V(install) and V(uninstall), respectively. - - The state V(latest) is equivalent to executing the task twice, with state V(install) and then V(upgrade). - It was added in community.general 5.5.0. - name: - type: str - description: - - > - The name of the application to be installed. It must to be a simple package name. 
- For passing package specifications or installing from URLs or directories, - please use the O(source) option. - source: - type: str - description: - - > - If the application source, such as a package with version specifier, or an URL, - directory or any other accepted specification. See C(pipx) documentation for more details. - - When specified, the C(pipx) command will use O(source) instead of O(name). - install_apps: - description: - - Add apps from the injected packages. - - Only used when O(state=inject). - type: bool - default: false - version_added: 6.5.0 - install_deps: - description: - - Include applications of dependent packages. - - Only used when O(state=install), O(state=latest), or O(state=inject). - type: bool - default: false - inject_packages: - description: - - Packages to be injected into an existing virtual environment. - - Only used when O(state=inject). - type: list - elements: str - force: - description: - - Force modification of the application's virtual environment. See C(pipx) for details. - - Only used when O(state=install), O(state=upgrade), O(state=upgrade_all), O(state=latest), or O(state=inject). - type: bool - default: false - include_injected: - description: - - Upgrade the injected packages along with the application. - - Only used when O(state=upgrade), O(state=upgrade_all), or O(state=latest). - - This is used with O(state=upgrade) and O(state=latest) since community.general 6.6.0. - type: bool - default: false - index_url: - description: - - Base URL of Python Package Index. - - Only used when O(state=install), O(state=upgrade), O(state=latest), or O(state=inject). - type: str - python: - description: - - Python version to be used when creating the application virtual environment. Must be 3.6+. - - Only used when O(state=install), O(state=latest), O(state=reinstall), or O(state=reinstall_all). - type: str - system_site_packages: - description: - - Give application virtual environment access to the system site-packages directory. - - Only used when O(state=install) or O(state=latest). - type: bool - default: false - version_added: 6.6.0 - executable: - description: - - Path to the C(pipx) installed in the system. - - > - If not specified, the module will use C(python -m pipx) to run the tool, - using the same Python interpreter as ansible itself. - type: path - editable: - description: - - Install the project in editable mode. - type: bool - default: false - version_added: 4.6.0 - pip_args: - description: - - Arbitrary arguments to pass directly to C(pip). - type: str - version_added: 4.6.0 + state: + type: str + choices: + - present + - absent + - install + - install_all + - uninstall + - uninstall_all + - inject + - uninject + - upgrade + - upgrade_shared + - upgrade_all + - reinstall + - reinstall_all + - latest + - pin + - unpin + default: install + description: + - Desired state for the application. + - The states V(present) and V(absent) are aliases to V(install) and V(uninstall), respectively. + - The state V(latest) is equivalent to executing the task twice, with state V(install) and then V(upgrade). It was added in community.general + 5.5.0. + - The states V(install_all), V(uninject), V(upgrade_shared), V(pin) and V(unpin) are only available in C(pipx>=1.6.0), make sure to have a + compatible version when using this option. These states have been added in community.general 9.4.0. + name: + type: str + description: + - The name of the application. 
In C(pipx) documentation it is also referred to as the name of the virtual environment where the application + will be installed. + - If O(name) is a simple package name without version specifiers, then that name is used as the Python package name to be installed. + - Use O(source) for passing package specifications or installing from URLs or directories. + source: + type: str + description: + - Source for the package. This option is used when O(state=install) or O(state=latest), and it is ignored with other states. + - Use O(source) when installing a Python package with version specifier, or from a local path, from a VCS URL or compressed file. + - The value of this option is passed as-is to C(pipx). + - O(name) is still required when using O(source) to establish the application name without fetching the package from a remote source. + install_apps: + description: + - Add apps from the injected packages. + - Only used when O(state=inject). + type: bool + default: false + version_added: 6.5.0 + install_deps: + description: + - Include applications of dependent packages. + - Only used when O(state=install), O(state=latest), or O(state=inject). + type: bool + default: false + inject_packages: + description: + - Packages to be injected into an existing virtual environment. + - Only used when O(state=inject). + type: list + elements: str + force: + description: + - Force modification of the application's virtual environment. See C(pipx) for details. + - Only used when O(state=install), O(state=upgrade), O(state=upgrade_all), O(state=latest), or O(state=inject). + type: bool + default: false + include_injected: + description: + - Upgrade the injected packages along with the application. + - Only used when O(state=upgrade), O(state=upgrade_all), or O(state=latest). + - This is used with O(state=upgrade) and O(state=latest) since community.general 6.6.0. + type: bool + default: false + index_url: + description: + - Base URL of Python Package Index. + - Only used when O(state=install), O(state=upgrade), O(state=latest), or O(state=inject). + type: str + python: + description: + - Python version to be used when creating the application virtual environment. Must be 3.6+. + - Only used when O(state=install), O(state=latest), O(state=reinstall), or O(state=reinstall_all). + type: str + system_site_packages: + description: + - Give application virtual environment access to the system site-packages directory. + - Only used when O(state=install) or O(state=latest). + type: bool + default: false + version_added: 6.6.0 + editable: + description: + - Install the project in editable mode. + type: bool + default: false + version_added: 4.6.0 + pip_args: + description: + - Arbitrary arguments to pass directly to C(pip). + type: str + version_added: 4.6.0 + suffix: + description: + - Optional suffix for virtual environment and executable names. + - "B(Warning:) C(pipx) documentation states this is an B(experimental) feature subject to change." + type: str + version_added: 9.3.0 + global: + version_added: 9.4.0 + spec_metadata: + description: + - Spec metadata file for O(state=install_all). + - This content of the file is usually generated with C(pipx list --json), and it can be obtained with M(community.general.pipx_info) with + O(community.general.pipx_info#module:include_raw=true) and obtaining the content from the RV(community.general.pipx_info#module:raw_output). 
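As the description above notes, the O(spec_metadata) file is simply the JSON that C(pipx list --json) prints. A minimal standalone sketch of producing such a file outside of Ansible; it assumes C(pipx) is on PATH and the file name is arbitrary, this is not part of the patch:

import subprocess

spec = subprocess.run(
    ["pipx", "list", "--json"], check=True, capture_output=True, text=True
).stdout
with open("pipx-spec.json", "w", encoding="utf-8") as fh:
    fh.write(spec)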
+ type: path + version_added: 9.4.0 notes: - - This module does not install the C(pipx) python package, however that can be easily done with the module M(ansible.builtin.pip). - - This module does not require C(pipx) to be in the shell C(PATH), but it must be loadable by Python as a module. - - > - This module will honor C(pipx) environment variables such as but not limited to C(PIPX_HOME) and C(PIPX_BIN_DIR) - passed using the R(environment Ansible keyword, playbooks_environment). - - This module requires C(pipx) version 0.16.2.1 or above. - - Please note that C(pipx) requires Python 3.6 or above. - - > - This first implementation does not verify whether a specified version constraint has been installed or not. - Hence, when using version operators, C(pipx) module will always try to execute the operation, - even when the application was previously installed. - This feature will be added in the future. - - See also the C(pipx) documentation at U(https://pypa.github.io/pipx/). +- > + This first implementation does not verify whether a specified version constraint has been installed or not. + Hence, when using version operators, C(pipx) module will always try to execute the operation, + even when the application was previously installed. + This feature will be added in the future. author: - - "Alexei Znamensky (@russoz)" -''' +- "Alexei Znamensky (@russoz)" +""" -EXAMPLES = ''' +EXAMPLES = """ +--- - name: Install tox community.general.pipx: name: tox @@ -157,48 +177,75 @@ EXAMPLES = ''' community.general.pipx: name: pycowsay state: absent -''' + +- name: Install multiple packages from list + vars: + pipx_packages: + - pycowsay + - black + - tox + community.general.pipx: + name: "{{ item }}" + state: latest + with_items: "{{ pipx_packages }}" +""" import json from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper -from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner +from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner, pipx_common_argspec from ansible.module_utils.facts.compat import ansible_facts +def _make_name(name, suffix): + return name if suffix is None else "{0}{1}".format(name, suffix) + + class PipX(StateModuleHelper): output_params = ['name', 'source', 'index_url', 'force', 'installdeps'] + argument_spec = dict( + state=dict(type='str', default='install', + choices=[ + 'present', 'absent', 'install', 'install_all', 'uninstall', 'uninstall_all', 'inject', 'uninject', + 'upgrade', 'upgrade_shared', 'upgrade_all', 'reinstall', 'reinstall_all', 'latest', 'pin', 'unpin', + ]), + name=dict(type='str'), + source=dict(type='str'), + install_apps=dict(type='bool', default=False), + install_deps=dict(type='bool', default=False), + inject_packages=dict(type='list', elements='str'), + force=dict(type='bool', default=False), + include_injected=dict(type='bool', default=False), + index_url=dict(type='str'), + python=dict(type='str'), + system_site_packages=dict(type='bool', default=False), + editable=dict(type='bool', default=False), + pip_args=dict(type='str'), + suffix=dict(type='str'), + spec_metadata=dict(type='path'), + ) + argument_spec.update(pipx_common_argspec) + module = dict( - argument_spec=dict( - state=dict(type='str', default='install', - choices=['present', 'absent', 'install', 'uninstall', 'uninstall_all', - 'inject', 'upgrade', 'upgrade_all', 'reinstall', 'reinstall_all', 'latest']), - name=dict(type='str'), - source=dict(type='str'), - 
install_apps=dict(type='bool', default=False), - install_deps=dict(type='bool', default=False), - inject_packages=dict(type='list', elements='str'), - force=dict(type='bool', default=False), - include_injected=dict(type='bool', default=False), - index_url=dict(type='str'), - python=dict(type='str'), - system_site_packages=dict(type='bool', default=False), - executable=dict(type='path'), - editable=dict(type='bool', default=False), - pip_args=dict(type='str'), - ), + argument_spec=argument_spec, required_if=[ ('state', 'present', ['name']), ('state', 'install', ['name']), + ('state', 'install_all', ['spec_metadata']), ('state', 'absent', ['name']), ('state', 'uninstall', ['name']), ('state', 'upgrade', ['name']), ('state', 'reinstall', ['name']), ('state', 'latest', ['name']), ('state', 'inject', ['name', 'inject_packages']), + ('state', 'pin', ['name']), + ('state', 'unpin', ['name']), ], + required_by=dict( + suffix="name", + ), supports_check_mode=True, ) use_old_vardict = False @@ -213,18 +260,17 @@ class PipX(StateModuleHelper): for venv_name, venv in raw_data['venvs'].items(): results[venv_name] = { 'version': venv['metadata']['main_package']['package_version'], - 'injected': dict( - (k, v['package_version']) for k, v in venv['metadata']['injected_packages'].items() - ), + 'injected': {k: v['package_version'] for k, v in venv['metadata']['injected_packages'].items()}, } return results installed = self.runner('_list', output_process=process_list).run(_list=1) if self.vars.name is not None: - app_list = installed.get(self.vars.name) + name = _make_name(self.vars.name, self.vars.suffix) + app_list = installed.get(name) if app_list: - return {self.vars.name: app_list} + return {name: app_list} else: return {} @@ -247,74 +293,98 @@ class PipX(StateModuleHelper): self.vars.stdout = ctx.results_out self.vars.stderr = ctx.results_err self.vars.cmd = ctx.cmd - if self.verbosity >= 4: - self.vars.run_info = ctx.run_info + self.vars.set('run_info', ctx.run_info, verbosity=4) def state_install(self): if not self.vars.application or self.vars.force: self.changed = True - with self.runner('state index_url install_deps force python system_site_packages editable pip_args name_source', check_mode_skip=True) as ctx: + args_order = 'state global index_url install_deps force python system_site_packages editable pip_args suffix name_source' + with self.runner(args_order, check_mode_skip=True) as ctx: ctx.run(name_source=[self.vars.name, self.vars.source]) self._capture_results(ctx) state_present = state_install + def state_install_all(self): + self.changed = True + with self.runner('state global index_url force python system_site_packages editable pip_args spec_metadata', check_mode_skip=True) as ctx: + ctx.run() + self._capture_results(ctx) + def state_upgrade(self): + name = _make_name(self.vars.name, self.vars.suffix) if not self.vars.application: - self.do_raise("Trying to upgrade a non-existent application: {0}".format(self.vars.name)) + self.do_raise("Trying to upgrade a non-existent application: {0}".format(name)) if self.vars.force: self.changed = True - with self.runner('state include_injected index_url force editable pip_args name', check_mode_skip=True) as ctx: - ctx.run() + with self.runner('state global include_injected index_url force editable pip_args name', check_mode_skip=True) as ctx: + ctx.run(name=name) self._capture_results(ctx) def state_uninstall(self): if self.vars.application: - with self.runner('state name', check_mode_skip=True) as ctx: - ctx.run() + name = 
_make_name(self.vars.name, self.vars.suffix) + with self.runner('state global name', check_mode_skip=True) as ctx: + ctx.run(name=name) self._capture_results(ctx) state_absent = state_uninstall def state_reinstall(self): + name = _make_name(self.vars.name, self.vars.suffix) if not self.vars.application: - self.do_raise("Trying to reinstall a non-existent application: {0}".format(self.vars.name)) + self.do_raise("Trying to reinstall a non-existent application: {0}".format(name)) self.changed = True - with self.runner('state name python', check_mode_skip=True) as ctx: - ctx.run() + with self.runner('state global name python', check_mode_skip=True) as ctx: + ctx.run(name=name) self._capture_results(ctx) def state_inject(self): + name = _make_name(self.vars.name, self.vars.suffix) if not self.vars.application: - self.do_raise("Trying to inject packages into a non-existent application: {0}".format(self.vars.name)) + self.do_raise("Trying to inject packages into a non-existent application: {0}".format(name)) if self.vars.force: self.changed = True - with self.runner('state index_url install_apps install_deps force editable pip_args name inject_packages', check_mode_skip=True) as ctx: - ctx.run() + with self.runner('state global index_url install_apps install_deps force editable pip_args name inject_packages', check_mode_skip=True) as ctx: + ctx.run(name=name) + self._capture_results(ctx) + + def state_uninject(self): + name = _make_name(self.vars.name, self.vars.suffix) + if not self.vars.application: + self.do_raise("Trying to uninject packages into a non-existent application: {0}".format(name)) + with self.runner('state global name inject_packages', check_mode_skip=True) as ctx: + ctx.run(name=name) self._capture_results(ctx) def state_uninstall_all(self): - with self.runner('state', check_mode_skip=True) as ctx: + with self.runner('state global', check_mode_skip=True) as ctx: ctx.run() self._capture_results(ctx) def state_reinstall_all(self): - with self.runner('state python', check_mode_skip=True) as ctx: + with self.runner('state global python', check_mode_skip=True) as ctx: ctx.run() self._capture_results(ctx) def state_upgrade_all(self): if self.vars.force: self.changed = True - with self.runner('state include_injected force', check_mode_skip=True) as ctx: + with self.runner('state global include_injected force', check_mode_skip=True) as ctx: + ctx.run() + self._capture_results(ctx) + + def state_upgrade_shared(self): + with self.runner('state global pip_args', check_mode_skip=True) as ctx: ctx.run() self._capture_results(ctx) def state_latest(self): if not self.vars.application or self.vars.force: self.changed = True - with self.runner('state index_url install_deps force python system_site_packages editable pip_args name_source', check_mode_skip=True) as ctx: + args_order = 'state index_url install_deps force python system_site_packages editable pip_args suffix name_source' + with self.runner(args_order, check_mode_skip=True) as ctx: ctx.run(state='install', name_source=[self.vars.name, self.vars.source]) self._capture_results(ctx) @@ -322,6 +392,16 @@ class PipX(StateModuleHelper): ctx.run(state='upgrade') self._capture_results(ctx) + def state_pin(self): + with self.runner('state global name', check_mode_skip=True) as ctx: + ctx.run() + self._capture_results(ctx) + + def state_unpin(self): + with self.runner('state global name', check_mode_skip=True) as ctx: + ctx.run() + self._capture_results(ctx) + def main(): PipX.execute() diff --git a/plugins/modules/pipx_info.py 
b/plugins/modules/pipx_info.py index 34f9681b06..65c0ba552e 100644 --- a/plugins/modules/pipx_info.py +++ b/plugins/modules/pipx_info.py @@ -9,58 +9,46 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: pipx_info short_description: Rretrieves information about applications installed with pipx version_added: 5.6.0 description: - - Retrieve details about Python applications installed in isolated virtualenvs using pipx. +- Retrieve details about Python applications installed in isolated virtualenvs using pipx. extends_documentation_fragment: - - community.general.attributes - - community.general.attributes.info_module +- community.general.attributes +- community.general.attributes.info_module +- community.general.pipx options: - name: - description: - - Name of an application installed with C(pipx). - type: str - include_deps: - description: - - Include dependent packages in the output. - type: bool - default: false - include_injected: - description: - - Include injected packages in the output. - type: bool - default: false - include_raw: - description: - - Returns the raw output of C(pipx list --json). - - The raw output is not affected by O(include_deps) or O(include_injected). - type: bool - default: false - executable: - description: - - Path to the C(pipx) installed in the system. - - > - If not specified, the module will use C(python -m pipx) to run the tool, - using the same Python interpreter as ansible itself. - type: path -notes: - - This module does not install the C(pipx) python package, however that can be easily done with the module M(ansible.builtin.pip). - - This module does not require C(pipx) to be in the shell C(PATH), but it must be loadable by Python as a module. - - > - This module will honor C(pipx) environment variables such as but not limited to E(PIPX_HOME) and E(PIPX_BIN_DIR) - passed using the R(environment Ansible keyword, playbooks_environment). - - This module requires C(pipx) version 0.16.2.1 or above. - - Please note that C(pipx) requires Python 3.6 or above. - - See also the C(pipx) documentation at U(https://pypa.github.io/pipx/). + name: + description: + - Name of an application installed with C(pipx). + type: str + include_deps: + description: + - Include dependent packages in the output. + type: bool + default: false + include_injected: + description: + - Include injected packages in the output. + type: bool + default: false + include_raw: + description: + - Returns the raw output of C(pipx list --json). + - The raw output is not affected by O(include_deps) or O(include_injected). 
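The pipx hunks above add state_pin() and state_unpin() handlers and keep the state_present = state_install alias. StateModuleHelper picks the handler method from the value of the state parameter by name; the reduced sketch below illustrates that dispatch convention only, it is not the helper's actual implementation:

class StateDispatcher:
    """Toy dispatcher: call the method named after the requested state."""

    def state_install(self):
        return "installed"

    # Aliasing makes state=present behave exactly like state=install.
    state_present = state_install

    def state_pin(self):
        return "pinned"

    def run(self, state):
        return getattr(self, "state_{0}".format(state))()


d = StateDispatcher()
print(d.run("present"))  # -> installed
print(d.run("pin"))      # -> pinned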
+ type: bool + default: false + global: + version_added: 9.3.0 author: - - "Alexei Znamensky (@russoz)" -''' +- "Alexei Znamensky (@russoz)" +""" -EXAMPLES = ''' +EXAMPLES = """ +--- - name: retrieve all installed applications community.general.pipx_info: {} @@ -78,9 +66,10 @@ EXAMPLES = ''' community.general.pipx_info: name: ansible-lint include_deps: true -''' +""" -RETURN = ''' +RETURN = """ +--- application: description: The list of installed applications returned: success @@ -120,36 +109,31 @@ cmd: returned: success type: list elements: str - sample: [ - "/usr/bin/python3.10", - "-m", - "pipx", - "list", - "--include-injected", - "--json" - ] -''' + sample: ["/usr/bin/python3.10", "-m", "pipx", "list", "--include-injected", "--json"] +""" import json from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper -from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner +from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner, pipx_common_argspec from ansible.module_utils.facts.compat import ansible_facts class PipXInfo(ModuleHelper): output_params = ['name'] + argument_spec = dict( + name=dict(type='str'), + include_deps=dict(type='bool', default=False), + include_injected=dict(type='bool', default=False), + include_raw=dict(type='bool', default=False), + ) + argument_spec.update(pipx_common_argspec) module = dict( - argument_spec=dict( - name=dict(type='str'), - include_deps=dict(type='bool', default=False), - include_injected=dict(type='bool', default=False), - include_raw=dict(type='bool', default=False), - executable=dict(type='path'), - ), + argument_spec=argument_spec, supports_check_mode=True, ) + use_old_vardict = False def __init_module__(self): if self.vars.executable: @@ -185,16 +169,14 @@ class PipXInfo(ModuleHelper): 'version': venv['metadata']['main_package']['package_version'] } if self.vars.include_injected: - entry['injected'] = dict( - (k, v['package_version']) for k, v in venv['metadata']['injected_packages'].items() - ) + entry['injected'] = {k: v['package_version'] for k, v in venv['metadata']['injected_packages'].items()} if self.vars.include_deps: entry['dependencies'] = list(venv['metadata']['main_package']['app_paths_of_dependencies']) results.append(entry) return results - with self.runner('_list', output_process=process_list) as ctx: + with self.runner('_list global', output_process=process_list) as ctx: self.vars.application = ctx.run(_list=1) self._capture_results(ctx) diff --git a/plugins/modules/pkg5_publisher.py b/plugins/modules/pkg5_publisher.py index 9d1b381385..6d07e455f4 100644 --- a/plugins/modules/pkg5_publisher.py +++ b/plugins/modules/pkg5_publisher.py @@ -183,9 +183,7 @@ def get_publishers(module): name = values['publisher'] if name not in publishers: - publishers[name] = dict( - (k, values[k]) for k in ['sticky', 'enabled'] - ) + publishers[name] = {k: values[k] for k in ['sticky', 'enabled']} publishers[name]['origin'] = [] publishers[name]['mirror'] = [] diff --git a/plugins/modules/pkgng.py b/plugins/modules/pkgng.py index 88c9b8e3b9..7a04ee3a6e 100644 --- a/plugins/modules/pkgng.py +++ b/plugins/modules/pkgng.py @@ -100,6 +100,13 @@ options: type: bool default: false version_added: 1.3.0 + use_globs: + description: + - Treat the package names as shell glob patterns. 
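With O(use_globs) the C(-g) flag is no longer passed unconditionally; as the run_pkgng() change further down in this file shows, it is only injected for the actions that accept glob patterns. A standalone sketch of that decision, where 'pkg' merely stands in for the resolved pkgng path and nothing is executed:

def build_pkg_cmd(action, *args, use_globs=True):
    """Prepend -g only for glob-aware actions, mirroring the run_pkgng() change."""
    if use_globs and action in ('info', 'install', 'upgrade'):
        args = ('-g',) + args
    return ['pkg', action] + list(args)


print(build_pkg_cmd('info', '-e', 'bash'))
# ['pkg', 'info', '-g', '-e', 'bash']
print(build_pkg_cmd('upgrade', '-n', 'foo/bar', use_globs=False))
# ['pkg', 'upgrade', '-n', 'foo/bar']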
+ required: false + type: bool + default: true + version_added: 9.3.0 author: "bleader (@bleader)" notes: - When using pkgsite, be careful that already in cache packages won't be downloaded again. @@ -127,7 +134,6 @@ EXAMPLES = ''' - bar state: absent -# "latest" support added in 2.7 - name: Upgrade package baz community.general.pkgng: name: baz @@ -137,6 +143,12 @@ EXAMPLES = ''' community.general.pkgng: name: "*" state: latest + +- name: Upgrade foo/bar + community.general.pkgng: + name: foo/bar + state: latest + use_globs: false ''' @@ -147,7 +159,7 @@ from ansible.module_utils.basic import AnsibleModule def query_package(module, run_pkgng, name): - rc, out, err = run_pkgng('info', '-g', '-e', name) + rc, out, err = run_pkgng('info', '-e', name) return rc == 0 @@ -157,7 +169,7 @@ def query_update(module, run_pkgng, name): # Check to see if a package upgrade is available. # rc = 0, no updates available or package not installed # rc = 1, updates available - rc, out, err = run_pkgng('upgrade', '-g', '-n', name) + rc, out, err = run_pkgng('upgrade', '-n', name) return rc == 1 @@ -260,7 +272,7 @@ def install_packages(module, run_pkgng, packages, cached, state): action_count[action] += len(package_list) continue - pkgng_args = [action, '-g', '-U', '-y'] + package_list + pkgng_args = [action, '-U', '-y'] + package_list rc, out, err = run_pkgng(*pkgng_args) stdout += out stderr += err @@ -290,7 +302,7 @@ def install_packages(module, run_pkgng, packages, cached, state): def annotation_query(module, run_pkgng, package, tag): - rc, out, err = run_pkgng('info', '-g', '-A', package) + rc, out, err = run_pkgng('info', '-A', package) match = re.search(r'^\s*(?P%s)\s*:\s*(?P\w+)' % tag, out, flags=re.MULTILINE) if match: return match.group('value') @@ -425,7 +437,9 @@ def main(): rootdir=dict(required=False, type='path'), chroot=dict(required=False, type='path'), jail=dict(required=False, type='str'), - autoremove=dict(default=False, type='bool')), + autoremove=dict(default=False, type='bool'), + use_globs=dict(default=True, required=False, type='bool'), + ), supports_check_mode=True, mutually_exclusive=[["rootdir", "chroot", "jail"]]) @@ -466,6 +480,9 @@ def main(): def run_pkgng(action, *args, **kwargs): cmd = [pkgng_path, dir_arg, action] + if p["use_globs"] and action in ('info', 'install', 'upgrade',): + args = ('-g',) + args + pkgng_env = {'BATCH': 'yes'} if p["ignore_osver"]: diff --git a/plugins/modules/proxmox.py b/plugins/modules/proxmox.py index 73afd952e2..52d5a849f3 100644 --- a/plugins/modules/proxmox.py +++ b/plugins/modules/proxmox.py @@ -49,8 +49,44 @@ options: comma-delimited list C([volume=] [,acl=<1|0>] [,mountoptions=] [,quota=<1|0>] [,replicate=<1|0>] [,ro=<1|0>] [,shared=<1|0>] [,size=])." - See U(https://pve.proxmox.com/wiki/Linux_Container) for a full description. - - Should not be used in conjunction with O(storage). + - This option is mutually exclusive with O(storage) and O(disk_volume). type: str + disk_volume: + description: + - Specify a hash/dictionary of the C(rootfs) disk. + - See U(https://pve.proxmox.com/wiki/Linux_Container#pct_mount_points) for a full description. + - This option is mutually exclusive with O(storage) and O(disk). + type: dict + version_added: 9.2.0 + suboptions: + storage: + description: + - O(disk_volume.storage) is the storage identifier of the storage to use for the C(rootfs). + - Mutually exclusive with O(disk_volume.host_path). + type: str + volume: + description: + - O(disk_volume.volume) is the name of an existing volume. 
+ - If not defined, the module will check if one exists. If not, a new volume will be created. + - If defined, the volume must exist under that name. + - Required only if O(disk_volume.storage) is defined and mutually exclusive with O(disk_volume.host_path). + type: str + size: + description: + - O(disk_volume.size) is the size of the storage to use. + - The size is given in GB. + - Required only if O(disk_volume.storage) is defined and mutually exclusive with O(disk_volume.host_path). + type: int + host_path: + description: + - O(disk_volume.host_path) defines a bind or device path on the PVE host to use for the C(rootfs). + - Mutually exclusive with O(disk_volume.storage), O(disk_volume.volume), and O(disk_volume.size). + type: path + options: + description: + - O(disk_volume.options) is a dict of extra options. + - The value of any given option must be a string, for example V("1"). + type: dict cores: description: - Specify number of cores per socket. @@ -89,8 +125,56 @@ options: version_added: 8.5.0 mounts: description: - - specifies additional mounts (separate disks) for the container. As a hash/dictionary defining mount points + - Specifies additional mounts (separate disks) for the container. As a hash/dictionary defining mount points as strings. + - This Option is mutually exclusive with O(mount_volumes). type: dict + mount_volumes: + description: + - Specify additional mounts (separate disks) for the container. As a hash/dictionary defining mount points. + - See U(https://pve.proxmox.com/wiki/Linux_Container#pct_mount_points) for a full description. + - This Option is mutually exclusive with O(mounts). + type: list + elements: dict + version_added: 9.2.0 + suboptions: + id: + description: + - O(mount_volumes[].id) is the identifier of the mount point written as C(mp[n]). + type: str + required: true + storage: + description: + - O(mount_volumes[].storage) is the storage identifier of the storage to use. + - Mutually exclusive with O(mount_volumes[].host_path). + type: str + volume: + description: + - O(mount_volumes[].volume) is the name of an existing volume. + - If not defined, the module will check if one exists. If not, a new volume will be created. + - If defined, the volume must exist under that name. + - Required only if O(mount_volumes[].storage) is defined and mutually exclusive with O(mount_volumes[].host_path). + type: str + size: + description: + - O(mount_volumes[].size) is the size of the storage to use. + - The size is given in GB. + - Required only if O(mount_volumes[].storage) is defined and mutually exclusive with O(mount_volumes[].host_path). + type: int + host_path: + description: + - O(mount_volumes[].host_path) defines a bind or device path on the PVE host to use for the C(rootfs). + - Mutually exclusive with O(mount_volumes[].storage), O(mount_volumes[].volume), and O(mount_volumes[].size). + type: path + mountpoint: + description: + - O(mount_volumes[].mountpoint) is the mount point of the volume. + type: path + required: true + options: + description: + - O(mount_volumes[].options) is a dict of extra options. + - The value of any given option must be a string, for example V("1"). + type: dict ip_address: description: - specifies the address the container will be assigned @@ -101,8 +185,8 @@ options: type: bool storage: description: - - target storage - - Should not be used in conjunction with O(disk). + - Target storage. + - This Option is mutually exclusive with O(disk) and O(disk_volume). 
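The strings accepted by the legacy O(disk) and O(mounts) options are converted into this structured form by a parse_disk_string() helper added further down in this file. A condensed, standalone sketch of that parsing, with the regex group names (storage, size, volume, host_path) inferred from how the match result is consumed there; treat it as an illustration rather than the module's verbatim code:

import re

VOLUME_PATTERN = re.compile(r"""(?x)
    (?:(?P<storage>[\w\-.]+):
        (?:(?P<size>\d+)|
           (?P<volume>[^,\s]+))
    )|
    (?P<host_path>[^,\s]+)
""")


def parse_disk_string(disk_string):
    # Split into key=value pairs; an element without "=" is the volume specification.
    args = disk_string.split(",")
    args = ["volume=" + arg if "=" not in arg else arg for arg in args]
    disk_kwargs = dict(arg.split("=", 1) for arg in args)
    match = VOLUME_PATTERN.match(disk_kwargs.pop("volume"))
    disk_kwargs.update({k: v for k, v in match.groupdict().items() if v is not None})
    return disk_kwargs


print(parse_disk_string("local:20"))
# {'storage': 'local', 'size': '20'}
print(parse_disk_string("thin1:base-100-disk-1,size=8G"))
# {'size': '8G', 'storage': 'thin1', 'volume': 'base-100-disk-1'}
print(parse_disk_string("/mnt/bindmounts/shared,mp=/shared"))
# {'mp': '/shared', 'host_path': '/mnt/bindmounts/shared'}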
type: str default: 'local' ostype: @@ -248,6 +332,20 @@ EXAMPLES = r''' ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' disk: 'local-lvm:20' +- name: Create new container with minimal options specifying disk storage location and size via disk_volume + community.general.proxmox: + vmid: 100 + node: uk-mc02 + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + password: 123456 + hostname: example.org + ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' + disk_volume: + storage: local + size: 20 + - name: Create new container with hookscript and description community.general.proxmox: vmid: 100 @@ -303,7 +401,8 @@ EXAMPLES = r''' password: 123456 hostname: example.org ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - netif: '{"net0":"name=eth0,ip=dhcp,ip6=dhcp,bridge=vmbr0"}' + netif: + net0: "name=eth0,ip=dhcp,ip6=dhcp,bridge=vmbr0" - name: Create new container with minimal options defining network interface with static ip community.general.proxmox: @@ -315,7 +414,21 @@ EXAMPLES = r''' password: 123456 hostname: example.org ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - netif: '{"net0":"name=eth0,gw=192.168.0.1,ip=192.168.0.2/24,bridge=vmbr0"}' + netif: + net0: "name=eth0,gw=192.168.0.1,ip=192.168.0.2/24,bridge=vmbr0" + +- name: Create new container with more options defining network interface with static ip4 and ip6 with vlan-tag and mtu + community.general.proxmox: + vmid: 100 + node: uk-mc02 + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + password: 123456 + hostname: example.org + ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' + netif: + net0: "name=eth0,gw=192.168.0.1,ip=192.168.0.2/24,ip6=fe80::1227/64,gw6=fe80::1,bridge=vmbr0,firewall=1,tag=934,mtu=1500" - name: Create new container with minimal options defining a mount with 8GB community.general.proxmox: @@ -327,7 +440,24 @@ EXAMPLES = r''' password: 123456 hostname: example.org ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - mounts: '{"mp0":"local:8,mp=/mnt/test/"}' + mounts: + mp0: "local:8,mp=/mnt/test/" + +- name: Create new container with minimal options defining a mount with 8GB using mount_volumes + community.general.proxmox: + vmid: 100 + node: uk-mc02 + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + password: 123456 + hostname: example.org + ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' + mount_volumes: + - id: mp0 + storage: local + size: 8 + mountpoint: /mnt/test - name: Create new container with minimal options defining a cpu core limit community.general.proxmox: @@ -397,7 +527,8 @@ EXAMPLES = r''' api_user: root@pam api_password: 1q2w3e api_host: node1 - netif: '{"net0":"name=eth0,gw=192.168.0.1,ip=192.168.0.3/24,bridge=vmbr0"}' + netif: + net0: "name=eth0,gw=192.168.0.1,ip=192.168.0.3/24,bridge=vmbr0" update: true - name: Start container @@ -478,6 +609,7 @@ from ansible_collections.community.general.plugins.module_utils.version import L from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native + from ansible_collections.community.general.plugins.module_utils.proxmox import ( ansible_to_proxmox_bool, proxmox_auth_argument_spec, ProxmoxAnsible) @@ -501,6 +633,127 @@ class ProxmoxLxcAnsible(ProxmoxAnsible): msg="Updating configuration is only supported for LXC enabled proxmox clusters.", ) + def parse_disk_string(disk_string): + # Example strings: + # "acl=0,thin1:base-100-disk-1,size=8G" + # "thin1:10,backup=0" + # "local:20" + # "volume=local-lvm:base-100-disk-1,size=20G" + # 
"/mnt/bindmounts/shared,mp=/shared" + # "volume=/dev/USB01,mp=/mnt/usb01" + args = disk_string.split(",") + # If the volume is not explicitly defined but implicit by only passing a key, + # add the "volume=" key prefix for ease of parsing. + args = ["volume=" + arg if "=" not in arg else arg for arg in args] + # Then create a dictionary from the arguments + disk_kwargs = dict(map(lambda item: item.split("="), args)) + + VOLUME_PATTERN = r"""(?x) + (?:(?P[\w\-.]+): + (?:(?P\d+)| + (?P[^,\s]+)) + )| + (?P[^,\s]+) + """ + # DISCLAIMER: + # There are two things called a "volume": + # 1. The "volume" key which describes the storage volume, device or directory to mount into the container. + # 2. The storage volume of a storage-backed mount point in the PVE storage sub system. + # In this section, we parse the "volume" key and check which type of mount point we are dealing with. + pattern = re.compile(VOLUME_PATTERN) + match_dict = pattern.match(disk_kwargs.pop("volume")).groupdict() + match_dict = {k: v for k, v in match_dict.items() if v is not None} + + if "storage" in match_dict and "volume" in match_dict: + disk_kwargs["storage"] = match_dict["storage"] + disk_kwargs["volume"] = match_dict["volume"] + elif "storage" in match_dict and "size" in match_dict: + disk_kwargs["storage"] = match_dict["storage"] + disk_kwargs["size"] = match_dict["size"] + elif "host_path" in match_dict: + disk_kwargs["host_path"] = match_dict["host_path"] + + # Pattern matching only available in Python 3.10+ + # match match_dict: + # case {"storage": storage, "volume": volume}: + # disk_kwargs["storage"] = storage + # disk_kwargs["volume"] = volume + + # case {"storage": storage, "size": size}: + # disk_kwargs["storage"] = storage + # disk_kwargs["size"] = size + + # case {"host_path": host_path}: + # disk_kwargs["host_path"] = host_path + + return disk_kwargs + + def convert_mounts(mount_dict): + return_list = [] + for mount_key, mount_value in mount_dict.items(): + mount_config = parse_disk_string(mount_value) + return_list.append(dict(id=mount_key, **mount_config)) + + return return_list + + def build_volume( + key, + storage=None, + volume=None, + host_path=None, + size=None, + mountpoint=None, + options=None, + **kwargs + ): + if size is not None and isinstance(size, str): + size = size.strip("G") + # 1. 
Handle volume checks/creation + # 1.1 Check if defined volume exists + if volume is not None: + storage_content = self.get_storage_content(node, storage, vmid=vmid) + vol_ids = [vol["volid"] for vol in storage_content] + volid = "{storage}:{volume}".format(storage=storage, volume=volume) + if volid not in vol_ids: + self.module.fail_json( + changed=False, + msg="Storage {storage} does not contain volume {volume}".format( + storage=storage, + volume=volume, + ), + ) + vol_string = "{storage}:{volume},size={size}G".format( + storage=storage, volume=volume, size=size + ) + # 1.2 If volume not defined (but storage is), check if it exists + elif storage is not None: + api_node = self.proxmox_api.nodes( + node + ) # The node must exist, but not the LXC + try: + vol = api_node.lxc(vmid).get("config").get(key) + volume = parse_disk_string(vol).get("volume") + vol_string = "{storage}:{volume},size={size}G".format( + storage=storage, volume=volume, size=size + ) + + # If not, we have proxmox create one using the special syntax + except Exception: + vol_string = "{storage}:{size}".format(storage=storage, size=size) + else: + raise AssertionError('Internal error') + + # 1.3 If we have a host_path, we don't have storage, a volume, or a size + vol_string = ",".join( + [vol_string] + + ([] if host_path is None else [host_path]) + + ([] if mountpoint is None else ["mp={0}".format(mountpoint)]) + + ([] if options is None else ["{0}={1}".format(k, v) for k, v in options.items()]) + + ([] if not kwargs else ["{0}={1}".format(k, v) for k, v in kwargs.items()]) + ) + + return {key: vol_string} + # Version limited features minimum_version = {"tags": "6.1", "timezone": "6.3"} proxmox_node = self.proxmox_api.nodes(node) @@ -518,22 +771,29 @@ class ProxmoxLxcAnsible(ProxmoxAnsible): ) # Remove all empty kwarg entries - kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) + kwargs = {key: val for key, val in kwargs.items() if val is not None} if cpus is not None: kwargs["cpulimit"] = cpus if disk is not None: - kwargs["rootfs"] = disk + kwargs["disk_volume"] = parse_disk_string(disk) + if "disk_volume" in kwargs: + disk_dict = build_volume(key="rootfs", **kwargs.pop("disk_volume")) + kwargs.update(disk_dict) if memory is not None: kwargs["memory"] = memory if swap is not None: kwargs["swap"] = swap if "netif" in kwargs: - kwargs.update(kwargs["netif"]) - del kwargs["netif"] + kwargs.update(kwargs.pop("netif")) if "mounts" in kwargs: - kwargs.update(kwargs["mounts"]) - del kwargs["mounts"] + kwargs["mount_volumes"] = convert_mounts(kwargs.pop("mounts")) + if "mount_volumes" in kwargs: + mounts_list = kwargs.pop("mount_volumes") + for mount_config in mounts_list: + key = mount_config.pop("id") + mount_dict = build_volume(key=key, **mount_config) + kwargs.update(mount_dict) # LXC tags are expected to be valid and presented as a comma/semi-colon delimited string if "tags" in kwargs: re_tag = re.compile(r"^[a-z0-9_][a-z0-9_\-\+\.]*$") @@ -582,7 +842,7 @@ class ProxmoxLxcAnsible(ProxmoxAnsible): proxmox_node = self.proxmox_api.nodes(node) # Remove all empty kwarg entries - kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) + kwargs = {k: v for k, v in kwargs.items() if v is not None} pve_version = self.version() @@ -735,12 +995,53 @@ def main(): hostname=dict(), ostemplate=dict(), disk=dict(type='str'), + disk_volume=dict( + type="dict", + options=dict( + storage=dict(type="str"), + volume=dict(type="str"), + size=dict(type="int"), + host_path=dict(type="path"), + options=dict(type="dict"), + 
), + required_together=[("storage", "size")], + required_by={ + "volume": ("storage", "size"), + }, + mutually_exclusive=[ + ("host_path", "storage"), + ("host_path", "volume"), + ("host_path", "size"), + ], + ), cores=dict(type='int'), cpus=dict(type='int'), memory=dict(type='int'), swap=dict(type='int'), netif=dict(type='dict'), mounts=dict(type='dict'), + mount_volumes=dict( + type="list", + elements="dict", + options=dict( + id=(dict(type="str", required=True)), + storage=dict(type="str"), + volume=dict(type="str"), + size=dict(type="int"), + host_path=dict(type="path"), + mountpoint=dict(type="path", required=True), + options=dict(type="dict"), + ), + required_together=[("storage", "size")], + required_by={ + "volume": ("storage", "size"), + }, + mutually_exclusive=[ + ("host_path", "storage"), + ("host_path", "volume"), + ("host_path", "size"), + ], + ), ip_address=dict(), ostype=dict(default='auto', choices=[ 'auto', 'debian', 'devuan', 'ubuntu', 'centos', 'fedora', 'opensuse', 'archlinux', 'alpine', 'gentoo', 'nixos', 'unmanaged' @@ -776,11 +1077,17 @@ def main(): # either clone a container or create a new one from a template file. ('state', 'present', ('clone', 'ostemplate', 'update'), True), ], - required_together=[ - ('api_token_id', 'api_token_secret') + required_together=[("api_token_id", "api_token_secret")], + required_one_of=[("api_password", "api_token_id")], + mutually_exclusive=[ + ( + "clone", + "ostemplate", + "update", + ), # Creating a new container is done either by cloning an existing one, or based on a template. + ("disk", "disk_volume", "storage"), + ("mounts", "mount_volumes"), ], - required_one_of=[('api_password', 'api_token_id')], - mutually_exclusive=[('clone', 'ostemplate', 'update')], # Creating a new container is done either by cloning an existing one, or based on a template. ) proxmox = ProxmoxLxcAnsible(module) @@ -821,7 +1128,9 @@ def main(): cores=module.params["cores"], hostname=module.params["hostname"], netif=module.params["netif"], + disk_volume=module.params["disk_volume"], mounts=module.params["mounts"], + mount_volumes=module.params["mount_volumes"], ip_address=module.params["ip_address"], onboot=ansible_to_proxmox_bool(module.params["onboot"]), cpuunits=module.params["cpuunits"], @@ -876,7 +1185,9 @@ def main(): hostname=module.params['hostname'], ostemplate=module.params['ostemplate'], netif=module.params['netif'], + disk_volume=module.params["disk_volume"], mounts=module.params['mounts'], + mount_volumes=module.params["mount_volumes"], ostype=module.params['ostype'], ip_address=module.params['ip_address'], onboot=ansible_to_proxmox_bool(module.params['onboot']), diff --git a/plugins/modules/proxmox_disk.py b/plugins/modules/proxmox_disk.py index 83cdbeee58..ed67445b30 100644 --- a/plugins/modules/proxmox_disk.py +++ b/plugins/modules/proxmox_disk.py @@ -453,10 +453,11 @@ msg: ''' from ansible.module_utils.basic import AnsibleModule + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible) from re import compile, match, sub -from time import sleep def disk_conf_str_to_dict(config_string): @@ -524,26 +525,31 @@ class ProxmoxDiskAnsible(ProxmoxAnsible): # - Remove not defined args # - Ensure True and False converted to int. 
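        # For example (illustrative values only, not actual module parameters):
        #   {'busy': True, 'cache': None, 'size': 8}  ->  {'busy': 1, 'size': 8}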
# - Remove unnecessary parameters - params = dict((k, v) for k, v in self.module.params.items() if v is not None and k in self.create_update_fields) - params.update(dict((k, int(v)) for k, v in params.items() if isinstance(v, bool))) + params = { + k: int(v) if isinstance(v, bool) else v + for k, v in self.module.params.items() + if v is not None and k in self.create_update_fields + } return params - def wait_till_complete_or_timeout(self, node_name, task_id): - timeout = self.module.params['timeout'] - while timeout: - if self.api_task_ok(node_name, task_id): - return True - timeout -= 1 - if timeout <= 0: - return False - sleep(1) - def create_disk(self, disk, vmid, vm, vm_config): + """Create a disk in the specified virtual machine. Check if creation is required, + and if so, compile the disk configuration and create it by updating the virtual + machine configuration. After calling the API function, wait for the result. + + :param disk: ID of the disk in format "". + :param vmid: ID of the virtual machine where the disk will be created. + :param vm: Name of the virtual machine where the disk will be created. + :param vm_config: Configuration of the virtual machine. + :return: (bool, string) Whether the task was successful or not + and the message to return to Ansible. + """ create = self.module.params['create'] if create == 'disabled' and disk not in vm_config: # NOOP return False, "Disk %s not found in VM %s and creation was disabled in parameters." % (disk, vmid) + timeout_str = "Reached timeout. Last line in task before timeout: %s" if (create == 'regular' and disk not in vm_config) or (create == 'forced'): # CREATE playbook_config = self.get_create_attributes() @@ -597,7 +603,7 @@ class ProxmoxDiskAnsible(ProxmoxAnsible): if iso_image is not None: playbook_config['volume'] = iso_image # Values in params are numbers, but strings are needed to compare with disk_config - playbook_config = dict((k, str(v)) for k, v in playbook_config.items()) + playbook_config = {k: str(v) for k, v in playbook_config.items()} # Now compare old and new config to detect if changes are needed if proxmox_config == playbook_config: @@ -606,15 +612,31 @@ class ProxmoxDiskAnsible(ProxmoxAnsible): disk_config_to_apply = {self.module.params["disk"]: config_str} current_task_id = self.proxmox_api.nodes(vm['node']).qemu(vmid).config.post(**disk_config_to_apply) - task_success = self.wait_till_complete_or_timeout(vm['node'], current_task_id) + task_success, fail_reason = self.api_task_complete(vm['node'], current_task_id, self.module.params['timeout']) + if task_success: return True, ok_str % (disk, vmid) else: - self.module.fail_json( - msg=timeout_str % self.proxmox_api.nodes(vm['node']).tasks(current_task_id).log.get()[:1] - ) + if fail_reason == ProxmoxAnsible.TASK_TIMED_OUT: + self.module.fail_json( + msg=timeout_str % self.proxmox_api.nodes(vm['node']).tasks(current_task_id).log.get()[:1] + ) + else: + self.module.fail_json(msg="Error occurred on task execution: %s" % fail_reason) def move_disk(self, disk, vmid, vm, vm_config): + """Call the `move_disk` API function that moves the disk to another storage and wait for the result. + + :param disk: ID of disk in format "". + :param vmid: ID of virtual machine which disk will be moved. + :param vm: Name of virtual machine which disk will be moved. + :param vm_config: Virtual machine configuration. + :return: (bool, string) Whether the task was successful or not + and the message to return to Ansible. 
+ """ + disk_config = disk_conf_str_to_dict(vm_config[disk]) + disk_storage = disk_config["storage_name"] + params = dict() params['disk'] = disk params['vmid'] = vmid @@ -625,22 +647,65 @@ class ProxmoxDiskAnsible(ProxmoxAnsible): params['format'] = self.module.params['format'] params['delete'] = 1 if self.module.params.get('delete_moved', False) else 0 # Remove not defined args - params = dict((k, v) for k, v in params.items() if v is not None) + params = {k: v for k, v in params.items() if v is not None} if params.get('storage', False): + # Check if the disk is already in the target storage. disk_config = disk_conf_str_to_dict(vm_config[disk]) if params['storage'] == disk_config['storage_name']: - return False + return False, "Disk %s already at %s storage" % (disk, disk_storage) + + current_task_id = self.proxmox_api.nodes(vm['node']).qemu(vmid).move_disk.post(**params) + task_success, fail_reason = self.api_task_complete(vm['node'], current_task_id, self.module.params['timeout']) - task_id = self.proxmox_api.nodes(vm['node']).qemu(vmid).move_disk.post(**params) - task_success = self.wait_till_complete_or_timeout(vm['node'], task_id) if task_success: - return True + return True, "Disk %s moved from VM %s storage %s" % (disk, vmid, disk_storage) else: - self.module.fail_json( - msg='Reached timeout while waiting for moving VM disk. Last line in task before timeout: %s' % - self.proxmox_api.nodes(vm['node']).tasks(task_id).log.get()[:1] - ) + if fail_reason == ProxmoxAnsible.TASK_TIMED_OUT: + self.module.fail_json( + msg='Reached timeout while waiting for moving VM disk. Last line in task before timeout: %s' % + self.proxmox_api.nodes(vm['node']).tasks(current_task_id).log.get()[:1] + ) + else: + self.module.fail_json(msg="Error occurred on task execution: %s" % fail_reason) + + def resize_disk(self, disk, vmid, vm, vm_config): + """Call the `resize` API function to change the disk size and wait for the result. + + :param disk: ID of disk in format "". + :param vmid: ID of virtual machine which disk will be resized. + :param vm: Name of virtual machine which disk will be resized. + :param vm_config: Virtual machine configuration. + :return: (Bool, string) Whether the task was successful or not + and the message to return to Ansible. + """ + size = self.module.params['size'] + if not match(r'^\+?\d+(\.\d+)?[KMGT]?$', size): + self.module.fail_json(msg="Unrecognized size pattern for disk %s: %s" % (disk, size)) + disk_config = disk_conf_str_to_dict(vm_config[disk]) + actual_size = disk_config['size'] + if size == actual_size: + return False, "Disk %s is already %s size" % (disk, size) + + # Resize disk API endpoint has changed at v8.0: PUT method become async. + version = self.version() + pve_major_version = 3 if version < LooseVersion('4.0') else version.version[0] + if pve_major_version >= 8: + current_task_id = self.proxmox_api.nodes(vm['node']).qemu(vmid).resize.set(disk=disk, size=size) + task_success, fail_reason = self.api_task_complete(vm['node'], current_task_id, self.module.params['timeout']) + if task_success: + return True, "Disk %s resized in VM %s" % (disk, vmid) + else: + if fail_reason == ProxmoxAnsible.TASK_TIMED_OUT: + self.module.fail_json( + msg="Reached timeout while resizing disk. 
Last line in task before timeout: %s" % + self.proxmox_api.nodes(vm['node']).tasks(current_task_id).log.get()[:1] + ) + else: + self.module.fail_json(msg="Error occurred on task execution: %s" % fail_reason) + else: + self.proxmox_api.nodes(vm['node']).qemu(vmid).resize.set(disk=disk, size=size) + return True, "Disk %s resized in VM %s" % (disk, vmid) def main(): @@ -779,11 +844,8 @@ def main(): if state == 'present': try: - success, message = proxmox.create_disk(disk, vmid, vm, vm_config) - if success: - module.exit_json(changed=True, vmid=vmid, msg=message) - else: - module.exit_json(changed=False, vmid=vmid, msg=message) + changed, message = proxmox.create_disk(disk, vmid, vm, vm_config) + module.exit_json(changed=changed, vmid=vmid, msg=message) except Exception as e: module.fail_json(vmid=vmid, msg='Unable to create/update disk %s in VM %s: %s' % (disk, vmid, str(e))) @@ -800,27 +862,15 @@ def main(): elif state == 'moved': try: - disk_config = disk_conf_str_to_dict(vm_config[disk]) - disk_storage = disk_config["storage_name"] - if proxmox.move_disk(disk, vmid, vm, vm_config): - module.exit_json(changed=True, vmid=vmid, - msg="Disk %s moved from VM %s storage %s" % (disk, vmid, disk_storage)) - else: - module.exit_json(changed=False, vmid=vmid, msg="Disk %s already at %s storage" % (disk, disk_storage)) + changed, message = proxmox.move_disk(disk, vmid, vm, vm_config) + module.exit_json(changed=changed, vmid=vmid, msg=message) except Exception as e: module.fail_json(msg="Failed to move disk %s in VM %s with exception: %s" % (disk, vmid, str(e))) elif state == 'resized': try: - size = module.params['size'] - if not match(r'^\+?\d+(\.\d+)?[KMGT]?$', size): - module.fail_json(msg="Unrecognized size pattern for disk %s: %s" % (disk, size)) - disk_config = disk_conf_str_to_dict(vm_config[disk]) - actual_size = disk_config['size'] - if size == actual_size: - module.exit_json(changed=False, vmid=vmid, msg="Disk %s is already %s size" % (disk, size)) - proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).resize.set(disk=disk, size=size) - module.exit_json(changed=True, vmid=vmid, msg="Disk %s resized in VM %s" % (disk, vmid)) + changed, message = proxmox.resize_disk(disk, vmid, vm, vm_config) + module.exit_json(changed=changed, vmid=vmid, msg=message) except Exception as e: module.fail_json(msg="Failed to resize disk %s in VM %s with exception: %s" % (disk, vmid, str(e))) diff --git a/plugins/modules/proxmox_kvm.py b/plugins/modules/proxmox_kvm.py index 9fe805c7ab..771ddd902f 100644 --- a/plugins/modules/proxmox_kvm.py +++ b/plugins/modules/proxmox_kvm.py @@ -174,6 +174,7 @@ options: - Allow to force stop VM. - Can be used with states V(stopped), V(restarted), and V(absent). - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(false). + - Requires parameter O(archive). type: bool format: description: @@ -969,7 +970,7 @@ class ProxmoxKvmAnsible(ProxmoxAnsible): self.module.fail_json(msg='Getting information for VM with vmid = %s failed with exception: %s' % (vmid, e)) # Sanitize kwargs. Remove not defined args and ensure True and False converted to int. - kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) + kwargs = {k: v for k, v in kwargs.items() if v is not None} # Convert all dict in kwargs to elements. # For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n] @@ -995,7 +996,7 @@ class ProxmoxKvmAnsible(ProxmoxAnsible): proxmox_node = self.proxmox_api.nodes(node) # Sanitize kwargs. 
Remove not defined args and ensure True and False converted to int. - kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) + kwargs = {k: v for k, v in kwargs.items() if v is not None} return proxmox_node.qemu(vmid).config.set(**kwargs) is None @@ -1030,8 +1031,8 @@ class ProxmoxKvmAnsible(ProxmoxAnsible): proxmox_node = self.proxmox_api.nodes(node) # Sanitize kwargs. Remove not defined args and ensure True and False converted to int. - kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) - kwargs.update(dict([k, int(v)] for k, v in kwargs.items() if isinstance(v, bool))) + kwargs = {k: v for k, v in kwargs.items() if v is not None} + kwargs.update({k: int(v) for k, v in kwargs.items() if isinstance(v, bool)}) version = self.version() pve_major_version = 3 if version < LooseVersion('4.0') else version.version[0] @@ -1162,7 +1163,7 @@ class ProxmoxKvmAnsible(ProxmoxAnsible): for param in valid_clone_params: if self.module.params[param] is not None: clone_params[param] = self.module.params[param] - clone_params.update(dict([k, int(v)] for k, v in clone_params.items() if isinstance(v, bool))) + clone_params.update({k: int(v) for k, v in clone_params.items() if isinstance(v, bool)}) taskid = proxmox_node.qemu(vmid).clone.post(newid=newid, name=name, **clone_params) else: taskid = proxmox_node.qemu.create(vmid=vmid, name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs) diff --git a/plugins/modules/proxmox_template.py b/plugins/modules/proxmox_template.py index f73109931f..c30f68d3f0 100644 --- a/plugins/modules/proxmox_template.py +++ b/plugins/modules/proxmox_template.py @@ -144,12 +144,12 @@ except ImportError: class ProxmoxTemplateAnsible(ProxmoxAnsible): - def get_template(self, node, storage, content_type, template): + def has_template(self, node, storage, content_type, template): + volid = '%s:%s/%s' % (storage, content_type, template) try: - return [True for tmpl in self.proxmox_api.nodes(node).storage(storage).content.get() - if tmpl['volid'] == '%s:%s/%s' % (storage, content_type, template)] + return any(tmpl['volid'] == volid for tmpl in self.proxmox_api.nodes(node).storage(storage).content.get()) except Exception as e: - self.module.fail_json(msg="Failed to retrieve template '%s:%s/%s': %s" % (storage, content_type, template, e)) + self.module.fail_json(msg="Failed to retrieve template '%s': %s" % (volid, e)) def task_status(self, node, taskid, timeout): """ @@ -161,7 +161,7 @@ class ProxmoxTemplateAnsible(ProxmoxAnsible): timeout = timeout - 1 if timeout == 0: self.module.fail_json(msg='Reached timeout while waiting for uploading/downloading template. 
Last line in task before timeout: %s' % - self.proxmox_api.node(node).tasks(taskid).log.get()[:1]) + self.proxmox_api.nodes(node).tasks(taskid).log.get()[:1]) time.sleep(1) return False @@ -190,7 +190,7 @@ class ProxmoxTemplateAnsible(ProxmoxAnsible): volid = '%s:%s/%s' % (storage, content_type, template) self.proxmox_api.nodes(node).storage(storage).content.delete(volid) while timeout: - if not self.get_template(node, storage, content_type, template): + if not self.has_template(node, storage, content_type, template): return True timeout = timeout - 1 if timeout == 0: @@ -239,14 +239,14 @@ def main(): if not template: module.fail_json(msg='template param for downloading appliance template is mandatory') - if proxmox.get_template(node, storage, content_type, template) and not module.params['force']: + if proxmox.has_template(node, storage, content_type, template) and not module.params['force']: module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template)) if proxmox.download_template(node, storage, template, timeout): module.exit_json(changed=True, msg='template with volid=%s:%s/%s downloaded' % (storage, content_type, template)) template = os.path.basename(src) - if proxmox.get_template(node, storage, content_type, template) and not module.params['force']: + if proxmox.has_template(node, storage, content_type, template) and not module.params['force']: module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already exists' % (storage, content_type, template)) elif not src: module.fail_json(msg='src param to uploading template file is mandatory') @@ -261,7 +261,7 @@ def main(): content_type = module.params['content_type'] template = module.params['template'] - if not proxmox.get_template(node, storage, content_type, template): + if not proxmox.has_template(node, storage, content_type, template): module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already deleted' % (storage, content_type, template)) if proxmox.delete_template(node, storage, content_type, template, timeout): diff --git a/plugins/modules/proxmox_vm_info.py b/plugins/modules/proxmox_vm_info.py index 39d8307a43..e10b9dff6f 100644 --- a/plugins/modules/proxmox_vm_info.py +++ b/plugins/modules/proxmox_vm_info.py @@ -57,6 +57,13 @@ options: - pending default: none version_added: 8.1.0 + network: + description: + - Whether to retrieve the current network status. + - Requires enabled/running qemu-guest-agent on qemu VMs. 
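When O(network=true), the hunk further down queries the QEMU guest agent for network interfaces and the LXC interfaces endpoint for containers. A hedged standalone sketch of the same calls with proxmoxer, mirroring the resource paths used in that hunk; host, credentials, node and vmid are placeholders:

from proxmoxer import ProxmoxAPI

# Connection details are placeholders, not values from this patch.
api = ProxmoxAPI("pve.example.com", user="root@pam", password="secret", verify_ssl=False)
node, vmid = "pve01", 100

# QEMU guests are asked through the guest agent, hence the requirement noted above.
qemu_net = api.nodes(node).qemu(vmid).agent("network-get-interfaces").get()["result"]
# LXC containers expose their interfaces directly, no agent involved.
lxc_net = api.nodes(node).lxc(vmid).interfaces.get()
print(qemu_net, lxc_net)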
+ type: bool + default: false + version_added: 9.1.0 extends_documentation_fragment: - community.general.proxmox.actiongroup_proxmox - community.general.proxmox.documentation @@ -172,7 +179,7 @@ class ProxmoxVmInfoAnsible(ProxmoxAnsible): msg="Failed to retrieve VMs information from cluster resources: %s" % e ) - def get_vms_from_nodes(self, cluster_machines, type, vmid=None, name=None, node=None, config=None): + def get_vms_from_nodes(self, cluster_machines, type, vmid=None, name=None, node=None, config=None, network=False): # Leave in dict only machines that user wants to know about filtered_vms = { vm: info for vm, info in cluster_machines.items() if not ( @@ -201,17 +208,23 @@ class ProxmoxVmInfoAnsible(ProxmoxAnsible): config_type = 0 if config == "pending" else 1 # GET /nodes/{node}/qemu/{vmid}/config current=[0/1] desired_vm["config"] = call_vm_getter(this_vm_id).config().get(current=config_type) + if network: + if type == "qemu": + desired_vm["network"] = call_vm_getter(this_vm_id).agent("network-get-interfaces").get()['result'] + elif type == "lxc": + desired_vm["network"] = call_vm_getter(this_vm_id).interfaces.get() + return filtered_vms - def get_qemu_vms(self, cluster_machines, vmid=None, name=None, node=None, config=None): + def get_qemu_vms(self, cluster_machines, vmid=None, name=None, node=None, config=None, network=False): try: - return self.get_vms_from_nodes(cluster_machines, "qemu", vmid, name, node, config) + return self.get_vms_from_nodes(cluster_machines, "qemu", vmid, name, node, config, network) except Exception as e: self.module.fail_json(msg="Failed to retrieve QEMU VMs information: %s" % e) - def get_lxc_vms(self, cluster_machines, vmid=None, name=None, node=None, config=None): + def get_lxc_vms(self, cluster_machines, vmid=None, name=None, node=None, config=None, network=False): try: - return self.get_vms_from_nodes(cluster_machines, "lxc", vmid, name, node, config) + return self.get_vms_from_nodes(cluster_machines, "lxc", vmid, name, node, config, network) except Exception as e: self.module.fail_json(msg="Failed to retrieve LXC VMs information: %s" % e) @@ -229,6 +242,7 @@ def main(): type="str", choices=["none", "current", "pending"], default="none", required=False ), + network=dict(type="bool", default=False, required=False), ) module_args.update(vm_info_args) @@ -245,6 +259,7 @@ def main(): vmid = module.params["vmid"] name = module.params["name"] config = module.params["config"] + network = module.params["network"] result = dict(changed=False) @@ -256,12 +271,12 @@ def main(): vms = {} if type == "lxc": - vms = proxmox.get_lxc_vms(cluster_machines, vmid, name, node, config) + vms = proxmox.get_lxc_vms(cluster_machines, vmid, name, node, config, network) elif type == "qemu": - vms = proxmox.get_qemu_vms(cluster_machines, vmid, name, node, config) + vms = proxmox.get_qemu_vms(cluster_machines, vmid, name, node, config, network) else: - vms = proxmox.get_qemu_vms(cluster_machines, vmid, name, node, config) - vms.update(proxmox.get_lxc_vms(cluster_machines, vmid, name, node, config)) + vms = proxmox.get_qemu_vms(cluster_machines, vmid, name, node, config, network) + vms.update(proxmox.get_lxc_vms(cluster_machines, vmid, name, node, config, network)) result["proxmox_vms"] = [info for vm, info in sorted(vms.items())] module.exit_json(**result) diff --git a/plugins/modules/puppet.py b/plugins/modules/puppet.py index 073a083247..46326c667f 100644 --- a/plugins/modules/puppet.py +++ b/plugins/modules/puppet.py @@ -128,6 +128,8 @@ options: - The default value, V(C), 
is supported on every system, but can lead to encoding errors if UTF-8 is used in the output - Use V(C.UTF-8) or V(en_US.UTF-8) or similar UTF-8 supporting locales in case of problems. You need to make sure the selected locale is supported on the system the puppet agent runs on. + - Starting with community.general 9.1.0, you can use the value V(auto) and the module will + try and determine the best parseable locale to use. type: str default: C version_added: 8.6.0 diff --git a/plugins/modules/redfish_command.py b/plugins/modules/redfish_command.py index d351e7c1d8..df541a1bd3 100644 --- a/plugins/modules/redfish_command.py +++ b/plugins/modules/redfish_command.py @@ -288,6 +288,31 @@ options: type: str choices: [ ResetAll, PreserveNetworkAndUsers, PreserveNetwork ] version_added: 8.6.0 + wait: + required: false + description: + - Block until the service is ready again. + type: bool + default: false + version_added: 9.1.0 + wait_timeout: + required: false + description: + - How long to block until the service is ready again before giving up. + type: int + default: 120 + version_added: 9.1.0 + ciphers: + required: false + description: + - SSL/TLS Ciphers to use for the request. + - 'When a list is provided, all ciphers are joined in order with V(:).' + - See the L(OpenSSL Cipher List Format,https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html#CIPHER-LIST-FORMAT) + for more details. + - The available ciphers is dependent on the Python and OpenSSL/LibreSSL versions. + type: list + elements: str + version_added: 9.2.0 author: - "Jose Delarosa (@jose-delarosa)" @@ -685,6 +710,16 @@ EXAMPLES = ''' username: "{{ username }}" password: "{{ password }}" + - name: Restart manager power gracefully and wait for it to be available + community.general.redfish_command: + category: Manager + command: GracefulRestart + resource_id: BMC + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + wait: True + - name: Restart manager power gracefully community.general.redfish_command: category: Manager @@ -841,7 +876,10 @@ def main(): ), strip_etag_quotes=dict(type='bool', default=False), reset_to_defaults_mode=dict(choices=['ResetAll', 'PreserveNetworkAndUsers', 'PreserveNetwork']), - bios_attributes=dict(type="dict") + bios_attributes=dict(type="dict"), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=120), + ciphers=dict(type='list', elements='str'), ), required_together=[ ('username', 'password'), @@ -873,6 +911,7 @@ def main(): 'account_oemaccounttypes': module.params['oem_account_types'], 'account_updatename': module.params['update_username'], 'account_properties': module.params['account_properties'], + 'account_passwordchangerequired': None, } # timeout @@ -910,10 +949,14 @@ def main(): # BIOS Attributes options bios_attributes = module.params['bios_attributes'] + # ciphers + ciphers = module.params['ciphers'] + # Build root URI root_uri = "https://" + module.params['baseuri'] rf_utils = RedfishUtils(creds, root_uri, timeout, module, - resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes) + resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes, + ciphers=ciphers) # Check that Category is valid if category not in CATEGORY_COMMANDS_ALL: @@ -941,10 +984,16 @@ def main(): # execute only if we find an Account service resource result = rf_utils._find_accountservice_resource() if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) - - for command in 
command_list: - result = ACCOUNTS_COMMANDS[command](user) + # If a password change is required and the user is attempting to + # modify their password, try to proceed. + user['account_passwordchangerequired'] = rf_utils.check_password_change_required(result) + if len(command_list) == 1 and command_list[0] == "UpdateUserPassword" and user['account_passwordchangerequired']: + result = rf_utils.update_user_password(user) + else: + module.fail_json(msg=to_native(result['msg'])) + else: + for command in command_list: + result = ACCOUNTS_COMMANDS[command](user) elif category == "Systems": # execute only if we find a System resource @@ -1016,7 +1065,7 @@ def main(): command = 'PowerGracefulRestart' if command.startswith('Power'): - result = rf_utils.manage_manager_power(command) + result = rf_utils.manage_manager_power(command, module.params['wait'], module.params['wait_timeout']) elif command == 'ClearLogs': result = rf_utils.clear_logs() elif command == 'VirtualMediaInsert': diff --git a/plugins/modules/redfish_config.py b/plugins/modules/redfish_config.py index 129b33b2e6..5b9caecc64 100644 --- a/plugins/modules/redfish_config.py +++ b/plugins/modules/redfish_config.py @@ -145,6 +145,13 @@ options: type: str default: '' version_added: '7.3.0' + storage_none_volume_deletion: + required: false + description: + - Indicates if all non-RAID volumes are automatically deleted prior to creating the new volume. + type: bool + default: false + version_added: '9.5.0' volume_ids: required: false description: @@ -164,9 +171,24 @@ options: required: false description: - Setting dict of volume to be created. + - If C(CapacityBytes) key is not specified in this dictionary, the size of + the volume will be determined by the Redfish service. It is possible the + size will not be the maximum available size. type: dict default: {} version_added: '7.5.0' + ciphers: + required: false + description: + - SSL/TLS Ciphers to use for the request. + - 'When a list is provided, all ciphers are joined in order with V(:).' + - See the L(OpenSSL Cipher List Format,https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html#CIPHER-LIST-FORMAT) + for more details. + - The available ciphers is dependent on the Python and OpenSSL/LibreSSL versions. 
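For illustration only (this sketch is not part of the patch above), the new storage_none_volume_deletion and ciphers options could be combined in a single task as follows; the subsystem ID, drive paths, and cipher string are placeholder values, and CapacityBytes is deliberately left out of volume_details so the Redfish service chooses the volume size:

- name: Create a RAID1 volume, deleting non-RAID volumes first
  community.general.redfish_config:
    category: Systems
    command: CreateVolume
    baseuri: "{{ baseuri }}"
    username: "{{ username }}"
    password: "{{ password }}"
    storage_subsystem_id: "DExxxxxx"
    storage_none_volume_deletion: true
    ciphers:
      - ECDHE-RSA-AES256-GCM-SHA384
    volume_details:
      Name: "RAID1 Volume"
      RAIDType: "RAID1"
      Drives:
        - "@odata.id": "/redfish/v1/Systems/1/Storage/DExxxxxx/Drives/1"
        - "@odata.id": "/redfish/v1/Systems/1/Storage/DExxxxxx/Drives/2"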
+ type: list + elements: str + version_added: 9.2.0 + author: - "Jose Delarosa (@jose-delarosa)" - "T S Kushal (@TSKushal)" @@ -403,9 +425,11 @@ def main(): hostinterface_id=dict(), sessions_config=dict(type='dict', default={}), storage_subsystem_id=dict(type='str', default=''), + storage_none_volume_deletion=dict(type='bool', default=False), volume_ids=dict(type='list', default=[], elements='str'), secure_boot_enable=dict(type='bool', default=True), - volume_details=dict(type='dict', default={}) + volume_details=dict(type='dict', default={}), + ciphers=dict(type='list', elements='str'), ), required_together=[ ('username', 'password'), @@ -468,11 +492,16 @@ def main(): # Volume creation options volume_details = module.params['volume_details'] storage_subsystem_id = module.params['storage_subsystem_id'] + storage_none_volume_deletion = module.params['storage_none_volume_deletion'] + + # ciphers + ciphers = module.params['ciphers'] # Build root URI root_uri = "https://" + module.params['baseuri'] rf_utils = RedfishUtils(creds, root_uri, timeout, module, - resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes) + resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes, + ciphers=ciphers) # Check that Category is valid if category not in CATEGORY_COMMANDS_ALL: @@ -507,7 +536,7 @@ def main(): elif command == "DeleteVolumes": result = rf_utils.delete_volumes(storage_subsystem_id, volume_ids) elif command == "CreateVolume": - result = rf_utils.create_volume(volume_details, storage_subsystem_id) + result = rf_utils.create_volume(volume_details, storage_subsystem_id, storage_none_volume_deletion) elif category == "Manager": # execute only if we find a Manager service resource diff --git a/plugins/modules/redfish_info.py b/plugins/modules/redfish_info.py index 3b594b7a2c..b1b4a45ee5 100644 --- a/plugins/modules/redfish_info.py +++ b/plugins/modules/redfish_info.py @@ -73,6 +73,17 @@ options: - Handle to check the status of an update in progress. type: str version_added: '6.1.0' + ciphers: + required: false + description: + - SSL/TLS Ciphers to use for the request. + - 'When a list is provided, all ciphers are joined in order with V(:).' + - See the L(OpenSSL Cipher List Format,https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html#CIPHER-LIST-FORMAT) + for more details. + - The available ciphers is dependent on the Python and OpenSSL/LibreSSL versions. 
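As a rough usage sketch for the option described above (not part of the patch itself; the cipher names are merely examples of standard OpenSSL cipher strings):

- name: Get system inventory while offering a restricted cipher list
  community.general.redfish_info:
    category: Systems
    command: GetSystemInventory
    baseuri: "{{ baseuri }}"
    username: "{{ username }}"
    password: "{{ password }}"
    ciphers:
      - ECDHE-RSA-AES256-GCM-SHA384
      - ECDHE-RSA-AES128-GCM-SHA256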
+    type: list
+    elements: str
+    version_added: 9.2.0

author: "Jose Delarosa (@jose-delarosa)"
'''

@@ -359,6 +370,16 @@ EXAMPLES = '''
      baseuri: "{{ baseuri }}"
      username: "{{ username }}"
      password: "{{ password }}"
+
+  - name: Check the availability of the service with a timeout of 5 seconds
+    community.general.redfish_info:
+      category: Service
+      command: CheckAvailability
+      baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+      timeout: 5
+    register: result
'''

RETURN = '''
@@ -385,6 +406,7 @@ CATEGORY_COMMANDS_ALL = {
                "GetUpdateStatus"],
    "Manager": ["GetManagerNicInventory", "GetVirtualMedia", "GetLogs", "GetNetworkProtocols",
                "GetHealthReport", "GetHostInterfaces", "GetManagerInventory", "GetServiceIdentification"],
+   "Service": ["CheckAvailability"],
}

CATEGORY_COMMANDS_DEFAULT = {
@@ -393,7 +415,8 @@
    "Accounts": "ListUsers",
    "Update": "GetFirmwareInventory",
    "Sessions": "GetSessions",
-   "Manager": "GetManagerNicInventory"
+   "Manager": "GetManagerNicInventory",
+   "Service": "CheckAvailability",
}

@@ -411,6 +434,7 @@ def main():
            timeout=dict(type='int', default=60),
            update_handle=dict(),
            manager=dict(),
+           ciphers=dict(type='list', elements='str'),
        ),
        required_together=[
            ('username', 'password'),
@@ -438,9 +462,12 @@
    # manager
    manager = module.params['manager']

+   # ciphers
+   ciphers = module.params['ciphers']
+
    # Build root URI
    root_uri = "https://" + module.params['baseuri']
-   rf_utils = RedfishUtils(creds, root_uri, timeout, module)
+   rf_utils = RedfishUtils(creds, root_uri, timeout, module, ciphers=ciphers)

    # Build Category list
    if "all" in module.params['category']:
@@ -473,7 +500,13 @@
        module.fail_json(msg="Invalid Category: %s" % category)

    # Organize by Categories / Commands
-   if category == "Systems":
+   if category == "Service":
+       # service-level commands are always available
+       for command in command_list:
+           if command == "CheckAvailability":
+               result["service"] = rf_utils.check_service_availability()
+
+   elif category == "Systems":
        # execute only if we find a Systems resource
        resource = rf_utils._find_systems_resource()
        if resource['ret'] is False:
diff --git a/plugins/modules/redhat_subscription.py b/plugins/modules/redhat_subscription.py
index 4a7aac483e..ee7a01983c 100644
--- a/plugins/modules/redhat_subscription.py
+++ b/plugins/modules/redhat_subscription.py
@@ -1262,7 +1262,6 @@ def main():
            module.exit_json(changed=False, msg="System already unregistered.")
        else:
            try:
-               rhsm.unsubscribe()
                rhsm.unregister()
            except Exception as e:
                module.fail_json(msg="Failed to unregister: %s" % to_native(e))
diff --git a/plugins/modules/redis.py b/plugins/modules/redis.py
index 207927cb77..a30b89922c 100644
--- a/plugins/modules/redis.py
+++ b/plugins/modules/redis.py
@@ -132,6 +132,16 @@ EXAMPLES = '''
    command: config
    name: lua-time-limit
    value: 100
+
+- name: Connect using TLS and certificate authentication
+  community.general.redis:
+    command: config
+    name: lua-time-limit
+    value: 100
+    tls: true
+    ca_certs: /etc/redis/certs/ca.crt
+    client_cert_file: /etc/redis/certs/redis.crt
+    client_key_file: /etc/redis/certs/redis.key
'''

import traceback
diff --git a/plugins/modules/redis_info.py b/plugins/modules/redis_info.py
index f352d53d79..c75abcf212 100644
--- a/plugins/modules/redis_info.py
+++ b/plugins/modules/redis_info.py
@@ -30,6 +30,11 @@ options:
    version_added: 7.5.0
  ca_certs:
    version_added: 7.5.0
+ cluster:
+   default: false
+   description: Get information about cluster status as RV(cluster).
+   type: bool
+   version_added: 9.1.0
seealso:
  - module: community.general.redis
author: "Pavlo Bashynskyi (@levonet)"
@@ -43,6 +48,15 @@ EXAMPLES = r'''
- name: Print server information
  ansible.builtin.debug:
    var: result.info
+
+- name: Get server cluster information
+  community.general.redis_info:
+    cluster: true
+  register: result
+
+- name: Print server cluster information
+  ansible.builtin.debug:
+    var: result.cluster_info
'''

RETURN = r'''
@@ -178,6 +192,25 @@ info:
        "used_memory_scripts_human": "0B",
        "used_memory_startup": 791264
    }
+cluster:
+  description: The default set of cluster information sections U(https://redis.io/commands/cluster-info).
+  returned: success if O(cluster=true)
+  version_added: 9.1.0
+  type: dict
+  sample: {
+    "cluster_state": ok,
+    "cluster_slots_assigned": 16384,
+    "cluster_slots_ok": 16384,
+    "cluster_slots_pfail": 0,
+    "cluster_slots_fail": 0,
+    "cluster_known_nodes": 6,
+    "cluster_size": 3,
+    "cluster_current_epoch": 6,
+    "cluster_my_epoch": 2,
+    "cluster_stats_messages_sent": 1483972,
+    "cluster_stats_messages_received": 1483968,
+    "total_cluster_links_buffer_limit_exceeded": 0
+  }
'''

import traceback
@@ -202,14 +235,19 @@ def redis_client(**client_params):

# Module execution.
def main():
+   module_args = dict(
+       cluster=dict(type='bool', default=False),
+   )
+   module_args.update(redis_auth_argument_spec(tls_default=False))
    module = AnsibleModule(
-       argument_spec=redis_auth_argument_spec(tls_default=False),
+       argument_spec=module_args,
        supports_check_mode=True,
    )
    fail_imports(module, module.params['tls'])

    redis_params = redis_auth_params(module)
+   cluster = module.params['cluster']

    # Connect and check
    client = redis_client(**redis_params)
@@ -219,7 +257,13 @@ def main():
        module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())

    info = client.info()
-   module.exit_json(changed=False, info=info)
+
+   result = dict(changed=False, info=info)
+
+   if cluster:
+       result['cluster_info'] = client.execute_command('CLUSTER INFO')
+
+   module.exit_json(**result)


if __name__ == '__main__':
diff --git a/plugins/modules/rpm_ostree_pkg.py b/plugins/modules/rpm_ostree_pkg.py
index 826c33f2d1..1a02b2d71c 100644
--- a/plugins/modules/rpm_ostree_pkg.py
+++ b/plugins/modules/rpm_ostree_pkg.py
@@ -55,6 +55,17 @@ EXAMPLES = r'''
    community.general.rpm_ostree_pkg:
      name: nfs-utils
      state: absent
+
+# In case a different transaction is currently running, the module would fail.
+# Adding a delay can help mitigate this problem:
+- name: Install overlay package
+  community.general.rpm_ostree_pkg:
+    name: nfs-utils
+    state: present
+  register: rpm_ostree_pkg
+  until: rpm_ostree_pkg is not failed
+  retries: 10
+  delay: 30
'''

RETURN = r'''
diff --git a/plugins/modules/say.py b/plugins/modules/say.py
index 175e5feb0b..2dc359083d 100644
--- a/plugins/modules/say.py
+++ b/plugins/modules/say.py
@@ -9,12 +9,11 @@ from __future__ import absolute_import, division, print_function

__metaclass__ = type

-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: say
short_description: Makes a computer to speak
description:
-  - makes a computer speak! Amuse your friends, annoy your coworkers!
+  - Makes a computer speak! Amuse your friends, annoy your coworkers!
notes:
  - In 2.5, this module has been renamed from C(osx_say) to M(community.general.say).
  - If you like this module, you may also be interested in the osx_say callback plugin.
@@ -37,19 +36,19 @@ options:
    description:
      - What voice to use.
required: false -requirements: [ say or espeak or espeak-ng ] +requirements: [say or espeak or espeak-ng] author: - - "Ansible Core Team" - - "Michael DeHaan (@mpdehaan)" -''' + - "Ansible Core Team" + - "Michael DeHaan (@mpdehaan)" +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Makes a computer to speak community.general.say: msg: '{{ inventory_hostname }} is all done' voice: Zarvox delegate_to: localhost -''' +""" import platform from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/scaleway_compute.py b/plugins/modules/scaleway_compute.py index 58a3215056..c61030bede 100644 --- a/plugins/modules/scaleway_compute.py +++ b/plugins/modules/scaleway_compute.py @@ -13,16 +13,15 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_compute short_description: Scaleway compute management module author: Remy Leone (@remyleone) description: - - "This module manages compute instances on Scaleway." + - This module manages compute instances on Scaleway. extends_documentation_fragment: - - community.general.scaleway - - community.general.attributes + - community.general.scaleway + - community.general.attributes attributes: check_mode: @@ -35,35 +34,33 @@ options: public_ip: type: str description: - - Manage public IP on a Scaleway server - - Could be Scaleway IP address UUID - - V(dynamic) Means that IP is destroyed at the same time the host is destroyed - - V(absent) Means no public IP at all + - Manage public IP on a Scaleway server. + - Could be Scaleway IP address UUID. + - V(dynamic) Means that IP is destroyed at the same time the host is destroyed. + - V(absent) Means no public IP at all. default: absent enable_ipv6: description: - - Enable public IPv6 connectivity on the instance + - Enable public IPv6 connectivity on the instance. default: false type: bool image: type: str description: - - Image identifier used to start the instance with + - Image identifier used to start the instance with. required: true name: type: str description: - - Name of the instance - + - Name of the instance. organization: type: str description: - Organization identifier. - Exactly one of O(project) and O(organization) must be specified. - project: type: str description: @@ -74,7 +71,7 @@ options: state: type: str description: - - Indicate desired state of the instance. + - Indicate desired state of the instance. default: present choices: - present @@ -87,14 +84,14 @@ options: type: list elements: str description: - - List of tags to apply to the instance (5 max) + - List of tags to apply to the instance (5 max). required: false default: [] region: type: str description: - - Scaleway compute zone + - Scaleway compute zone. required: true choices: - ams1 @@ -109,38 +106,38 @@ options: commercial_type: type: str description: - - Commercial name of the compute node + - Commercial name of the compute node. required: true wait: description: - - Wait for the instance to reach its desired state before returning. + - Wait for the instance to reach its desired state before returning. type: bool default: false wait_timeout: type: int description: - - Time to wait for the server to reach the expected state + - Time to wait for the server to reach the expected state. required: false default: 300 wait_sleep_time: type: int description: - - Time to wait before every attempt to check the state of the server + - Time to wait before every attempt to check the state of the server. 
required: false default: 3 security_group: type: str description: - - Security group unique identifier - - If no value provided, the default security group or current security group will be used + - Security group unique identifier. + - If no value provided, the default security group or current security group will be used. required: false -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a server community.general.scaleway_compute: name: foobar @@ -174,10 +171,10 @@ EXAMPLES = ''' project: 951df375-e094-4d26-97c1-ba548eeb9c42 region: ams1 commercial_type: VC1S -''' +""" -RETURN = ''' -''' +RETURN = r""" +""" import datetime import time @@ -586,9 +583,11 @@ def server_attributes_should_be_changed(compute_api, target_server, wished_serve compute_api.module.debug("Checking if server attributes should be changed") compute_api.module.debug("Current Server: %s" % target_server) compute_api.module.debug("Wished Server: %s" % wished_server) - debug_dict = dict((x, (target_server[x], wished_server[x])) - for x in PATCH_MUTABLE_SERVER_ATTRIBUTES - if x in target_server and x in wished_server) + debug_dict = { + x: (target_server[x], wished_server[x]) + for x in PATCH_MUTABLE_SERVER_ATTRIBUTES + if x in target_server and x in wished_server + } compute_api.module.debug("Debug dict %s" % debug_dict) try: for key in PATCH_MUTABLE_SERVER_ATTRIBUTES: @@ -614,7 +613,7 @@ def server_change_attributes(compute_api, target_server, wished_server): # When you are working with dict, only ID matter as we ask user to put only the resource ID in the playbook if isinstance(target_server[key], dict) and "id" in target_server[key] and wished_server[key]: # Setting all key to current value except ID - key_dict = dict((x, target_server[key][x]) for x in target_server[key].keys() if x != "id") + key_dict = {x: target_server[key][x] for x in target_server[key].keys() if x != "id"} # Setting ID to the user specified ID key_dict["id"] = wished_server[key] patch_payload[key] = key_dict diff --git a/plugins/modules/scaleway_compute_private_network.py b/plugins/modules/scaleway_compute_private_network.py index b41720be58..5339dfef15 100644 --- a/plugins/modules/scaleway_compute_private_network.py +++ b/plugins/modules/scaleway_compute_private_network.py @@ -11,18 +11,16 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_compute_private_network short_description: Scaleway compute - private network management version_added: 5.2.0 author: Pascal MANGIN (@pastral) description: - - This module add or remove a private network to a compute instance - (U(https://developer.scaleway.com)). + - This module add or remove a private network to a compute instance (U(https://developer.scaleway.com)). extends_documentation_fragment: - - community.general.scaleway - - community.general.attributes + - community.general.scaleway + - community.general.attributes attributes: check_mode: @@ -34,7 +32,7 @@ options: state: type: str description: - - Indicate desired state of the VPC. + - Indicate desired state of the VPC. default: present choices: - present @@ -49,7 +47,7 @@ options: region: type: str description: - - Scaleway region to use (for example V(par1)). + - Scaleway region to use (for example V(par1)). required: true choices: - ams1 @@ -64,18 +62,17 @@ options: compute_id: type: str description: - - ID of the compute instance (see M(community.general.scaleway_compute)). 
+ - ID of the compute instance (see M(community.general.scaleway_compute)). required: true private_network_id: type: str description: - - ID of the private network (see M(community.general.scaleway_private_network)). + - ID of the private network (see M(community.general.scaleway_private_network)). required: true +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Plug a VM to a private network community.general.scaleway_compute_private_network: project: '{{ scw_project }}' @@ -92,10 +89,9 @@ EXAMPLES = ''' region: par1 compute_id: "12345678-f1e6-40ec-83e5-12345d67ed89" private_network_id: "22345678-f1e6-40ec-83e5-12345d67ed89" +""" -''' - -RETURN = ''' +RETURN = r""" scaleway_compute_private_network: description: Information on the VPC. returned: success when O(state=present) @@ -117,7 +113,8 @@ scaleway_compute_private_network: "updated_at": "2022-01-15T11:12:04.624837Z", "zone": "fr-par-2" } -''' +""" + from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/scaleway_container.py b/plugins/modules/scaleway_container.py index 8764a76349..5cdd12da5d 100644 --- a/plugins/modules/scaleway_container.py +++ b/plugins/modules/scaleway_container.py @@ -11,8 +11,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_container short_description: Scaleway Container management version_added: 6.0.0 @@ -109,7 +108,8 @@ options: privacy: description: - Privacy policies define whether a container can be executed anonymously. - - Choose V(public) to enable anonymous execution, or V(private) to protect your container with an authentication mechanism provided by the Scaleway API. + - Choose V(public) to enable anonymous execution, or V(private) to protect your container with an authentication mechanism provided by the + Scaleway API. type: str default: public choices: @@ -147,9 +147,9 @@ options: - Redeploy the container if update is required. type: bool default: false -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a container community.general.scaleway_container: namespace_id: '{{ scw_container_namespace }}' @@ -169,9 +169,9 @@ EXAMPLES = ''' state: absent region: fr-par name: my-awesome-container -''' +""" -RETURN = ''' +RETURN = r""" container: description: The container information. 
returned: when O(state=present) @@ -181,7 +181,7 @@ container: description: Container used for testing scaleway_container ansible module domain_name: cnansibletestgfogtjod-cn-ansible-test.functions.fnc.fr-par.scw.cloud environment_variables: - MY_VAR: my_value + MY_VAR: my_value error_message: null http_option: "" id: c9070eb0-d7a4-48dd-9af3-4fb139890721 @@ -201,7 +201,7 @@ container: value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg status: created timeout: 300s -''' +""" from copy import deepcopy @@ -260,8 +260,7 @@ def absent_strategy(api, wished_cn): changed = False cn_list = api.fetch_all_resources("containers") - cn_lookup = dict((cn["name"], cn) - for cn in cn_list) + cn_lookup = {cn["name"]: cn for cn in cn_list} if wished_cn["name"] not in cn_lookup: return changed, {} @@ -285,8 +284,7 @@ def present_strategy(api, wished_cn): changed = False cn_list = api.fetch_all_resources("containers") - cn_lookup = dict((cn["name"], cn) - for cn in cn_list) + cn_lookup = {cn["name"]: cn for cn in cn_list} payload_cn = payload_from_wished_cn(wished_cn) diff --git a/plugins/modules/scaleway_container_info.py b/plugins/modules/scaleway_container_info.py index 20ebece212..28cf40ac50 100644 --- a/plugins/modules/scaleway_container_info.py +++ b/plugins/modules/scaleway_container_info.py @@ -11,8 +11,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_container_info short_description: Retrieve information on Scaleway Container version_added: 6.0.0 @@ -46,18 +45,18 @@ options: description: - Name of the container. required: true -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Get a container info community.general.scaleway_container_info: namespace_id: '{{ scw_container_namespace }}' region: fr-par name: my-awesome-container register: container_info_task -''' +""" -RETURN = ''' +RETURN = r""" container: description: The container information. 
returned: always @@ -87,7 +86,7 @@ container: value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg status: created timeout: 300s -''' +""" from ansible_collections.community.general.plugins.module_utils.scaleway import ( SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, @@ -97,8 +96,7 @@ from ansible.module_utils.basic import AnsibleModule def info_strategy(api, wished_cn): cn_list = api.fetch_all_resources("containers") - cn_lookup = dict((fn["name"], fn) - for fn in cn_list) + cn_lookup = {cn["name"]: cn for cn in cn_list} if wished_cn["name"] not in cn_lookup: msg = "Error during container lookup: Unable to find container named '%s' in namespace '%s'" % (wished_cn["name"], diff --git a/plugins/modules/scaleway_container_namespace.py b/plugins/modules/scaleway_container_namespace.py index fd56a7d433..802a491321 100644 --- a/plugins/modules/scaleway_container_namespace.py +++ b/plugins/modules/scaleway_container_namespace.py @@ -11,8 +11,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_container_namespace short_description: Scaleway Container namespace management version_added: 6.0.0 @@ -24,7 +23,7 @@ extends_documentation_fragment: - community.general.scaleway_waitable_resource - community.general.attributes requirements: - - passlib[argon2] >= 1.7.4 + - passlib[argon2] >= 1.7.4 attributes: check_mode: @@ -84,9 +83,9 @@ options: - Injected in containers at runtime. type: dict default: {} -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a container namespace community.general.scaleway_container_namespace: project_id: '{{ scw_project }}' @@ -105,9 +104,9 @@ EXAMPLES = ''' state: absent region: fr-par name: my-awesome-container-namespace -''' +""" -RETURN = ''' +RETURN = r""" container_namespace: description: The container namespace information. returned: when O(state=present) @@ -128,7 +127,7 @@ container_namespace: - key: MY_SECRET_VAR value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg status: pending -''' +""" from copy import deepcopy @@ -167,8 +166,7 @@ def absent_strategy(api, wished_cn): changed = False cn_list = api.fetch_all_resources("namespaces") - cn_lookup = dict((cn["name"], cn) - for cn in cn_list) + cn_lookup = {cn["name"]: cn for cn in cn_list} if wished_cn["name"] not in cn_lookup: return changed, {} @@ -192,8 +190,7 @@ def present_strategy(api, wished_cn): changed = False cn_list = api.fetch_all_resources("namespaces") - cn_lookup = dict((cn["name"], cn) - for cn in cn_list) + cn_lookup = {cn["name"]: cn for cn in cn_list} payload_cn = payload_from_wished_cn(wished_cn) diff --git a/plugins/modules/scaleway_container_namespace_info.py b/plugins/modules/scaleway_container_namespace_info.py index 758720dd57..d1e7196871 100644 --- a/plugins/modules/scaleway_container_namespace_info.py +++ b/plugins/modules/scaleway_container_namespace_info.py @@ -11,8 +11,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_container_namespace_info short_description: Retrieve information on Scaleway Container namespace version_added: 6.0.0 @@ -46,18 +45,18 @@ options: description: - Name of the container namespace. 
required: true -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Get a container namespace info community.general.scaleway_container_namespace_info: project_id: '{{ scw_project }}' region: fr-par name: my-awesome-container-namespace register: container_namespace_info_task -''' +""" -RETURN = ''' +RETURN = r""" container_namespace: description: The container namespace information. returned: always @@ -66,7 +65,7 @@ container_namespace: description: "" environment_variables: MY_VAR: my_value - error_message: null + error_message: id: 531a1fd7-98d2-4a74-ad77-d398324304b8 name: my-awesome-container-namespace organization_id: e04e3bdc-015c-4514-afde-9389e9be24b0 @@ -78,7 +77,7 @@ container_namespace: - key: MY_SECRET_VAR value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg status: pending -''' +""" from ansible_collections.community.general.plugins.module_utils.scaleway import ( SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, @@ -88,8 +87,7 @@ from ansible.module_utils.basic import AnsibleModule def info_strategy(api, wished_cn): cn_list = api.fetch_all_resources("namespaces") - cn_lookup = dict((fn["name"], fn) - for fn in cn_list) + cn_lookup = {cn["name"]: cn for cn in cn_list} if wished_cn["name"] not in cn_lookup: msg = "Error during container namespace lookup: Unable to find container namespace named '%s' in project '%s'" % (wished_cn["name"], diff --git a/plugins/modules/scaleway_container_registry.py b/plugins/modules/scaleway_container_registry.py index 6344a7ae66..132dfe8bb6 100644 --- a/plugins/modules/scaleway_container_registry.py +++ b/plugins/modules/scaleway_container_registry.py @@ -11,8 +11,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_container_registry short_description: Scaleway Container registry management module version_added: 5.8.0 @@ -77,9 +76,9 @@ options: - public - private default: private -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a container registry community.general.scaleway_container_registry: project_id: '{{ scw_project }}' @@ -94,9 +93,9 @@ EXAMPLES = ''' state: absent region: fr-par name: my-awesome-container-registry -''' +""" -RETURN = ''' +RETURN = r""" container_registry: description: The container registry information. 
returned: when O(state=present) @@ -116,7 +115,7 @@ container_registry: status: ready status_message: "" updated_at: "2022-10-14T09:51:07.949716Z" -''' +""" from ansible_collections.community.general.plugins.module_utils.scaleway import ( SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, @@ -150,8 +149,7 @@ def absent_strategy(api, wished_cr): changed = False cr_list = api.fetch_all_resources("namespaces") - cr_lookup = dict((cr["name"], cr) - for cr in cr_list) + cr_lookup = {cr["name"]: cr for cr in cr_list} if wished_cr["name"] not in cr_lookup: return changed, {} @@ -175,8 +173,7 @@ def present_strategy(api, wished_cr): changed = False cr_list = api.fetch_all_resources("namespaces") - cr_lookup = dict((cr["name"], cr) - for cr in cr_list) + cr_lookup = {cr["name"]: cr for cr in cr_list} payload_cr = payload_from_wished_cr(wished_cr) diff --git a/plugins/modules/scaleway_container_registry_info.py b/plugins/modules/scaleway_container_registry_info.py index 9c641edcbb..e0fc1db5f3 100644 --- a/plugins/modules/scaleway_container_registry_info.py +++ b/plugins/modules/scaleway_container_registry_info.py @@ -11,8 +11,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_container_registry_info short_description: Scaleway Container registry info module version_added: 5.8.0 @@ -46,18 +45,18 @@ options: description: - Name of the container registry. required: true -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Get a container registry info community.general.scaleway_container_registry_info: project_id: '{{ scw_project }}' region: fr-par name: my-awesome-container-registry register: container_registry_info_task -''' +""" -RETURN = ''' +RETURN = r""" container_registry: description: The container registry information. returned: always @@ -77,7 +76,7 @@ container_registry: status: ready status_message: "" updated_at: "2022-10-14T09:51:07.949716Z" -''' +""" from ansible_collections.community.general.plugins.module_utils.scaleway import ( SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, @@ -87,8 +86,7 @@ from ansible.module_utils.basic import AnsibleModule def info_strategy(api, wished_cn): cn_list = api.fetch_all_resources("namespaces") - cn_lookup = dict((fn["name"], fn) - for fn in cn_list) + cn_lookup = {cn["name"]: cn for cn in cn_list} if wished_cn["name"] not in cn_lookup: msg = "Error during container registries lookup: Unable to find container registry named '%s' in project '%s'" % (wished_cn["name"], diff --git a/plugins/modules/scaleway_database_backup.py b/plugins/modules/scaleway_database_backup.py index 1d0c17fb6d..b19a6b49bd 100644 --- a/plugins/modules/scaleway_database_backup.py +++ b/plugins/modules/scaleway_database_backup.py @@ -12,17 +12,16 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_database_backup short_description: Scaleway database backups management module version_added: 1.2.0 author: Guillaume Rodriguez (@guillaume_ro_fr) description: - - "This module manages database backups on Scaleway account U(https://developer.scaleway.com)." + - This module manages database backups on Scaleway account U(https://developer.scaleway.com). 
extends_documentation_fragment: - - community.general.scaleway - - community.general.attributes + - community.general.scaleway + - community.general.attributes attributes: check_mode: support: full @@ -31,118 +30,118 @@ attributes: options: state: description: - - Indicate desired state of the database backup. - - V(present) creates a backup. - - V(absent) deletes the backup. - - V(exported) creates a download link for the backup. - - V(restored) restores the backup to a new database. + - Indicate desired state of the database backup. + - V(present) creates a backup. + - V(absent) deletes the backup. + - V(exported) creates a download link for the backup. + - V(restored) restores the backup to a new database. type: str default: present choices: - - present - - absent - - exported - - restored + - present + - absent + - exported + - restored region: description: - - Scaleway region to use (for example V(fr-par)). + - Scaleway region to use (for example V(fr-par)). type: str required: true choices: - - fr-par - - nl-ams - - pl-waw + - fr-par + - nl-ams + - pl-waw id: description: - - UUID used to identify the database backup. - - Required for V(absent), V(exported) and V(restored) states. + - UUID used to identify the database backup. + - Required for V(absent), V(exported) and V(restored) states. type: str name: description: - - Name used to identify the database backup. - - Required for V(present) state. - - Ignored when O(state=absent), O(state=exported) or O(state=restored). + - Name used to identify the database backup. + - Required for V(present) state. + - Ignored when O(state=absent), O(state=exported) or O(state=restored). type: str required: false database_name: description: - - Name used to identify the database. - - Required for V(present) and V(restored) states. - - Ignored when O(state=absent) or O(state=exported). + - Name used to identify the database. + - Required for V(present) and V(restored) states. + - Ignored when O(state=absent) or O(state=exported). type: str required: false instance_id: description: - - UUID of the instance associated to the database backup. - - Required for V(present) and V(restored) states. - - Ignored when O(state=absent) or O(state=exported). + - UUID of the instance associated to the database backup. + - Required for V(present) and V(restored) states. + - Ignored when O(state=absent) or O(state=exported). type: str required: false expires_at: description: - - Expiration datetime of the database backup (ISO 8601 format). - - Ignored when O(state=absent), O(state=exported) or O(state=restored). + - Expiration datetime of the database backup (ISO 8601 format). + - Ignored when O(state=absent), O(state=exported) or O(state=restored). type: str required: false wait: description: - - Wait for the instance to reach its desired state before returning. + - Wait for the instance to reach its desired state before returning. type: bool default: false wait_timeout: description: - - Time to wait for the backup to reach the expected state. + - Time to wait for the backup to reach the expected state. type: int required: false default: 300 wait_sleep_time: description: - - Time to wait before every attempt to check the state of the backup. + - Time to wait before every attempt to check the state of the backup. 
type: int required: false default: 3 -''' +""" -EXAMPLES = ''' - - name: Create a backup - community.general.scaleway_database_backup: - name: 'my_backup' - state: present - region: 'fr-par' - database_name: 'my-database' - instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb' +EXAMPLES = r""" +- name: Create a backup + community.general.scaleway_database_backup: + name: 'my_backup' + state: present + region: 'fr-par' + database_name: 'my-database' + instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb' - - name: Export a backup - community.general.scaleway_database_backup: - id: '6ef1125a-037e-494f-a911-6d9c49a51691' - state: exported - region: 'fr-par' +- name: Export a backup + community.general.scaleway_database_backup: + id: '6ef1125a-037e-494f-a911-6d9c49a51691' + state: exported + region: 'fr-par' - - name: Restore a backup - community.general.scaleway_database_backup: - id: '6ef1125a-037e-494f-a911-6d9c49a51691' - state: restored - region: 'fr-par' - database_name: 'my-new-database' - instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb' +- name: Restore a backup + community.general.scaleway_database_backup: + id: '6ef1125a-037e-494f-a911-6d9c49a51691' + state: restored + region: 'fr-par' + database_name: 'my-new-database' + instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb' - - name: Remove a backup - community.general.scaleway_database_backup: - id: '6ef1125a-037e-494f-a911-6d9c49a51691' - state: absent - region: 'fr-par' -''' +- name: Remove a backup + community.general.scaleway_database_backup: + id: '6ef1125a-037e-494f-a911-6d9c49a51691' + state: absent + region: 'fr-par' +""" -RETURN = ''' +RETURN = r""" metadata: description: Backup metadata. returned: when O(state=present), O(state=exported), or O(state=restored) @@ -164,7 +163,7 @@ metadata: "updated_at": "2020-08-06T12:42:10.581649Z" } } -''' +""" import datetime import time diff --git a/plugins/modules/scaleway_function.py b/plugins/modules/scaleway_function.py index eb121cd9c7..a5e81c37e4 100644 --- a/plugins/modules/scaleway_function.py +++ b/plugins/modules/scaleway_function.py @@ -11,8 +11,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_function short_description: Scaleway Function management version_added: 6.0.0 @@ -97,8 +96,8 @@ options: runtime: description: - - Runtime of the function - - See U(https://www.scaleway.com/en/docs/compute/functions/reference-content/functions-lifecycle/) for all available runtimes + - Runtime of the function. + - See U(https://www.scaleway.com/en/docs/compute/functions/reference-content/functions-lifecycle/) for all available runtimes. type: str required: true @@ -121,7 +120,8 @@ options: privacy: description: - Privacy policies define whether a function can be executed anonymously. - - Choose V(public) to enable anonymous execution, or V(private) to protect your function with an authentication mechanism provided by the Scaleway API. + - Choose V(public) to enable anonymous execution, or V(private) to protect your function with an authentication mechanism provided by the + Scaleway API. type: str default: public choices: @@ -133,9 +133,9 @@ options: - Redeploy the function if update is required. 
type: bool default: false -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a function community.general.scaleway_function: namespace_id: '{{ scw_function_namespace }}' @@ -155,9 +155,9 @@ EXAMPLES = ''' region: fr-par state: absent name: my-awesome-function -''' +""" -RETURN = ''' +RETURN = r""" function: description: The function information. returned: when O(state=present) @@ -186,7 +186,7 @@ function: value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg status: created timeout: 300s -''' +""" from copy import deepcopy @@ -245,8 +245,7 @@ def absent_strategy(api, wished_fn): changed = False fn_list = api.fetch_all_resources("functions") - fn_lookup = dict((fn["name"], fn) - for fn in fn_list) + fn_lookup = {fn["name"]: fn for fn in fn_list} if wished_fn["name"] not in fn_lookup: return changed, {} @@ -270,8 +269,7 @@ def present_strategy(api, wished_fn): changed = False fn_list = api.fetch_all_resources("functions") - fn_lookup = dict((fn["name"], fn) - for fn in fn_list) + fn_lookup = {fn["name"]: fn for fn in fn_list} payload_fn = payload_from_wished_fn(wished_fn) diff --git a/plugins/modules/scaleway_function_info.py b/plugins/modules/scaleway_function_info.py index c30f0cdb00..7a3acef11e 100644 --- a/plugins/modules/scaleway_function_info.py +++ b/plugins/modules/scaleway_function_info.py @@ -11,8 +11,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_function_info short_description: Retrieve information on Scaleway Function version_added: 6.0.0 @@ -46,18 +45,18 @@ options: description: - Name of the function. required: true -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Get a function info community.general.scaleway_function_info: namespace_id: '{{ scw_function_namespace }}' region: fr-par name: my-awesome-function register: function_info_task -''' +""" -RETURN = ''' +RETURN = r""" function: description: The function information. 
returned: always @@ -68,7 +67,7 @@ function: domain_name: fnansibletestfxamabuc-fn-ansible-test.functions.fnc.fr-par.scw.cloud environment_variables: MY_VAR: my_value - error_message: null + error_message: handler: handler.handle http_option: "" id: ceb64dc4-4464-4196-8e20-ecef705475d3 @@ -86,7 +85,7 @@ function: value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg status: created timeout: 300s -''' +""" from ansible_collections.community.general.plugins.module_utils.scaleway import ( SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway @@ -96,8 +95,7 @@ from ansible.module_utils.basic import AnsibleModule def info_strategy(api, wished_fn): fn_list = api.fetch_all_resources("functions") - fn_lookup = dict((fn["name"], fn) - for fn in fn_list) + fn_lookup = {fn["name"]: fn for fn in fn_list} if wished_fn["name"] not in fn_lookup: msg = "Error during function lookup: Unable to find function named '%s' in namespace '%s'" % (wished_fn["name"], diff --git a/plugins/modules/scaleway_function_namespace.py b/plugins/modules/scaleway_function_namespace.py index 0ea31e9bcb..d43b42bc7f 100644 --- a/plugins/modules/scaleway_function_namespace.py +++ b/plugins/modules/scaleway_function_namespace.py @@ -11,8 +11,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_function_namespace short_description: Scaleway Function namespace management version_added: 6.0.0 @@ -84,9 +83,9 @@ options: - Injected in functions at runtime. type: dict default: {} -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a function namespace community.general.scaleway_function_namespace: project_id: '{{ scw_project }}' @@ -105,9 +104,9 @@ EXAMPLES = ''' state: absent region: fr-par name: my-awesome-function-namespace -''' +""" -RETURN = ''' +RETURN = r""" function_namespace: description: The function namespace information. 
returned: when O(state=present) @@ -116,7 +115,7 @@ function_namespace: description: "" environment_variables: MY_VAR: my_value - error_message: null + error_message: id: 531a1fd7-98d2-4a74-ad77-d398324304b8 name: my-awesome-function-namespace organization_id: e04e3bdc-015c-4514-afde-9389e9be24b0 @@ -128,7 +127,7 @@ function_namespace: - key: MY_SECRET_VAR value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg status: pending -''' +""" from copy import deepcopy @@ -168,8 +167,7 @@ def absent_strategy(api, wished_fn): changed = False fn_list = api.fetch_all_resources("namespaces") - fn_lookup = dict((fn["name"], fn) - for fn in fn_list) + fn_lookup = {fn["name"]: fn for fn in fn_list} if wished_fn["name"] not in fn_lookup: return changed, {} @@ -193,8 +191,7 @@ def present_strategy(api, wished_fn): changed = False fn_list = api.fetch_all_resources("namespaces") - fn_lookup = dict((fn["name"], fn) - for fn in fn_list) + fn_lookup = {fn["name"]: fn for fn in fn_list} payload_fn = payload_from_wished_fn(wished_fn) diff --git a/plugins/modules/scaleway_function_namespace_info.py b/plugins/modules/scaleway_function_namespace_info.py index f3ea5ddfc8..f2bed200dc 100644 --- a/plugins/modules/scaleway_function_namespace_info.py +++ b/plugins/modules/scaleway_function_namespace_info.py @@ -11,8 +11,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_function_namespace_info short_description: Retrieve information on Scaleway Function namespace version_added: 6.0.0 @@ -46,18 +45,18 @@ options: description: - Name of the function namespace. required: true -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Get a function namespace info community.general.scaleway_function_namespace_info: project_id: '{{ scw_project }}' region: fr-par name: my-awesome-function-namespace register: function_namespace_info_task -''' +""" -RETURN = ''' +RETURN = r""" function_namespace: description: The function namespace information. 
returned: always @@ -66,7 +65,7 @@ function_namespace: description: "" environment_variables: MY_VAR: my_value - error_message: null + error_message: id: 531a1fd7-98d2-4a74-ad77-d398324304b8 name: my-awesome-function-namespace organization_id: e04e3bdc-015c-4514-afde-9389e9be24b0 @@ -78,7 +77,7 @@ function_namespace: - key: MY_SECRET_VAR value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg status: pending -''' +""" from ansible_collections.community.general.plugins.module_utils.scaleway import ( SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, @@ -88,8 +87,7 @@ from ansible.module_utils.basic import AnsibleModule def info_strategy(api, wished_fn): fn_list = api.fetch_all_resources("namespaces") - fn_lookup = dict((fn["name"], fn) - for fn in fn_list) + fn_lookup = {fn["name"]: fn for fn in fn_list} if wished_fn["name"] not in fn_lookup: msg = "Error during function namespace lookup: Unable to find function namespace named '%s' in project '%s'" % (wished_fn["name"], diff --git a/plugins/modules/scaleway_image_info.py b/plugins/modules/scaleway_image_info.py index bdae185148..0f6d1539c8 100644 --- a/plugins/modules/scaleway_image_info.py +++ b/plugins/modules/scaleway_image_info.py @@ -8,8 +8,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: scaleway_image_info short_description: Gather information about the Scaleway images available description: @@ -37,9 +36,9 @@ options: - EMEA-FR-PAR2 - waw1 - EMEA-PL-WAW1 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather Scaleway images information community.general.scaleway_image_info: region: par1 @@ -47,14 +46,13 @@ EXAMPLES = r''' - ansible.builtin.debug: msg: "{{ result.scaleway_image_info }}" -''' +""" -RETURN = r''' ---- +RETURN = r""" scaleway_image_info: description: - Response from Scaleway API. - - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." + - 'For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/).' returned: success type: list elements: dict @@ -91,7 +89,7 @@ scaleway_image_info: "state": "available" } ] -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.scaleway import ( diff --git a/plugins/modules/scaleway_ip.py b/plugins/modules/scaleway_ip.py index 1c9042742b..4fad2faf61 100644 --- a/plugins/modules/scaleway_ip.py +++ b/plugins/modules/scaleway_ip.py @@ -11,17 +11,15 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_ip short_description: Scaleway IP management module author: Remy Leone (@remyleone) description: - - This module manages IP on Scaleway account - U(https://developer.scaleway.com) + - This module manages IP on Scaleway account U(https://developer.scaleway.com). extends_documentation_fragment: - - community.general.scaleway - - community.general.attributes + - community.general.scaleway + - community.general.attributes attributes: check_mode: @@ -33,7 +31,7 @@ options: state: type: str description: - - Indicate desired state of the IP. + - Indicate desired state of the IP. default: present choices: - present @@ -42,13 +40,13 @@ options: organization: type: str description: - - Scaleway organization identifier + - Scaleway organization identifier. 
required: true region: type: str description: - - Scaleway region to use (for example par1). + - Scaleway region to use (for example par1). required: true choices: - ams1 @@ -63,21 +61,19 @@ options: id: type: str description: - - id of the Scaleway IP (UUID) - + - Id of the Scaleway IP (UUID). server: type: str description: - - id of the server you want to attach an IP to. - - To unattach an IP don't specify this option - + - Id of the server you want to attach an IP to. + - To unattach an IP don't specify this option. reverse: type: str description: - - Reverse to assign to the IP -''' + - Reverse to assign to the IP. +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create an IP community.general.scaleway_ip: organization: '{{ scw_org }}' @@ -90,9 +86,9 @@ EXAMPLES = ''' id: '{{ ip_creation_task.scaleway_ip.id }}' state: absent region: par1 -''' +""" -RETURN = ''' +RETURN = r""" data: description: This is only present when O(state=present). returned: when O(state=present) @@ -110,8 +106,8 @@ data: "address": "212.47.232.136" } ] - } -''' + } +""" from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway from ansible.module_utils.basic import AnsibleModule @@ -145,11 +141,11 @@ def ip_attributes_should_be_changed(api, target_ip, wished_ip): def payload_from_wished_ip(wished_ip): - return dict( - (k, v) + return { + k: v for k, v in wished_ip.items() if k != 'id' and v is not None - ) + } def present_strategy(api, wished_ip): @@ -161,8 +157,7 @@ def present_strategy(api, wished_ip): response.status_code, response.json['message'])) ips_list = response.json["ips"] - ip_lookup = dict((ip["id"], ip) - for ip in ips_list) + ip_lookup = {ip["id"]: ip for ip in ips_list} if wished_ip["id"] not in ip_lookup.keys(): changed = True @@ -212,8 +207,7 @@ def absent_strategy(api, wished_ip): api.module.fail_json(msg='Error getting IPs [{0}: {1}]'.format( status_code, response.json['message'])) - ip_lookup = dict((ip["id"], ip) - for ip in ips_list) + ip_lookup = {ip["id"]: ip for ip in ips_list} if wished_ip["id"] not in ip_lookup.keys(): return changed, {} diff --git a/plugins/modules/scaleway_ip_info.py b/plugins/modules/scaleway_ip_info.py index 1fd4be5898..b597c7c42b 100644 --- a/plugins/modules/scaleway_ip_info.py +++ b/plugins/modules/scaleway_ip_info.py @@ -8,8 +8,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: scaleway_ip_info short_description: Gather information about the Scaleway ips available description: @@ -37,9 +36,9 @@ options: - EMEA-FR-PAR2 - waw1 - EMEA-PL-WAW1 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather Scaleway ips information community.general.scaleway_ip_info: region: par1 @@ -47,14 +46,13 @@ EXAMPLES = r''' - ansible.builtin.debug: msg: "{{ result.scaleway_ip_info }}" -''' +""" -RETURN = r''' ---- +RETURN = r""" scaleway_ip_info: description: - Response from Scaleway API. - - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." 
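The dict((k, v) for ...) to {k: v for ...} rewrites that recur across these Scaleway modules are purely stylistic; a small standalone Python sketch (with made-up sample data) shows that both spellings build the same lookup:

# Illustrative data only; the real modules build this from the Scaleway API response.
ips_list = [
    {"id": "a1", "address": "192.0.2.10"},
    {"id": "b2", "address": "192.0.2.11"},
]

old_style = dict((ip["id"], ip) for ip in ips_list)  # generator expression passed to dict()
new_style = {ip["id"]: ip for ip in ips_list}        # dict comprehension

assert old_style == new_style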
+ - 'For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)' returned: success type: list elements: dict @@ -71,7 +69,7 @@ scaleway_ip_info: } } ] -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.scaleway import ( diff --git a/plugins/modules/scaleway_lb.py b/plugins/modules/scaleway_lb.py index 5bd16c3f4e..927ef77990 100644 --- a/plugins/modules/scaleway_lb.py +++ b/plugins/modules/scaleway_lb.py @@ -13,16 +13,15 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_lb short_description: Scaleway load-balancer management module author: Remy Leone (@remyleone) description: - - "This module manages load-balancers on Scaleway." + - This module manages load-balancers on Scaleway. extends_documentation_fragment: - - community.general.scaleway - - community.general.attributes + - community.general.scaleway + - community.general.attributes attributes: check_mode: @@ -53,7 +52,7 @@ options: state: type: str description: - - Indicate desired state of the instance. + - Indicate desired state of the instance. default: present choices: - present @@ -62,7 +61,7 @@ options: region: type: str description: - - Scaleway zone. + - Scaleway zone. required: true choices: - nl-ams @@ -74,30 +73,29 @@ options: elements: str default: [] description: - - List of tags to apply to the load-balancer. - + - List of tags to apply to the load-balancer. wait: description: - - Wait for the load-balancer to reach its desired state before returning. + - Wait for the load-balancer to reach its desired state before returning. type: bool default: false wait_timeout: type: int description: - - Time to wait for the load-balancer to reach the expected state. + - Time to wait for the load-balancer to reach the expected state. required: false default: 300 wait_sleep_time: type: int description: - - Time to wait before every attempt to check the state of the load-balancer. + - Time to wait before every attempt to check the state of the load-balancer. 
required: false default: 3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a load-balancer community.general.scaleway_lb: name: foobar @@ -113,7 +111,7 @@ EXAMPLES = ''' state: absent organization_id: 951df375-e094-4d26-97c1-ba548eeb9c42 region: fr-par -''' +""" RETURNS = ''' { @@ -224,10 +222,10 @@ def wait_to_complete_state_transition(api, lb, force_wait=False): def lb_attributes_should_be_changed(target_lb, wished_lb): - diff = dict((attr, wished_lb[attr]) for attr in MUTABLE_ATTRIBUTES if target_lb[attr] != wished_lb[attr]) + diff = {attr: wished_lb[attr] for attr in MUTABLE_ATTRIBUTES if target_lb[attr] != wished_lb[attr]} if diff: - return dict((attr, wished_lb[attr]) for attr in MUTABLE_ATTRIBUTES) + return {attr: wished_lb[attr] for attr in MUTABLE_ATTRIBUTES} else: return diff @@ -241,8 +239,7 @@ def present_strategy(api, wished_lb): response.status_code, response.json['message'])) lbs_list = response.json["lbs"] - lb_lookup = dict((lb["name"], lb) - for lb in lbs_list) + lb_lookup = {lb["name"]: lb for lb in lbs_list} if wished_lb["name"] not in lb_lookup.keys(): changed = True @@ -298,8 +295,7 @@ def absent_strategy(api, wished_lb): api.module.fail_json(msg='Error getting load-balancers [{0}: {1}]'.format( status_code, response.json['message'])) - lb_lookup = dict((lb["name"], lb) - for lb in lbs_list) + lb_lookup = {lb["name"]: lb for lb in lbs_list} if wished_lb["name"] not in lb_lookup.keys(): return changed, {} diff --git a/plugins/modules/scaleway_organization_info.py b/plugins/modules/scaleway_organization_info.py index e9e272c988..603ab3cd4c 100644 --- a/plugins/modules/scaleway_organization_info.py +++ b/plugins/modules/scaleway_organization_info.py @@ -8,8 +8,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: scaleway_organization_info short_description: Gather information about the Scaleway organizations available description: @@ -27,20 +26,18 @@ extends_documentation_fragment: - community.general.scaleway - community.general.attributes - community.general.attributes.info_module +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather Scaleway organizations information community.general.scaleway_organization_info: register: result - ansible.builtin.debug: msg: "{{ result.scaleway_organization_info }}" -''' +""" -RETURN = r''' ---- +RETURN = r""" scaleway_organization_info: description: Response from Scaleway API. returned: success @@ -70,7 +67,7 @@ scaleway_organization_info: "warnings": [] } ] -''' +""" from ansible.module_utils.basic import AnsibleModule, env_fallback from ansible_collections.community.general.plugins.module_utils.scaleway import ( diff --git a/plugins/modules/scaleway_private_network.py b/plugins/modules/scaleway_private_network.py index 0cc9b900f4..922a780098 100644 --- a/plugins/modules/scaleway_private_network.py +++ b/plugins/modules/scaleway_private_network.py @@ -11,17 +11,16 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_private_network short_description: Scaleway private network management version_added: 4.5.0 author: Pascal MANGIN (@pastral) description: - - "This module manages private network on Scaleway account (U(https://developer.scaleway.com))." + - This module manages private network on Scaleway account (U(https://developer.scaleway.com)). 
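For reference, here is the lb_attributes_should_be_changed() refactor from the scaleway_lb hunk above as a self-contained snippet with a tiny usage example. MUTABLE_ATTRIBUTES is not visible in this diff, so the value below is a placeholder chosen purely for the demonstration.

# Placeholder only; the real constant lives elsewhere in scaleway_lb.py.
MUTABLE_ATTRIBUTES = ('name', 'tags')


def lb_attributes_should_be_changed(target_lb, wished_lb):
    diff = {attr: wished_lb[attr] for attr in MUTABLE_ATTRIBUTES if target_lb[attr] != wished_lb[attr]}
    if diff:
        # As soon as anything differs, the full set of mutable attributes is
        # returned so the update payload is always complete.
        return {attr: wished_lb[attr] for attr in MUTABLE_ATTRIBUTES}
    return diff


if __name__ == '__main__':
    current = {'name': 'foobar', 'tags': []}
    wanted = {'name': 'foobar', 'tags': ['prod']}
    print(lb_attributes_should_be_changed(current, wanted))  # {'name': 'foobar', 'tags': ['prod']}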
extends_documentation_fragment: - - community.general.scaleway - - community.general.attributes + - community.general.scaleway + - community.general.attributes attributes: check_mode: @@ -33,7 +32,7 @@ options: state: type: str description: - - Indicate desired state of the VPC. + - Indicate desired state of the VPC. default: present choices: - present @@ -48,7 +47,7 @@ options: region: type: str description: - - Scaleway region to use (for example V(par1)). + - Scaleway region to use (for example V(par1)). required: true choices: - ams1 @@ -63,18 +62,16 @@ options: name: type: str description: - - Name of the VPC. - + - Name of the VPC. tags: type: list elements: str description: - - List of tags to apply to the instance. + - List of tags to apply to the instance. default: [] +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Create an private network community.general.scaleway_vpc: project: '{{ scw_project }}' @@ -88,9 +85,9 @@ EXAMPLES = ''' name: 'foo' state: absent region: par1 -''' +""" -RETURN = ''' +RETURN = r""" scaleway_private_network: description: Information on the VPC. returned: success when O(state=present) @@ -112,7 +109,7 @@ scaleway_private_network: "updated_at": "2022-01-15T11:12:04.624837Z", "zone": "fr-par-2" } -''' +""" from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/scaleway_security_group.py b/plugins/modules/scaleway_security_group.py index c09bc34bad..3e1a28275e 100644 --- a/plugins/modules/scaleway_security_group.py +++ b/plugins/modules/scaleway_security_group.py @@ -12,16 +12,15 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_security_group short_description: Scaleway Security Group management module author: Antoine Barbare (@abarbare) description: - - "This module manages Security Group on Scaleway account U(https://developer.scaleway.com)." + - This module manages Security Group on Scaleway account U(https://developer.scaleway.com). extends_documentation_fragment: - - community.general.scaleway - - community.general.attributes + - community.general.scaleway + - community.general.attributes attributes: check_mode: @@ -34,7 +33,7 @@ options: description: - Indicate desired state of the Security Group. type: str - choices: [ absent, present ] + choices: [absent, present] default: present organization: @@ -79,21 +78,21 @@ options: description: - Default policy for incoming traffic. type: str - choices: [ accept, drop ] + choices: [accept, drop] outbound_default_policy: description: - Default policy for outcoming traffic. type: str - choices: [ accept, drop ] + choices: [accept, drop] organization_default: description: - Create security group to be the default one. type: bool -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a Security Group community.general.scaleway_security_group: state: present @@ -106,9 +105,9 @@ EXAMPLES = ''' outbound_default_policy: accept organization_default: false register: security_group_creation_task -''' +""" -RETURN = ''' +RETURN = r""" data: description: This is only present when O(state=present). 
returned: when O(state=present) @@ -127,7 +126,7 @@ data: "stateful": false } } -''' +""" from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway from ansible.module_utils.basic import AnsibleModule @@ -135,11 +134,11 @@ from uuid import uuid4 def payload_from_security_group(security_group): - return dict( - (k, v) + return { + k: v for k, v in security_group.items() if k != 'id' and v is not None - ) + } def present_strategy(api, security_group): @@ -149,8 +148,7 @@ def present_strategy(api, security_group): if not response.ok: api.module.fail_json(msg='Error getting security groups "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json)) - security_group_lookup = dict((sg['name'], sg) - for sg in response.json['security_groups']) + security_group_lookup = {sg['name']: sg for sg in response.json['security_groups']} if security_group['name'] not in security_group_lookup.keys(): ret['changed'] = True @@ -181,8 +179,7 @@ def absent_strategy(api, security_group): if not response.ok: api.module.fail_json(msg='Error getting security groups "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json)) - security_group_lookup = dict((sg['name'], sg) - for sg in response.json['security_groups']) + security_group_lookup = {sg['name']: sg for sg in response.json['security_groups']} if security_group['name'] not in security_group_lookup.keys(): return ret diff --git a/plugins/modules/scaleway_security_group_info.py b/plugins/modules/scaleway_security_group_info.py index fb28e87740..6664938e09 100644 --- a/plugins/modules/scaleway_security_group_info.py +++ b/plugins/modules/scaleway_security_group_info.py @@ -8,8 +8,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: scaleway_security_group_info short_description: Gather information about the Scaleway security groups available description: @@ -36,10 +35,9 @@ extends_documentation_fragment: - community.general.scaleway - community.general.attributes - community.general.attributes.info_module +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather Scaleway security groups information community.general.scaleway_security_group_info: region: par1 @@ -47,14 +45,13 @@ EXAMPLES = r''' - ansible.builtin.debug: msg: "{{ result.scaleway_security_group_info }}" -''' +""" -RETURN = r''' ---- +RETURN = r""" scaleway_security_group_info: description: - Response from Scaleway API. - - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." + - 'For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/).' 
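The security-group hunks above keep repeating one idempotency pattern: list the existing objects, key them by name with a dict comprehension, then decide what to do from simple membership. A condensed, API-free sketch of that decision follows; the real present strategy may additionally patch attributes when the group already exists, and check_mode handling and the HTTP calls are left out.

def plan_action(existing_groups, wished_name, state):
    # Key the existing security groups by name, as the strategies above do.
    lookup = {sg['name']: sg for sg in existing_groups}
    exists = wished_name in lookup
    if state == 'present':
        return 'keep' if exists else 'create'
    return 'delete' if exists else 'keep'


assert plan_action([{'name': 'default'}], 'security_group', 'present') == 'create'
assert plan_action([{'name': 'security_group'}], 'security_group', 'absent') == 'delete'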
returned: success type: list elements: dict @@ -75,7 +72,7 @@ scaleway_security_group_info: ] } ] -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.scaleway import ( diff --git a/plugins/modules/scaleway_security_group_rule.py b/plugins/modules/scaleway_security_group_rule.py index 9cbb2eb57e..ec89d41f6c 100644 --- a/plugins/modules/scaleway_security_group_rule.py +++ b/plugins/modules/scaleway_security_group_rule.py @@ -12,13 +12,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_security_group_rule short_description: Scaleway Security Group Rule management module author: Antoine Barbare (@abarbare) description: - - "This module manages Security Group Rule on Scaleway account U(https://developer.scaleway.com)." + - This module manages Security Group Rule on Scaleway account U(https://developer.scaleway.com). extends_documentation_fragment: - community.general.scaleway - community.general.attributes @@ -99,23 +98,23 @@ options: description: - Security Group unique identifier. required: true -''' +""" -EXAMPLES = ''' - - name: Create a Security Group Rule - community.general.scaleway_security_group_rule: - state: present - region: par1 - protocol: TCP - port: 80 - ip_range: 0.0.0.0/0 - direction: inbound - action: accept - security_group: b57210ee-1281-4820-a6db-329f78596ecb - register: security_group_rule_creation_task -''' +EXAMPLES = r""" +- name: Create a Security Group Rule + community.general.scaleway_security_group_rule: + state: present + region: par1 + protocol: TCP + port: 80 + ip_range: 0.0.0.0/0 + direction: inbound + action: accept + security_group: b57210ee-1281-4820-a6db-329f78596ecb + register: security_group_rule_creation_task +""" -RETURN = ''' +RETURN = r""" data: description: This is only present when O(state=present). returned: when O(state=present) @@ -133,7 +132,7 @@ data: "id": "10cb0b9a-80f6-4830-abd7-a31cd828b5e9" } } -''' +""" from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway, payload_from_object from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/scaleway_server_info.py b/plugins/modules/scaleway_server_info.py index 01e9410da8..39af47005e 100644 --- a/plugins/modules/scaleway_server_info.py +++ b/plugins/modules/scaleway_server_info.py @@ -8,8 +8,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: scaleway_server_info short_description: Gather information about the Scaleway servers available description: @@ -37,9 +36,9 @@ options: - EMEA-FR-PAR2 - waw1 - EMEA-PL-WAW1 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather Scaleway servers information community.general.scaleway_server_info: region: par1 @@ -47,14 +46,13 @@ EXAMPLES = r''' - ansible.builtin.debug: msg: "{{ result.scaleway_server_info }}" -''' +""" -RETURN = r''' ---- +RETURN = r""" scaleway_server_info: description: - Response from Scaleway API. - - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." + - 'For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/).' 
returned: success type: list elements: dict @@ -157,7 +155,7 @@ scaleway_server_info: } } ] -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.scaleway import ( diff --git a/plugins/modules/scaleway_snapshot_info.py b/plugins/modules/scaleway_snapshot_info.py index 687f43c85b..6b932cced2 100644 --- a/plugins/modules/scaleway_snapshot_info.py +++ b/plugins/modules/scaleway_snapshot_info.py @@ -8,8 +8,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: scaleway_snapshot_info short_description: Gather information about the Scaleway snapshots available description: @@ -37,9 +36,9 @@ options: - EMEA-FR-PAR2 - waw1 - EMEA-PL-WAW1 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather Scaleway snapshots information community.general.scaleway_snapshot_info: region: par1 @@ -47,14 +46,13 @@ EXAMPLES = r''' - ansible.builtin.debug: msg: "{{ result.scaleway_snapshot_info }}" -''' +""" -RETURN = r''' ---- +RETURN = r""" scaleway_snapshot_info: description: - Response from Scaleway API. - - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." + - 'For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/).' returned: success type: list elements: dict @@ -75,7 +73,7 @@ scaleway_snapshot_info: "volume_type": "l_ssd" } ] -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.scaleway import ( diff --git a/plugins/modules/scaleway_sshkey.py b/plugins/modules/scaleway_sshkey.py index 5647f9cd05..37e8ec8c3b 100644 --- a/plugins/modules/scaleway_sshkey.py +++ b/plugins/modules/scaleway_sshkey.py @@ -13,16 +13,15 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_sshkey short_description: Scaleway SSH keys management module author: Remy Leone (@remyleone) description: - - "This module manages SSH keys on Scaleway account U(https://developer.scaleway.com)." + - This module manages SSH keys on Scaleway account (U(https://developer.scaleway.com)). extends_documentation_fragment: -- community.general.scaleway -- community.general.attributes + - community.general.scaleway + - community.general.attributes attributes: check_mode: @@ -34,7 +33,7 @@ options: state: type: str description: - - Indicate desired state of the SSH key. + - Indicate desired state of the SSH key. default: present choices: - present @@ -42,7 +41,7 @@ options: ssh_pub_key: type: str description: - - The public SSH key as a string to add. + - The public SSH key as a string to add. required: true api_url: type: str @@ -50,9 +49,9 @@ options: - Scaleway API URL. default: 'https://account.scaleway.com' aliases: ['base_url'] -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: "Add SSH key" community.general.scaleway_sshkey: ssh_pub_key: "ssh-rsa AAAA..." @@ -68,9 +67,9 @@ EXAMPLES = ''' ssh_pub_key: "ssh-rsa AAAA..." state: "present" oauth_token: "6ecd2c9b-6f4f-44d4-a187-61a92078d08c" -''' +""" -RETURN = ''' +RETURN = r""" data: description: This is only present when O(state=present). 
returned: when O(state=present) @@ -80,7 +79,7 @@ data: {"key": "ssh-rsa AAAA...."} ] } -''' +""" from ansible.module_utils.basic import AnsibleModule, env_fallback from ansible_collections.community.general.plugins.module_utils.scaleway import scaleway_argument_spec, Scaleway diff --git a/plugins/modules/scaleway_user_data.py b/plugins/modules/scaleway_user_data.py index 08ff86a55e..f4f2c18624 100644 --- a/plugins/modules/scaleway_user_data.py +++ b/plugins/modules/scaleway_user_data.py @@ -13,17 +13,16 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_user_data short_description: Scaleway user_data management module author: Remy Leone (@remyleone) description: - - This module manages user_data on compute instances on Scaleway. - - It can be used to configure cloud-init for instance. + - This module manages user_data on compute instances on Scaleway. + - It can be used to configure cloud-init for instance. extends_documentation_fragment: -- community.general.scaleway -- community.general.attributes + - community.general.scaleway + - community.general.attributes attributes: check_mode: @@ -36,20 +35,20 @@ options: server_id: type: str description: - - Scaleway Compute instance ID of the server. + - Scaleway Compute instance ID of the server. required: true user_data: type: dict description: - - User defined data. Typically used with C(cloud-init). - - Pass your C(cloud-init) script here as a string. + - User defined data. Typically used with C(cloud-init). + - Pass your C(cloud-init) script here as a string. required: false region: type: str description: - - Scaleway compute zone. + - Scaleway compute zone. required: true choices: - ams1 @@ -60,19 +59,19 @@ options: - EMEA-FR-PAR2 - waw1 - EMEA-PL-WAW1 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Update the cloud-init community.general.scaleway_user_data: server_id: '5a33b4ab-57dd-4eb6-8b0a-d95eb63492ce' region: ams1 user_data: cloud-init: 'final_message: "Hello World!"' -''' +""" -RETURN = ''' -''' +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway @@ -129,10 +128,10 @@ def core(module): compute_api.module.fail_json(msg=msg) present_user_data_keys = user_data_list.json["user_data"] - present_user_data = dict( - (key, get_user_data(compute_api=compute_api, server_id=server_id, key=key)) + present_user_data = { + key: get_user_data(compute_api=compute_api, server_id=server_id, key=key) for key in present_user_data_keys - ) + } if present_user_data == user_data: module.exit_json(changed=changed, msg=user_data_list.json) @@ -149,7 +148,7 @@ def core(module): # Then we patch keys that are different for key, value in user_data.items(): - if key not in present_user_data or user_data[key] != present_user_data[key]: + if key not in present_user_data or value != present_user_data[key]: changed = True if compute_api.module.check_mode: diff --git a/plugins/modules/scaleway_volume.py b/plugins/modules/scaleway_volume.py index 46d72288e7..ed6a506742 100644 --- a/plugins/modules/scaleway_volume.py +++ b/plugins/modules/scaleway_volume.py @@ -12,16 +12,15 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_volume short_description: Scaleway volumes management module author: Henryk Konsek 
(@hekonsek) description: - - "This module manages volumes on Scaleway account U(https://developer.scaleway.com)." + - This module manages volumes on Scaleway account U(https://developer.scaleway.com). extends_documentation_fragment: -- community.general.scaleway -- community.general.attributes + - community.general.scaleway + - community.general.attributes attributes: check_mode: @@ -74,9 +73,9 @@ options: type: str description: - Type of the volume (for example 'l_ssd'). -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create 10GB volume community.general.scaleway_volume: name: my-volume @@ -92,9 +91,9 @@ EXAMPLES = ''' name: my-volume state: absent region: par1 -''' +""" -RETURN = ''' +RETURN = r""" data: description: This is only present when O(state=present). returned: when O(state=present) @@ -110,7 +109,7 @@ data: "volume_type": "l_ssd" } } -''' +""" from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/scaleway_volume_info.py b/plugins/modules/scaleway_volume_info.py index 471845c43e..1b2e95f88c 100644 --- a/plugins/modules/scaleway_volume_info.py +++ b/plugins/modules/scaleway_volume_info.py @@ -8,8 +8,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: scaleway_volume_info short_description: Gather information about the Scaleway volumes available description: @@ -37,9 +36,9 @@ options: - EMEA-FR-PAR2 - waw1 - EMEA-PL-WAW1 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather Scaleway volumes information community.general.scaleway_volume_info: region: par1 @@ -47,14 +46,13 @@ EXAMPLES = r''' - ansible.builtin.debug: msg: "{{ result.scaleway_volume_info }}" -''' +""" -RETURN = r''' ---- +RETURN = r""" scaleway_volume_info: description: - Response from Scaleway API. - - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." + - 'For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/).' returned: success type: list elements: dict @@ -73,7 +71,7 @@ scaleway_volume_info: "volume_type": "l_ssd" } ] -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.scaleway import ( diff --git a/plugins/modules/sefcontext.py b/plugins/modules/sefcontext.py index 19c128fa7b..eeba491f5d 100644 --- a/plugins/modules/sefcontext.py +++ b/plugins/modules/sefcontext.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: sefcontext short_description: Manages SELinux file context mapping definitions description: @@ -28,85 +27,82 @@ attributes: options: target: description: - - Target path (expression). + - Target path (expression). type: str required: true - aliases: [ path ] + aliases: [path] ftype: description: - - The file type that should have SELinux contexts applied. - - "The following file type options are available:" - - V(a) for all files, - - V(b) for block devices, - - V(c) for character devices, - - V(d) for directories, - - V(f) for regular files, - - V(l) for symbolic links, - - V(p) for named pipes, - - V(s) for socket files. + - The file type that should have SELinux contexts applied. 
+ - 'The following file type options are available:' + - V(a) for all files, + - V(b) for block devices, + - V(c) for character devices, + - V(d) for directories, + - V(f) for regular files, + - V(l) for symbolic links, + - V(p) for named pipes, + - V(s) for socket files. type: str - choices: [ a, b, c, d, f, l, p, s ] + choices: [a, b, c, d, f, l, p, s] default: a setype: description: - - SELinux type for the specified O(target). + - SELinux type for the specified O(target). type: str substitute: description: - - Path to use to substitute file context(s) for the specified O(target). The context labeling for the O(target) subtree is made equivalent to this path. - - This is also referred to as SELinux file context equivalence and it implements the C(equal) functionality of the SELinux management tools. + - Path to use to substitute file context(s) for the specified O(target). The context labeling for the O(target) subtree is made equivalent + to this path. + - This is also referred to as SELinux file context equivalence and it implements the C(equal) functionality of the SELinux management tools. version_added: 6.4.0 type: str - aliases: [ equal ] + aliases: [equal] seuser: description: - - SELinux user for the specified O(target). - - Defaults to V(system_u) for new file contexts and to existing value when modifying file contexts. + - SELinux user for the specified O(target). + - Defaults to V(system_u) for new file contexts and to existing value when modifying file contexts. type: str selevel: description: - - SELinux range for the specified O(target). - - Defaults to V(s0) for new file contexts and to existing value when modifying file contexts. + - SELinux range for the specified O(target). + - Defaults to V(s0) for new file contexts and to existing value when modifying file contexts. type: str - aliases: [ serange ] + aliases: [serange] state: description: - - Whether the SELinux file context must be V(absent) or V(present). - - Specifying V(absent) without either O(setype) or O(substitute) deletes both SELinux type or path substitution mappings that match O(target). + - Whether the SELinux file context must be V(absent) or V(present). + - Specifying V(absent) without either O(setype) or O(substitute) deletes both SELinux type or path substitution mappings that match O(target). type: str - choices: [ absent, present ] + choices: [absent, present] default: present reload: description: - - Reload SELinux policy after commit. - - Note that this does not apply SELinux file contexts to existing files. + - Reload SELinux policy after commit. + - Note that this does not apply SELinux file contexts to existing files. type: bool default: true ignore_selinux_state: description: - - Useful for scenarios (chrooted environment) that you can't get the real SELinux state. + - Useful for scenarios (chrooted environment) that you cannot get the real SELinux state. type: bool default: false notes: -- The changes are persistent across reboots. -- O(setype) and O(substitute) are mutually exclusive. -- If O(state=present) then one of O(setype) or O(substitute) is mandatory. -- The M(community.general.sefcontext) module does not modify existing files to the new - SELinux context(s), so it is advisable to first create the SELinux - file contexts before creating files, or run C(restorecon) manually - for the existing files that require the new SELinux file contexts. 
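The ignore_selinux_state option documented above boils down to one guard around the libselinux runtime check. The snippet below sketches that guard, assuming the libselinux Python bindings listed in the requirements are importable; the real modules wrap the import in a try/except and fail with a friendlier message, so treat this as an illustration only.

import selinux  # provided by the libselinux-python requirement listed above


def get_runtime_status(ignore_selinux_state=False):
    # In a chroot the runtime check is meaningless, so the option lets the
    # caller skip it and proceed as if SELinux were enabled.
    return True if ignore_selinux_state else bool(selinux.is_selinux_enabled())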
-- Not applying SELinux fcontexts to existing files is a deliberate - decision as it would be unclear what reported changes would entail - to, and there's no guarantee that applying SELinux fcontext does - not pick up other unrelated prior changes. + - The changes are persistent across reboots. + - O(setype) and O(substitute) are mutually exclusive. + - If O(state=present) then one of O(setype) or O(substitute) is mandatory. + - The M(community.general.sefcontext) module does not modify existing files to the new SELinux context(s), so it is advisable to first create + the SELinux file contexts before creating files, or run C(restorecon) manually for the existing files that require the new SELinux file contexts. + - Not applying SELinux fcontexts to existing files is a deliberate decision as it would be unclear what reported changes would entail to, and + there is no guarantee that applying SELinux fcontext does not pick up other unrelated prior changes. requirements: -- libselinux-python -- policycoreutils-python + - libselinux-python + - policycoreutils-python author: -- Dag Wieers (@dagwieers) -''' + - Dag Wieers (@dagwieers) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Allow apache to modify files in /srv/git_repos community.general.sefcontext: target: '/srv/git_repos(/.*)?' @@ -132,11 +128,11 @@ EXAMPLES = r''' - name: Apply new SELinux file context to filesystem ansible.builtin.command: restorecon -irv /srv/git_repos -''' +""" -RETURN = r''' +RETURN = r""" # Default return values -''' +""" import traceback diff --git a/plugins/modules/selinux_permissive.py b/plugins/modules/selinux_permissive.py index 80439e1de7..b5c0ee4a61 100644 --- a/plugins/modules/selinux_permissive.py +++ b/plugins/modules/selinux_permissive.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: selinux_permissive short_description: Change permissive domain in SELinux policy description: @@ -25,20 +24,20 @@ attributes: options: domain: description: - - The domain that will be added or removed from the list of permissive domains. + - The domain that will be added or removed from the list of permissive domains. type: str required: true - aliases: [ name ] + aliases: [name] permissive: description: - - Indicate if the domain should or should not be set as permissive. + - Indicate if the domain should or should not be set as permissive. type: bool required: true no_reload: description: - - Disable reloading of the SELinux policy after making change to a domain's permissive setting. - - The default is V(false), which causes policy to be reloaded when a domain changes state. - - Reloading the policy does not work on older versions of the C(policycoreutils-python) library, for example in EL 6." + - Disable reloading of the SELinux policy after making change to a domain's permissive setting. + - The default is V(false), which causes policy to be reloaded when a domain changes state. + - Reloading the policy does not work on older versions of the C(policycoreutils-python) library, for example in EL 6.". type: bool default: false store: @@ -47,18 +46,18 @@ options: type: str default: '' notes: - - Requires a recent version of SELinux and C(policycoreutils-python) (EL 6 or newer). -requirements: [ policycoreutils-python ] + - Requires a recent version of SELinux and C(policycoreutils-python) (EL 6 or newer). 
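The reworked sefcontext notes above state two constraints (setype and substitute are mutually exclusive, and one of them is required when state=present) that map directly onto AnsibleModule's constraint arguments. The snippet below is a generic illustration of that mapping, not the module's actual argument_spec, which is outside the hunks shown here.

from ansible.module_utils.basic import AnsibleModule


def build_module():
    return AnsibleModule(
        argument_spec=dict(
            target=dict(type='str', required=True, aliases=['path']),
            setype=dict(type='str'),
            substitute=dict(type='str', aliases=['equal']),
            state=dict(type='str', choices=['absent', 'present'], default='present'),
        ),
        mutually_exclusive=[('setype', 'substitute')],
        # The trailing True makes this an "at least one of" requirement.
        required_if=[('state', 'present', ('setype', 'substitute'), True)],
        supports_check_mode=True,
    )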
+requirements: [policycoreutils-python] author: -- Michael Scherer (@mscherer) -''' + - Michael Scherer (@mscherer) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Change the httpd_t domain to permissive community.general.selinux_permissive: name: httpd_t permissive: true -''' +""" import traceback diff --git a/plugins/modules/selogin.py b/plugins/modules/selogin.py index 57482b0908..8f1b20c230 100644 --- a/plugins/modules/selogin.py +++ b/plugins/modules/selogin.py @@ -8,12 +8,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: selogin short_description: Manages linux user to SELinux user mapping description: - - Manages linux user to SELinux user mapping + - Manages linux user to SELinux user mapping. extends_documentation_fragment: - community.general.attributes attributes: @@ -25,15 +24,15 @@ options: login: type: str description: - - a Linux user + - A Linux user. required: true seuser: type: str description: - - SELinux user name + - SELinux user name. selevel: type: str - aliases: [ serange ] + aliases: [serange] description: - MLS/MCS Security Range (MLS/MCS Systems only) SELinux Range for SELinux login mapping defaults to the SELinux user record range. default: s0 @@ -42,7 +41,7 @@ options: description: - Desired mapping value. default: present - choices: [ 'present', 'absent' ] + choices: ['present', 'absent'] reload: description: - Reload SELinux policy after commit. @@ -50,20 +49,20 @@ options: default: true ignore_selinux_state: description: - - Run independent of selinux runtime state + - Run independent of selinux runtime state. type: bool default: false notes: - - The changes are persistent across reboots - - Not tested on any debian based system -requirements: [ 'libselinux', 'policycoreutils' ] + - The changes are persistent across reboots. + - Not tested on any debian based system. +requirements: ['libselinux', 'policycoreutils'] author: -- Dan Keder (@dankeder) -- Petr Lautrbach (@bachradsusi) -- James Cassell (@jamescassell) -''' + - Dan Keder (@dankeder) + - Petr Lautrbach (@bachradsusi) + - James Cassell (@jamescassell) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Modify the default user on the system to the guest_u user community.general.selogin: login: __default__ @@ -82,11 +81,11 @@ EXAMPLES = ''' login: '%engineering' seuser: staff_u state: present -''' +""" -RETURN = r''' +RETURN = r""" # Default return values -''' +""" import traceback diff --git a/plugins/modules/sendgrid.py b/plugins/modules/sendgrid.py index b4f6b6eaff..c964fa8c23 100644 --- a/plugins/modules/sendgrid.py +++ b/plugins/modules/sendgrid.py @@ -9,21 +9,15 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: sendgrid short_description: Sends an email with the SendGrid API description: - - "Sends an email with a SendGrid account through their API, not through - the SMTP service." + - Sends an email with a SendGrid account through their API, not through the SMTP service. notes: - - "This module is non-idempotent because it sends an email through the - external API. It is idempotent only in the case that the module fails." - - "Like the other notification modules, this one requires an external - dependency to work. In this case, you'll need an active SendGrid - account." 
- - "In order to use api_key, cc, bcc, attachments, from_name, html_body, headers - you must pip install sendgrid" + - This module is non-idempotent because it sends an email through the external API. It is idempotent only in the case that the module fails. + - Like the other notification modules, this one requires an external dependency to work. In this case, you will need an active SendGrid account. + - In order to use O(api_key), O(cc), O(bcc), O(attachments), O(from_name), O(html_body), and O(headers) you must C(pip install sendgrid). requirements: - sendgrid Python library 1.6.22 or lower (Sendgrid API V2 supported) extends_documentation_fragment: @@ -98,9 +92,9 @@ options: - The e-mail body content. required: true author: "Matt Makai (@makaimc)" -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Send an email to a single recipient that the deployment was successful community.general.sendgrid: username: "{{ sendgrid_username }}" @@ -114,16 +108,16 @@ EXAMPLES = r''' - name: Send an email to more than one recipient that the build failed community.general.sendgrid: - username: "{{ sendgrid_username }}" - password: "{{ sendgrid_password }}" - from_address: "build@mycompany.com" - to_addresses: - - "ops@mycompany.com" - - "devteam@mycompany.com" - subject: "Build failure!." - body: "Unable to pull source repository from Git server." + username: "{{ sendgrid_username }}" + password: "{{ sendgrid_password }}" + from_address: "build@mycompany.com" + to_addresses: + - "ops@mycompany.com" + - "devteam@mycompany.com" + subject: "Build failure!." + body: "Unable to pull source repository from Git server." delegate_to: localhost -''' +""" # ======================================= # sendgrid module support methods diff --git a/plugins/modules/sensu_check.py b/plugins/modules/sensu_check.py index 1430d6a6ce..018bb75db3 100644 --- a/plugins/modules/sensu_check.py +++ b/plugins/modules/sensu_check.py @@ -9,15 +9,14 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: sensu_check short_description: Manage Sensu checks description: - Manage the checks that should be run on a machine by I(Sensu). - Most options do not have a default and will not be added to the check definition unless specified. - All defaults except O(path), O(state), O(backup) and O(metric) are not managed by this module, - - they are simply specified for your convenience. + they are simply specified for your convenience. extends_documentation_fragment: - community.general.attributes attributes: @@ -29,127 +28,127 @@ options: name: type: str description: - - The name of the check - - This is the key that is used to determine whether a check exists + - The name of the check. + - This is the key that is used to determine whether a check exists. required: true state: type: str description: - - Whether the check should be present or not - choices: [ 'present', 'absent' ] + - Whether the check should be present or not. + choices: ['present', 'absent'] default: present path: type: str description: - Path to the json file of the check to be added/removed. - Will be created if it does not exist (unless O(state=absent)). - - The parent folders need to exist when O(state=present), otherwise an error will be thrown + - The parent folders need to exist when O(state=present), otherwise an error will be thrown. 
default: /etc/sensu/conf.d/checks.json backup: description: - Create a backup file (if yes), including the timestamp information so - - you can get the original file back if you somehow clobbered it incorrectly. + you can get the original file back if you somehow clobbered it incorrectly. type: bool default: false command: type: str description: - - Path to the sensu check to run (not required when O(state=absent)) + - Path to the sensu check to run (not required when O(state=absent)). handlers: type: list elements: str description: - - List of handlers to notify when the check fails + - List of handlers to notify when the check fails. subscribers: type: list elements: str description: - - List of subscribers/channels this check should run for - - See sensu_subscribers to subscribe a machine to a channel + - List of subscribers/channels this check should run for. + - See sensu_subscribers to subscribe a machine to a channel. interval: type: int description: - - Check interval in seconds + - Check interval in seconds. timeout: type: int description: - - Timeout for the check + - Timeout for the check. - If not specified, it defaults to 10. ttl: type: int description: - - Time to live in seconds until the check is considered stale + - Time to live in seconds until the check is considered stale. handle: description: - - Whether the check should be handled or not + - Whether the check should be handled or not. - Default is V(false). type: bool subdue_begin: type: str description: - - When to disable handling of check failures + - When to disable handling of check failures. subdue_end: type: str description: - - When to enable handling of check failures + - When to enable handling of check failures. dependencies: type: list elements: str description: - - Other checks this check depends on, if dependencies fail handling of this check will be disabled + - Other checks this check depends on, if dependencies fail handling of this check will be disabled. metric: description: - - Whether the check is a metric + - Whether the check is a metric. type: bool default: false standalone: description: - - Whether the check should be scheduled by the sensu client or server - - This option obviates the need for specifying the O(subscribers) option + - Whether the check should be scheduled by the sensu client or server. + - This option obviates the need for specifying the O(subscribers) option. - Default is V(false). type: bool publish: description: - Whether the check should be scheduled at all. - - You can still issue it via the sensu api + - You can still issue it via the sensu API. - Default is V(false). type: bool occurrences: type: int description: - - Number of event occurrences before the handler should take action + - Number of event occurrences before the handler should take action. - If not specified, defaults to 1. refresh: type: int description: - - Number of seconds handlers should wait before taking second action + - Number of seconds handlers should wait before taking second action. aggregate: description: - Classifies the check as an aggregate check, - - making it available via the aggregate API + making it available via the aggregate API. - Default is V(false). type: bool low_flap_threshold: type: int description: - - The low threshold for flap detection + - The low threshold for flap detection. high_flap_threshold: type: int description: - - The high threshold for flap detection + - The high threshold for flap detection. 
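To make the sensu_check options above concrete: the module maintains a single JSON file (O(path)) with a checks block keyed by check name, and only writes the attributes the task actually sets, which is why the description stresses that most defaults are not managed. The sketch below assumes the standard Sensu 1.x file layout and omits the module's error handling and O(backup) behaviour.

import json
import os


def upsert_check(path, name, **attributes):
    # Load the existing file if present, otherwise start from scratch.
    config = {}
    if os.path.exists(path):
        with open(path) as f:
            config = json.load(f)
    check = config.setdefault('checks', {}).setdefault(name, {})
    # Unset options are simply not written to the definition.
    check.update({k: v for k, v in attributes.items() if v is not None})
    with open(path, 'w') as f:
        json.dump(config, f, indent=2)
    return check


# Illustrative values only:
# upsert_check('/etc/sensu/conf.d/checks.json', 'cpu_metrics',
#              command='collect-cpu-metrics.rb', interval=60,
#              handlers=['relay'], metric=True)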
custom: type: dict description: - A hash/dictionary of custom parameters for mixing to the configuration. - - You can't rewrite others module parameters using this + - You can't rewrite others module parameters using this. source: type: str description: - The check source, used to create a JIT Sensu client for an external resource (e.g. a network switch). author: "Anders Ingemann (@andsens)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Fetch metrics about the CPU load every 60 seconds, # the sensu server has a handler called 'relay' which forwards stats to graphite - name: Get cpu metrics @@ -177,7 +176,7 @@ EXAMPLES = ''' community.general.sensu_check: name: check_disk_capacity state: absent -''' +""" import json import traceback diff --git a/plugins/modules/sensu_client.py b/plugins/modules/sensu_client.py index eca0804b0a..0de0340f33 100644 --- a/plugins/modules/sensu_client.py +++ b/plugins/modules/sensu_client.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: sensu_client author: "David Moreau Simard (@dmsimard)" short_description: Manages Sensu client configuration @@ -27,8 +26,8 @@ options: state: type: str description: - - Whether the client should be present or not - choices: [ 'present', 'absent' ] + - Whether the client should be present or not. + choices: ['present', 'absent'] default: present name: type: str @@ -49,7 +48,8 @@ options: - The subscriptions array items must be strings. safe_mode: description: - - If safe mode is enabled for the client. Safe mode requires local check definitions in order to accept a check request and execute the check. + - If safe mode is enabled for the client. Safe mode requires local check definitions in order to accept a check request and execute the + check. type: bool default: false redact: @@ -99,11 +99,9 @@ options: type: dict description: - The servicenow definition scope, used to configure the Sensu Enterprise ServiceNow integration (Sensu Enterprise users only). -notes: - - Check mode is supported -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Minimum possible configuration - name: Configure Sensu client community.general.sensu_client: @@ -146,20 +144,20 @@ EXAMPLES = ''' - name: Delete the Sensu client configuration community.general.sensu_client: state: "absent" -''' +""" -RETURN = ''' +RETURN = r""" config: - description: Effective client configuration, when state is present + description: Effective client configuration, when state is present. returned: success type: dict sample: {'name': 'client', 'subscriptions': ['default']} file: - description: Path to the client configuration file + description: Path to the client configuration file. returned: success type: str sample: "/etc/sensu/conf.d/client.json" -''' +""" import json import os diff --git a/plugins/modules/sensu_handler.py b/plugins/modules/sensu_handler.py index bbb8dc6129..bd1014f2dd 100644 --- a/plugins/modules/sensu_handler.py +++ b/plugins/modules/sensu_handler.py @@ -8,13 +8,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: sensu_handler author: "David Moreau Simard (@dmsimard)" short_description: Manages Sensu handler configuration description: - - Manages Sensu handler configuration + - Manages Sensu handler configuration. 
- 'For more information, refer to the Sensu documentation: U(https://sensuapp.org/docs/latest/reference/handlers.html)' extends_documentation_fragment: - community.general.attributes @@ -27,8 +26,8 @@ options: state: type: str description: - - Whether the handler should be present or not - choices: [ 'present', 'absent' ] + - Whether the handler should be present or not. + choices: ['present', 'absent'] default: present name: type: str @@ -38,8 +37,8 @@ options: type: type: str description: - - The handler type - choices: [ 'pipe', 'tcp', 'udp', 'transport', 'set' ] + - The handler type. + choices: ['pipe', 'tcp', 'udp', 'transport', 'set'] filter: type: str description: @@ -98,12 +97,10 @@ options: elements: str description: - An array of Sensu event handlers (names) to use for events using the handler set. - - 'NOTE: the handlers attribute is only required for handler sets (i.e. handlers configured with "type": "set").' -notes: - - Check mode is supported -''' + - 'NOTE: the handlers attribute is only required for handler sets (that is, handlers configured with O(type=set)).' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Configure a handler that sends event data as STDIN (pipe) - name: Configure IRC Sensu handler community.general.sensu_handler: @@ -146,25 +143,25 @@ EXAMPLES = ''' owner: "sensu" group: "sensu" mode: "0600" -''' +""" -RETURN = ''' +RETURN = r""" config: - description: Effective handler configuration, when state is present + description: Effective handler configuration, when state is present. returned: success type: dict sample: {'name': 'irc', 'type': 'pipe', 'command': '/usr/local/bin/notify-irc.sh'} file: - description: Path to the handler configuration file + description: Path to the handler configuration file. returned: success type: str sample: "/etc/sensu/conf.d/handlers/irc.json" name: - description: Name of the handler + description: Name of the handler. returned: success type: str sample: "irc" -''' +""" import json import os diff --git a/plugins/modules/sensu_silence.py b/plugins/modules/sensu_silence.py index 14c664755d..bcb70f9bd1 100644 --- a/plugins/modules/sensu_silence.py +++ b/plugins/modules/sensu_silence.py @@ -9,14 +9,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: sensu_silence author: Steven Bambling (@smbambling) short_description: Manage Sensu silence entries description: - - Create and clear (delete) a silence entries via the Sensu API - for subscriptions and checks. + - Create and clear (delete) a silence entries via the Sensu API for subscriptions and checks. extends_documentation_fragment: - community.general.attributes attributes: @@ -36,30 +34,26 @@ options: expire: type: int description: - - If specified, the silence entry will be automatically cleared - after this number of seconds. + - If specified, the silence entry will be automatically cleared after this number of seconds. expire_on_resolve: description: - - If specified as true, the silence entry will be automatically - cleared once the condition it is silencing is resolved. + - If specified as true, the silence entry will be automatically cleared once the condition it is silencing is resolved. type: bool reason: type: str description: - - If specified, this free-form string is used to provide context or - rationale for the reason this silence entry was created. + - If specified, this free-form string is used to provide context or rationale for the reason this silence entry was created. 
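Creating a silence entry like the one documented above typically means posting a JSON body that contains only the options the user actually set. The sketch below uses fetch_url from ansible.module_utils.urls and assumes the Sensu 1.x /silenced endpoint; the module's own request code is not part of the hunks shown here, so treat the path and the status handling as assumptions.

import json

from ansible.module_utils.urls import fetch_url


def create_silence(module, url, subscription, check=None, expire=None,
                   expire_on_resolve=None, reason=None, creator=None):
    # Drop unset options so the API only sees what the task specified.
    payload = {k: v for k, v in dict(
        subscription=subscription, check=check, expire=expire,
        expire_on_resolve=expire_on_resolve, reason=reason, creator=creator,
    ).items() if v is not None}
    response, info = fetch_url(
        module, url + '/silenced', method='POST',
        data=json.dumps(payload), headers={'Content-Type': 'application/json'},
    )
    return info['status'] in (200, 201)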
state: type: str description: - - Specifies to create or clear (delete) a silence entry via the Sensu API + - Specifies to create or clear (delete) a silence entry via the Sensu API. default: present choices: ['present', 'absent'] subscription: type: str description: - Specifies the subscription which the silence entry applies to. - - To create a silence entry for a client prepend C(client:) to client name. - Example - C(client:server1.example.dev) + - To create a silence entry for a client prepend C(client:) to client name. Example - C(client:server1.example.dev). required: true url: type: str @@ -67,9 +61,9 @@ options: - Specifies the URL of the Sensu monitoring host server. required: false default: http://127.0.01:4567 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Silence ALL checks for a given client - name: Silence server1.example.dev community.general.sensu_silence: @@ -98,10 +92,10 @@ EXAMPLES = ''' reason: "{{ item.value.reason }}" creator: "{{ ansible_user_id }}" with_dict: "{{ silence }}" -''' +""" -RETURN = ''' -''' +RETURN = r""" +""" import json @@ -149,7 +143,7 @@ def clear(module, url, check, subscription): # Test if silence exists before clearing (rc, out, changed) = query(module, url, check, subscription) - d = dict((i['subscription'], i['check']) for i in out) + d = {i['subscription']: i['check'] for i in out} subscription_exists = subscription in d if check and subscription_exists: exists = (check == d[subscription]) diff --git a/plugins/modules/sensu_subscription.py b/plugins/modules/sensu_subscription.py index 0077e2ffa6..e7c78c3290 100644 --- a/plugins/modules/sensu_subscription.py +++ b/plugins/modules/sensu_subscription.py @@ -9,12 +9,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: sensu_subscription short_description: Manage Sensu subscriptions description: - - Manage which I(sensu channels) a machine should subscribe to + - Manage which I(sensu channels) a machine should subscribe to. extends_documentation_fragment: - community.general.attributes attributes: @@ -26,41 +25,41 @@ options: name: type: str description: - - The name of the channel + - The name of the channel. required: true state: type: str description: - - Whether the machine should subscribe or unsubscribe from the channel - choices: [ 'present', 'absent' ] + - Whether the machine should subscribe or unsubscribe from the channel. + choices: ['present', 'absent'] required: false default: present path: type: str description: - - Path to the subscriptions json file + - Path to the subscriptions json file. required: false default: /etc/sensu/conf.d/subscriptions.json backup: description: - Create a backup file (if yes), including the timestamp information so you - - can get the original file back if you somehow clobbered it incorrectly. + can get the original file back if you somehow clobbered it incorrectly. type: bool required: false default: false -requirements: [ ] +requirements: [] author: Anders Ingemann (@andsens) -''' +""" -RETURN = ''' +RETURN = r""" reasons: - description: the reasons why the module changed or did not change something - returned: success - type: list - sample: ["channel subscription was absent and state is `present'"] -''' + description: The reasons why the module changed or did not change something. 
+ returned: success + type: list + sample: ["channel subscription was absent and state is `present'"] +""" -EXAMPLES = ''' +EXAMPLES = r""" # Subscribe to the nginx channel - name: Subscribe to nginx checks community.general.sensu_subscription: name=nginx @@ -68,7 +67,7 @@ EXAMPLES = ''' # Unsubscribe from the common checks channel - name: Unsubscribe from common checks community.general.sensu_subscription: name=common state=absent -''' +""" import json import traceback diff --git a/plugins/modules/seport.py b/plugins/modules/seport.py index 964e8f0eda..24311fc56d 100644 --- a/plugins/modules/seport.py +++ b/plugins/modules/seport.py @@ -8,14 +8,13 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: seport short_description: Manages SELinux network port type definitions description: - - Manages SELinux network port type definitions. + - Manages SELinux network port type definitions. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: check_mode: support: full @@ -34,7 +33,7 @@ options: - Protocol for the specified port. type: str required: true - choices: [ tcp, udp ] + choices: [tcp, udp] setype: description: - SELinux type for the specified port. @@ -44,7 +43,7 @@ options: description: - Desired boolean value. type: str - choices: [ absent, present ] + choices: [absent, present] default: present reload: description: @@ -53,26 +52,26 @@ options: default: true ignore_selinux_state: description: - - Run independent of selinux runtime state + - Run independent of selinux runtime state. type: bool default: false local: description: - - Work with local modifications only. + - Work with local modifications only. type: bool default: false version_added: 5.6.0 notes: - - The changes are persistent across reboots. - - Not tested on any debian based system. + - The changes are persistent across reboots. + - Not tested on any Debian based system. requirements: -- libselinux-python -- policycoreutils-python + - libselinux-python + - policycoreutils-python author: -- Dan Keder (@dankeder) -''' + - Dan Keder (@dankeder) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Allow Apache to listen on tcp port 8888 community.general.seport: ports: 8888 @@ -110,7 +109,7 @@ EXAMPLES = r''' setype: ssh_port_t state: absent local: true -''' +""" import traceback diff --git a/plugins/modules/serverless.py b/plugins/modules/serverless.py index 8aa9396d62..937f7dcdea 100644 --- a/plugins/modules/serverless.py +++ b/plugins/modules/serverless.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: serverless short_description: Manages a Serverless Framework project description: @@ -26,11 +25,11 @@ options: description: - Goal state of given stage/project. type: str - choices: [ absent, present ] + choices: [absent, present] default: present serverless_bin_path: description: - - The path of a serverless framework binary relative to the 'service_path' eg. node_module/.bin/serverless + - The path of a serverless framework binary relative to the O(service_path), for example V(node_module/.bin/serverless). type: path service_path: description: @@ -67,16 +66,15 @@ options: type: bool default: false notes: - - Currently, the C(serverless) command must be in the path of the node executing the task. - In the future this may be a flag. 
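The serverless options above end up as a plain CLI invocation executed from O(service_path) with AnsibleModule.run_command(). The sketch below shows that translation for the options visible here; the subcommands and flags reflect the Serverless Framework CLI as commonly used and should be treated as assumptions rather than the module's exact behaviour, which handles more options than this.

def build_serverless_command(state='present', serverless_bin_path=None,
                             stage=None, region=None):
    # Fall back to whatever 'serverless' resolves to when no explicit binary
    # path relative to the service directory was given.
    binary = serverless_bin_path or 'serverless'
    command = [binary, 'deploy' if state == 'present' else 'remove']
    if stage:
        command += ['--stage', stage]
    if region:
        command += ['--region', region]
    return command


# Inside the module this would run roughly as:
#   rc, out, err = module.run_command(' '.join(command), cwd=service_path)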
+ - Currently, the C(serverless) command must be in the path of the node executing the task. In the future this may be a flag. requirements: -- serverless -- yaml + - serverless + - PyYAML author: -- Ryan Scott Brown (@ryansb) -''' + - Ryan Scott Brown (@ryansb) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Basic deploy of a service community.general.serverless: service_path: '{{ project_dir }}' @@ -103,9 +101,9 @@ EXAMPLES = r''' region: us-east-1 service_path: '{{ project_dir }}' serverless_bin_path: node_modules/.bin/serverless -''' +""" -RETURN = r''' +RETURN = r""" service_name: type: str description: The service name specified in the serverless.yml that was just deployed. @@ -120,7 +118,7 @@ command: description: Full C(serverless) command run by this module, in case you want to re-run the command outside the module. returned: always sample: serverless deploy --stage production -''' +""" import os diff --git a/plugins/modules/shutdown.py b/plugins/modules/shutdown.py index d8108425eb..6f2dac14b1 100644 --- a/plugins/modules/shutdown.py +++ b/plugins/modules/shutdown.py @@ -8,14 +8,14 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: shutdown short_description: Shut down a machine notes: - - E(PATH) is ignored on the remote node when searching for the C(shutdown) command. Use O(search_paths) - to specify locations to search if the default paths do not work. - - The O(msg) and O(delay) options are not supported when a shutdown command is not found in O(search_paths), instead - the module will attempt to shutdown the system by calling C(systemctl shutdown). + - E(PATH) is ignored on the remote node when searching for the C(shutdown) command. Use O(search_paths) to specify locations to search if the + default paths do not work. + - The O(msg) and O(delay) options are not supported when a shutdown command is not found in O(search_paths), instead the module will attempt + to shutdown the system by calling C(systemctl shutdown). description: - Shut downs a machine. version_added: "1.1.0" @@ -47,20 +47,21 @@ options: search_paths: description: - Paths to search on the remote machine for the C(shutdown) command. - - I(Only) these paths will be searched for the C(shutdown) command. E(PATH) is ignored in the remote node when searching for the C(shutdown) command. + - I(Only) these paths will be searched for the C(shutdown) command. E(PATH) is ignored in the remote node when searching for the C(shutdown) + command. type: list elements: path default: ['/sbin', '/usr/sbin', '/usr/local/sbin'] seealso: -- module: ansible.builtin.reboot + - module: ansible.builtin.reboot author: - - Matt Davis (@nitzmahone) - - Sam Doran (@samdoran) - - Amin Vakil (@aminvakil) -''' + - Matt Davis (@nitzmahone) + - Sam Doran (@samdoran) + - Amin Vakil (@aminvakil) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Unconditionally shut down the machine with all defaults community.general.shutdown: @@ -71,13 +72,13 @@ EXAMPLES = r''' - name: Shut down a machine with shutdown command in unusual place community.general.shutdown: search_paths: - - '/lib/molly-guard' -''' + - '/lib/molly-guard' +""" -RETURN = r''' +RETURN = r""" shutdown: description: V(true) if the machine has been shut down. 
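The notes above describe a very specific lookup order for the shutdown command: only O(search_paths) are consulted, E(PATH) is ignored, and the plugin falls back to systemctl when nothing is found. The shutdown module is backed by an action plugin rather than by code in this docs-only file, so the snippet below is only an illustration of that documented behaviour.

import os

DEFAULT_SEARCH_PATHS = ['/sbin', '/usr/sbin', '/usr/local/sbin']


def find_shutdown_command(search_paths=DEFAULT_SEARCH_PATHS):
    for directory in search_paths:
        candidate = os.path.join(directory, 'shutdown')
        if os.access(candidate, os.X_OK):
            # delay and msg would be appended to this command line.
            return [candidate]
    # Fallback named in the notes above when no shutdown binary is found.
    return ['systemctl', 'shutdown']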
returned: always type: bool sample: true -''' +""" diff --git a/plugins/modules/simpleinit_msb.py b/plugins/modules/simpleinit_msb.py index 92738471c2..2b1b865d2c 100644 --- a/plugins/modules/simpleinit_msb.py +++ b/plugins/modules/simpleinit_msb.py @@ -11,8 +11,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: simpleinit_msb short_description: Manage services on Source Mage GNU/Linux version_added: 7.5.0 @@ -38,24 +37,21 @@ options: state: type: str required: false - choices: [ running, started, stopped, restarted, reloaded ] + choices: [running, started, stopped, restarted, reloaded] description: - - V(started)/V(stopped) are idempotent actions that will not run - commands unless necessary. V(restarted) will always bounce the - service. V(reloaded) will always reload. + - V(started)/V(stopped) are idempotent actions that will not run commands unless necessary. V(restarted) will always bounce the service. + V(reloaded) will always reload. - At least one of O(state) and O(enabled) are required. - - Note that V(reloaded) will start the - service if it is not already started, even if your chosen init - system would not normally. + - Note that V(reloaded) will start the service if it is not already started, even if your chosen init system would not normally. enabled: type: bool required: false description: - Whether the service should start on boot. - At least one of O(state) and O(enabled) are required. -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Example action to start service httpd, if not running community.general.simpleinit_msb: name: httpd @@ -80,7 +76,7 @@ EXAMPLES = ''' community.general.simpleinit_msb: name: httpd enabled: true -''' +""" import os import re diff --git a/plugins/modules/sl_vm.py b/plugins/modules/sl_vm.py index 1604ffc11f..3216fded8b 100644 --- a/plugins/modules/sl_vm.py +++ b/plugins/modules/sl_vm.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: sl_vm short_description: Create or cancel a virtual instance in SoftLayer description: @@ -121,7 +120,7 @@ options: disks: description: - List of disk sizes to be assigned to new virtual instance. - default: [ 25 ] + default: [25] type: list elements: int os_code: @@ -159,7 +158,7 @@ options: description: - Create, or cancel a virtual instance. - Specify V(present) for create, V(absent) to cancel. 
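# Editor's note — illustrative sketch for community.general.simpleinit_msb (reformatted above), not part of the patch.
# The documentation states that at least one of 'state' and 'enabled' is required; this shows both used together.
- name: Ensure httpd is enabled at boot and currently running
  community.general.simpleinit_msb:
    name: httpd
    state: running
    enabled: true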
- choices: [ absent, present ] + choices: [absent, present] default: present type: str wait: @@ -173,102 +172,102 @@ options: default: 600 type: int requirements: - - softlayer >= 4.1.1 + - softlayer >= 4.1.1 author: -- Matt Colton (@mcltn) -''' + - Matt Colton (@mcltn) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Build instance hosts: localhost gather_facts: false tasks: - - name: Build instance request - community.general.sl_vm: - hostname: instance-1 - domain: anydomain.com - datacenter: dal09 - tags: ansible-module-test - hourly: true - private: false - dedicated: false - local_disk: true - cpus: 1 - memory: 1024 - disks: [25] - os_code: UBUNTU_LATEST - wait: false + - name: Build instance request + community.general.sl_vm: + hostname: instance-1 + domain: anydomain.com + datacenter: dal09 + tags: ansible-module-test + hourly: true + private: false + dedicated: false + local_disk: true + cpus: 1 + memory: 1024 + disks: [25] + os_code: UBUNTU_LATEST + wait: false - name: Build additional instances hosts: localhost gather_facts: false tasks: - - name: Build instances request - community.general.sl_vm: - hostname: "{{ item.hostname }}" - domain: "{{ item.domain }}" - datacenter: "{{ item.datacenter }}" - tags: "{{ item.tags }}" - hourly: "{{ item.hourly }}" - private: "{{ item.private }}" - dedicated: "{{ item.dedicated }}" - local_disk: "{{ item.local_disk }}" - cpus: "{{ item.cpus }}" - memory: "{{ item.memory }}" - disks: "{{ item.disks }}" - os_code: "{{ item.os_code }}" - ssh_keys: "{{ item.ssh_keys }}" - wait: "{{ item.wait }}" - with_items: - - hostname: instance-2 - domain: anydomain.com - datacenter: dal09 - tags: - - ansible-module-test - - ansible-module-test-replicas - hourly: true - private: false - dedicated: false - local_disk: true - cpus: 1 - memory: 1024 - disks: - - 25 - - 100 - os_code: UBUNTU_LATEST - ssh_keys: [] - wait: true - - hostname: instance-3 - domain: anydomain.com - datacenter: dal09 - tags: - - ansible-module-test - - ansible-module-test-replicas - hourly: true - private: false - dedicated: false - local_disk: true - cpus: 1 - memory: 1024 - disks: - - 25 - - 100 - os_code: UBUNTU_LATEST - ssh_keys: [] - wait: true + - name: Build instances request + community.general.sl_vm: + hostname: "{{ item.hostname }}" + domain: "{{ item.domain }}" + datacenter: "{{ item.datacenter }}" + tags: "{{ item.tags }}" + hourly: "{{ item.hourly }}" + private: "{{ item.private }}" + dedicated: "{{ item.dedicated }}" + local_disk: "{{ item.local_disk }}" + cpus: "{{ item.cpus }}" + memory: "{{ item.memory }}" + disks: "{{ item.disks }}" + os_code: "{{ item.os_code }}" + ssh_keys: "{{ item.ssh_keys }}" + wait: "{{ item.wait }}" + with_items: + - hostname: instance-2 + domain: anydomain.com + datacenter: dal09 + tags: + - ansible-module-test + - ansible-module-test-replicas + hourly: true + private: false + dedicated: false + local_disk: true + cpus: 1 + memory: 1024 + disks: + - 25 + - 100 + os_code: UBUNTU_LATEST + ssh_keys: [] + wait: true + - hostname: instance-3 + domain: anydomain.com + datacenter: dal09 + tags: + - ansible-module-test + - ansible-module-test-replicas + hourly: true + private: false + dedicated: false + local_disk: true + cpus: 1 + memory: 1024 + disks: + - 25 + - 100 + os_code: UBUNTU_LATEST + ssh_keys: [] + wait: true - name: Cancel instances hosts: localhost gather_facts: false tasks: - - name: Cancel by tag - community.general.sl_vm: - state: absent - tags: ansible-module-test -''' + - name: Cancel by tag + community.general.sl_vm: + state: absent + 
tags: ansible-module-test +""" # TODO: Disabled RETURN as it is breaking the build for docs. Needs to be fixed. -RETURN = '''# ''' +RETURN = """# """ import json import time diff --git a/plugins/modules/slack.py b/plugins/modules/slack.py index 41dd4f5dba..b4e637f591 100644 --- a/plugins/modules/slack.py +++ b/plugins/modules/slack.py @@ -15,11 +15,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = """ +DOCUMENTATION = r""" module: slack short_description: Send Slack notifications description: - - The M(community.general.slack) module sends notifications to U(http://slack.com) via the Incoming WebHook integration + - The M(community.general.slack) module sends notifications to U(http://slack.com) via the Incoming WebHook integration. author: "Ramon de la Fuente (@ramondelafuente)" extends_documentation_fragment: - community.general.attributes @@ -32,51 +32,43 @@ options: domain: type: str description: - - Slack (sub)domain for your environment without protocol. (For example - V(example.slack.com).) In Ansible 1.8 and beyond, this is deprecated and may - be ignored. See token documentation for information. + - Slack (sub)domain for your environment without protocol. (For example V(example.slack.com).) In Ansible 1.8 and beyond, this is deprecated + and may be ignored. See token documentation for information. token: type: str description: - - Slack integration token. This authenticates you to the slack service. - Make sure to use the correct type of token, depending on what method you use. - - "Webhook token: - Prior to Ansible 1.8, a token looked like V(3Ffe373sfhRE6y42Fg3rvf4GlK). In - Ansible 1.8 and above, Ansible adapts to the new slack API where tokens look - like V(G922VJP24/D921DW937/3Ffe373sfhRE6y42Fg3rvf4GlK). If tokens - are in the new format then slack will ignore any value of domain. If - the token is in the old format the domain is required. Ansible has no - control of when slack will get rid of the old API. When slack does - that the old format will stop working. ** Please keep in mind the tokens - are not the API tokens but are the webhook tokens. In slack these are - found in the webhook URL which are obtained under the apps and integrations. - The incoming webhooks can be added in that area. In some cases this may - be locked by your Slack admin and you must request access. It is there - that the incoming webhooks can be added. The key is on the end of the - URL given to you in that section." - - "WebAPI token: - Slack WebAPI requires a personal, bot or work application token. These tokens start with V(xoxp-), V(xoxb-) - or V(xoxa-), for example V(xoxb-1234-56789abcdefghijklmnop). WebAPI token is required if you intend to receive thread_id. - See Slack's documentation (U(https://api.slack.com/docs/token-types)) for more information." + - Slack integration token. This authenticates you to the slack service. Make sure to use the correct type of token, depending on what method + you use. + - 'Webhook token: Prior to Ansible 1.8, a token looked like V(3Ffe373sfhRE6y42Fg3rvf4GlK). In Ansible 1.8 and above, Ansible adapts to the + new slack API where tokens look like V(G922VJP24/D921DW937/3Ffe373sfhRE6y42Fg3rvf4GlK). If tokens are in the new format then slack will + ignore any value of domain. If the token is in the old format the domain is required. Ansible has no control of when slack will get rid + of the old API. When slack does that the old format will stop working. 
** Please keep in mind the tokens are not the API tokens but are + the webhook tokens. In slack these are found in the webhook URL which are obtained under the apps and integrations. The incoming webhooks + can be added in that area. In some cases this may be locked by your Slack admin and you must request access. It is there that the incoming + webhooks can be added. The key is on the end of the URL given to you in that section.' + - "WebAPI token: Slack WebAPI requires a personal, bot or work application token. These tokens start with V(xoxp-), V(xoxb-) or V(xoxa-), + for example V(xoxb-1234-56789abcdefghijklmnop). WebAPI token is required if you intend to receive thread_id. See Slack's documentation + (U(https://api.slack.com/docs/token-types)) for more information." required: true msg: type: str description: - - Message to send. Note that the module does not handle escaping characters. - Plain-text angle brackets and ampersands should be converted to HTML entities (e.g. & to &) before sending. - See Slack's documentation (U(https://api.slack.com/docs/message-formatting)) for more. + - Message to send. Note that the module does not handle escaping characters. Plain-text angle brackets and ampersands should be converted + to HTML entities (for example C(&) to C(&)) before sending. See Slack's documentation (U(https://api.slack.com/docs/message-formatting)) for more. channel: type: str description: - Channel to send the message to. If absent, the message goes to the channel selected for the O(token). thread_id: description: - - Optional. Timestamp of parent message to thread this message. https://api.slack.com/docs/message-threading + - Optional. Timestamp of parent message to thread this message, see U(https://api.slack.com/docs/message-threading). type: str message_id: description: - Optional. Message ID to edit, instead of posting a new message. - - If supplied O(channel) must be in form of C(C0xxxxxxx). use C({{ slack_response.channel_id }}) to get RV(ignore:channel_id) from previous task run. + - If supplied O(channel) must be in form of C(C0xxxxxxx). use C({{ slack_response.channel }}) to get RV(ignore:channel) from previous task + run. + - The token needs history scope to get information on the message to edit (C(channels:history,groups:history,mpim:history,im:history)). - Corresponds to C(ts) in the Slack API (U(https://api.slack.com/messaging/modifying)). type: str version_added: 1.2.0 @@ -106,14 +98,13 @@ options: parse: type: str description: - - Setting for the message parser at Slack + - Setting for the message parser at Slack. choices: - 'full' - 'none' validate_certs: description: - - If V(false), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates. type: bool default: true color: @@ -139,11 +130,10 @@ options: type: str description: - Setting for automatically prepending a V(#) symbol on the passed in O(channel). - - The V(auto) method prepends a V(#) unless O(channel) starts with one of V(#), V(@), V(C0), V(GF), V(G0), V(CP). - These prefixes only cover a small set of the prefixes that should not have a V(#) prepended. - Since an exact condition which O(channel) values must not have the V(#) prefix is not known, - the value V(auto) for this option will be deprecated in the future. 
It is best to explicitly set - O(prepend_hash=always) or O(prepend_hash=never) to obtain the needed behavior. + - The V(auto) method prepends a V(#) unless O(channel) starts with one of V(#), V(@), V(C0), V(GF), V(G0), V(CP). These prefixes only cover + a small set of the prefixes that should not have a V(#) prepended. Since an exact condition which O(channel) values must not have the + V(#) prefix is not known, the value V(auto) for this option will be deprecated in the future. It is best to explicitly set O(prepend_hash=always) + or O(prepend_hash=never) to obtain the needed behavior. choices: - 'always' - 'never' @@ -152,7 +142,7 @@ options: version_added: 6.1.0 """ -EXAMPLES = """ +EXAMPLES = r""" - name: Send notification message via Slack community.general.slack: token: thetoken/generatedby/slack @@ -214,14 +204,14 @@ EXAMPLES = """ Display my system load on host A and B - type: context elements: - - type: mrkdwn - text: |- - *System A* - load average: 0,74, 0,66, 0,63 - - type: mrkdwn - text: |- - *System B* - load average: 5,16, 4,64, 2,43 + - type: mrkdwn + text: |- + *System A* + load average: 0,74, 0,66, 0,63 + - type: mrkdwn + text: |- + *System B* + load average: 5,16, 4,64, 2,43 - name: Send a message with a link using Slack markup community.general.slack: @@ -391,6 +381,8 @@ def get_slack_message(module, token, channel, ts): if info['status'] != 200: module.fail_json(msg="failed to get slack message") data = module.from_json(response.read()) + if data.get('ok') is False: + module.fail_json(msg="failed to get slack message: %s" % data) if len(data['messages']) < 1: module.fail_json(msg="no messages matching ts: %s" % ts) if len(data['messages']) > 1: diff --git a/plugins/modules/slackpkg.py b/plugins/modules/slackpkg.py index 9347db1591..2ec91de051 100644 --- a/plugins/modules/slackpkg.py +++ b/plugins/modules/slackpkg.py @@ -15,49 +15,47 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: slackpkg short_description: Package manager for Slackware >= 12.2 description: - - Manage binary packages for Slackware using 'slackpkg' which - is available in versions after 12.2. + - Manage binary packages for Slackware using C(slackpkg) which is available in versions after 12.2. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - name of package to install/remove - required: true - type: list - elements: str - aliases: [pkg] + name: + description: + - Name of package to install/remove. + required: true + type: list + elements: str + aliases: [pkg] - state: - description: - - State of the package, you can use V(installed) as an alias for V(present) and V(removed) as one for V(absent). - choices: [ 'present', 'absent', 'latest', 'installed', 'removed' ] - required: false - default: present - type: str + state: + description: + - State of the package, you can use V(installed) as an alias for V(present) and V(removed) as one for V(absent). + choices: ['present', 'absent', 'latest', 'installed', 'removed'] + required: false + default: present + type: str - update_cache: - description: - - update the package database first - required: false - default: false - type: bool + update_cache: + description: + - Update the package database first. 
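# Editor's note — illustrative sketch for community.general.slack (reformatted above), not part of the patch.
# It shows editing a previously posted message via 'message_id', as described in the option documentation.
# The 'ts' and 'channel' fields read from the registered result are assumptions based on that documentation,
# and the token (which needs the history scopes for editing) is a placeholder.
- name: Post a deployment notice and keep the response
  community.general.slack:
    token: xoxb-1234-56789abcdefghijklmnop
    msg: Deployment started
  register: slack_response

- name: Update the same message once the deployment finishes
  community.general.slack:
    token: xoxb-1234-56789abcdefghijklmnop
    msg: Deployment complete
    message_id: "{{ slack_response.ts }}"
    channel: "{{ slack_response.channel }}"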
+ required: false + default: false + type: bool author: Kim Nørgaard (@KimNorgaard) -requirements: [ "Slackware >= 12.2" ] -''' +requirements: ["Slackware >= 12.2"] +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install package foo community.general.slackpkg: name: foo @@ -72,7 +70,7 @@ EXAMPLES = ''' community.general.slackpkg: name: foo state: latest -''' +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/smartos_image_info.py b/plugins/modules/smartos_image_info.py index 1a25b46681..96bf9b0575 100644 --- a/plugins/modules/smartos_image_info.py +++ b/plugins/modules/smartos_image_info.py @@ -9,31 +9,28 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: smartos_image_info short_description: Get SmartOS image details description: - - Retrieve information about all installed images on SmartOS. + - Retrieve information about all installed images on SmartOS. author: Adam Števko (@xen0l) extends_documentation_fragment: - - community.general.attributes - - community.general.attributes.info_module + - community.general.attributes + - community.general.attributes.info_module attributes: - check_mode: - version_added: 3.3.0 - # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - filters: - description: - - Criteria for selecting image. Can be any value from image - manifest and C(published_date), C(published), C(source), C(clones), - and C(size). More information can be found at U(https://smartos.org/man/1m/imgadm) - under C(imgadm list). - type: str -''' + filters: + description: + - Criteria for selecting image. Can be any value from image manifest and V(published_date), V(published), V(source), V(clones), and V(size). + - More information can be found at U(https://smartos.org/man/1m/imgadm) under C(imgadm list). 
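# Editor's note — illustrative sketch for community.general.slackpkg (reformatted above), not part of the patch.
# It exercises the documented 'update_cache' option together with 'state: latest'.
- name: Update the slackpkg database and upgrade foo to the latest version
  community.general.slackpkg:
    name: foo
    state: latest
    update_cache: true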
+ type: str +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Return information about all installed images community.general.smartos_image_info: register: result @@ -49,19 +46,17 @@ EXAMPLES = ''' - name: Print information ansible.builtin.debug: - msg: "{{ result.smartos_images[item]['name'] }}-{{ result.smartos_images[item]['version'] }} - has {{ result.smartos_images[item]['clones'] }} VM(s)" + msg: "{{ result.smartos_images[item]['name'] }}-{{ result.smartos_images[item]['version'] }} has {{ result.smartos_images[item]['clones'] }} VM(s)" with_items: "{{ result.smartos_images.keys() | list }}" - name: Print information ansible.builtin.debug: - msg: "{{ smartos_images[item]['name'] }}-{{ smartos_images[item]['version'] }} - has {{ smartos_images[item]['clones'] }} VM(s)" + msg: "{{ smartos_images[item]['name'] }}-{{ smartos_images[item]['version'] }} has {{ smartos_images[item]['clones'] }} VM(s)" with_items: "{{ smartos_images.keys() | list }}" -''' +""" -RETURN = ''' -''' +RETURN = r""" +""" import json from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/snap.py b/plugins/modules/snap.py index fd16764802..1de829801d 100644 --- a/plugins/modules/snap.py +++ b/plugins/modules/snap.py @@ -1,6 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- +# Copyright (c) 2024, Lincoln Wallace (locnnil) # Copyright (c) 2021, Alexei Znamensky (russoz) # Copyright (c) 2021, Marcus Rickert # Copyright (c) 2018, Stanislas Lange (angristan) @@ -12,85 +13,87 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: snap short_description: Manages snaps description: - - Manages snaps packages. + - Manages snaps packages. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - Name of the snaps to be installed. - - Any named snap accepted by the C(snap) command is valid. - - > - Notice that snap files might require O(dangerous=true) to ignore the error - "cannot find signatures with metadata for snap". - required: true - type: list - elements: str - state: - description: - - Desired state of the package. - - > - When O(state=present) the module will use C(snap install) if the snap is not installed, - and C(snap refresh) if it is installed but from a different channel. - default: present - choices: [ absent, present, enabled, disabled ] - type: str - classic: - description: - - Confinement policy. The classic confinement allows a snap to have - the same level of access to the system as "classic" packages, - like those managed by APT. This option corresponds to the C(--classic) argument. - This option can only be specified if there is a single snap in the task. - type: bool - required: false - default: false - channel: - description: - - Define which release of a snap is installed and tracked for updates. - This option can only be specified if there is a single snap in the task. - - If not passed, the C(snap) command will default to V(stable). - - If the value passed does not contain the C(track), it will default to C(latest). - For example, if V(edge) is passed, the module will assume the channel to be V(latest/edge). - - See U(https://snapcraft.io/docs/channels) for more details about snap channels. 
- type: str - required: false - options: - description: - - Set options with pattern C(key=value) or C(snap:key=value). If a snap name is given, the option will be applied - to that snap only. If the snap name is omitted, the options will be applied to all snaps listed in O(name). Options will - only be applied to active snaps. - required: false - type: list - elements: str - version_added: 4.4.0 - dangerous: - description: - - Install the given snap file even if there are no pre-acknowledged signatures for it, - meaning it was not verified and could be dangerous. - type: bool - required: false - default: false - version_added: 7.2.0 - + name: + description: + - Name of the snaps to be installed. + - Any named snap accepted by the C(snap) command is valid. + - O(dangerous=true) may be necessary when installing C(.snap) files. See O(dangerous) for more details. + required: true + type: list + elements: str + state: + description: + - Desired state of the package. + - When O(state=present) the module will use C(snap install) if the snap is not installed, and C(snap refresh) if it is installed but from + a different channel. + default: present + choices: [absent, present, enabled, disabled] + type: str + classic: + description: + - Install a snap that has classic confinement. + - This option corresponds to the C(--classic) argument of the C(snap install) command. + - This level of confinement is permissive, granting full system access, similar to that of traditionally packaged applications that do not + use sandboxing mechanisms. This option can only be specified when the task involves a single snap. + - See U(https://snapcraft.io/docs/snap-confinement) for more details about classic confinement and confinement levels. + type: bool + required: false + default: false + channel: + description: + - Define which release of a snap is installed and tracked for updates. This option can only be specified if there is a single snap in the + task. + - If not passed, the C(snap) command will default to V(stable). + - If the value passed does not contain the C(track), it will default to C(latest). For example, if V(edge) is passed, the module will assume + the channel to be V(latest/edge). + - See U(https://snapcraft.io/docs/channels) for more details about snap channels. + type: str + required: false + options: + description: + - Set options with pattern C(key=value) or C(snap:key=value). If a snap name is given, the option will be applied to that snap only. If + the snap name is omitted, the options will be applied to all snaps listed in O(name). Options will only be applied to active snaps. + - Options will only be applied when C(state) is set to V(present). This is done after the necessary installation or refresh (upgrade/downgrade) + of all the snaps listed in O(name). + - See U(https://snapcraft.io/docs/configuration-in-snaps) for more details about snap configuration options. + required: false + type: list + elements: str + version_added: 4.4.0 + dangerous: + description: + - Install the snap in dangerous mode, without validating its assertions and signatures. + - This is useful when installing local snaps that are either unsigned or have signatures that have not been acknowledged. + - See U(https://snapcraft.io/docs/install-modes) for more details about installation modes. + type: bool + required: false + default: false + version_added: 7.2.0 +notes: + - Privileged operations, such as installing and configuring snaps, require root priviledges. 
This is only the case if the user has not logged + in to the Snap Store. author: - - Victor Carceler (@vcarceler) - - Stanislas Lange (@angristan) + - Victor Carceler (@vcarceler) + - Stanislas Lange (@angristan) seealso: - - module: community.general.snap_alias -''' + - module: community.general.snap_alias +""" -EXAMPLES = ''' +EXAMPLES = r""" # Install "foo" and "bar" snap - name: Install foo community.general.snap: @@ -135,35 +138,35 @@ EXAMPLES = ''' community.general.snap: name: foo channel: latest/edge -''' +""" -RETURN = ''' +RETURN = r""" classic: - description: Whether or not the snaps were installed with the classic confinement - type: bool - returned: When snaps are installed + description: Whether or not the snaps were installed with the classic confinement. + type: bool + returned: When snaps are installed channel: - description: The channel the snaps were installed from - type: str - returned: When snaps are installed + description: The channel the snaps were installed from. + type: str + returned: When snaps are installed cmd: - description: The command that was executed on the host - type: str - returned: When changed is true + description: The command that was executed on the host. + type: str + returned: When changed is true snaps_installed: - description: The list of actually installed snaps - type: list - returned: When any snaps have been installed + description: The list of actually installed snaps. + type: list + returned: When any snaps have been installed snaps_removed: - description: The list of actually removed snaps - type: list - returned: When any snaps have been removed + description: The list of actually removed snaps. + type: list + returned: When any snaps have been removed options_changed: - description: The list of options set/changed in format C(snap:key=value). - type: list - returned: When any options have been changed/set - version_added: 4.4.0 -''' + description: The list of options set/changed in format C(snap:key=value). + type: list + returned: When any options have been changed/set + version_added: 4.4.0 +""" import re import json @@ -194,6 +197,7 @@ class Snap(StateModuleHelper): }, supports_check_mode=True, ) + use_old_vardict = False @staticmethod def _first_non_zero(a): @@ -405,8 +409,8 @@ class Snap(StateModuleHelper): def state_present(self): - self.vars.meta('classic').set(output=True) - self.vars.meta('channel').set(output=True) + self.vars.set_meta('classic', output=True) + self.vars.set_meta('channel', output=True) actionable_refresh = [snap for snap in self.vars.name if self.vars.snap_status_map[snap] == Snap.CHANNEL_MISMATCH] if actionable_refresh: diff --git a/plugins/modules/snap_alias.py b/plugins/modules/snap_alias.py index 54448c6f3a..81a968730d 100644 --- a/plugins/modules/snap_alias.py +++ b/plugins/modules/snap_alias.py @@ -9,46 +9,45 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: snap_alias short_description: Manages snap aliases version_added: 4.0.0 description: - - "Manages snaps aliases." + - Manages snaps aliases. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: full + check_mode: + support: full + diff_mode: + support: full options: - state: - description: - - Desired state of the alias. - type: str - choices: [ absent, present ] - default: present - name: - description: - - Name of the snap. 
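# Editor's note — illustrative sketch for community.general.snap (reformatted above), not part of the patch.
# It applies configuration options with the documented 'key=value' and 'snap:key=value' patterns;
# the snap names and option keys are hypothetical.
- name: Install foo and bar and set options on them
  community.general.snap:
    name:
      - foo
      - bar
    state: present
    options:
      - "debug.enabled=true"  # applied to every snap listed in name
      - "foo:port=8080"       # applied only to the snap 'foo'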
- type: str - alias: - description: - - Aliases to be created or removed. - type: list - elements: str - aliases: [aliases] + state: + description: + - Desired state of the alias. + type: str + choices: [absent, present] + default: present + name: + description: + - Name of the snap. + type: str + alias: + description: + - Aliases to be created or removed. + type: list + elements: str + aliases: [aliases] author: - - Alexei Znamensky (@russoz) + - Alexei Znamensky (@russoz) seealso: - - module: community.general.snap -''' + - module: community.general.snap +""" -EXAMPLES = ''' +EXAMPLES = r""" # Install "foo" and "bar" snap - name: Create snap alias community.general.snap_alias: @@ -62,7 +61,7 @@ EXAMPLES = ''' - hw - hw2 - hw3 - state: present # optional + state: present # optional - name: Remove one specific aliases community.general.snap_alias: @@ -73,15 +72,15 @@ EXAMPLES = ''' community.general.snap_alias: name: hello-world state: absent -''' +""" -RETURN = ''' +RETURN = r""" snap_aliases: - description: The snap aliases after execution. If called in check mode, then the list represents the state before execution. - type: list - elements: str - returned: always -''' + description: The snap aliases after execution. If called in check mode, then the list represents the state before execution. + type: list + elements: str + returned: always +""" import re @@ -105,6 +104,7 @@ class SnapAlias(StateModuleHelper): ], supports_check_mode=True, ) + use_old_vardict = False def _aliases(self): n = self.vars.name diff --git a/plugins/modules/snmp_facts.py b/plugins/modules/snmp_facts.py index aecc08f325..af0abf9479 100644 --- a/plugins/modules/snmp_facts.py +++ b/plugins/modules/snmp_facts.py @@ -9,87 +9,85 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: snmp_facts author: -- Patrick Ogenstad (@ogenstad) + - Patrick Ogenstad (@ogenstad) short_description: Retrieve facts for a device using SNMP description: - - Retrieve facts for a device using SNMP, the facts will be - inserted to the ansible_facts key. + - Retrieve facts for a device using SNMP, the facts will be inserted to the C(ansible_facts) key. requirements: - - pysnmp + - pysnmp extends_documentation_fragment: - - community.general.attributes - - community.general.attributes.facts - - community.general.attributes.facts_module + - community.general.attributes + - community.general.attributes.facts + - community.general.attributes.facts_module attributes: - check_mode: - version_added: 3.3.0 - # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - host: - description: - - Set to target SNMP server (normally C({{ inventory_hostname }})). - type: str - required: true - version: - description: - - SNMP Version to use, V(v2), V(v2c) or V(v3). - type: str - required: true - choices: [ v2, v2c, v3 ] - community: - description: - - The SNMP community string, required if O(version) is V(v2) or V(v2c). - type: str - level: - description: - - Authentication level. - - Required if O(version=v3). - type: str - choices: [ authNoPriv, authPriv ] - username: - description: - - Username for SNMPv3. - - Required if O(version=v3). - type: str - integrity: - description: - - Hashing algorithm. - - Required if O(version=v3). - type: str - choices: [ md5, sha ] - authkey: - description: - - Authentication key. 
- - Required O(version=v3). - type: str - privacy: - description: - - Encryption algorithm. - - Required if O(level=authPriv). - type: str - choices: [ aes, des ] - privkey: - description: - - Encryption key. - - Required if O(level=authPriv). - type: str - timeout: - description: - - Response timeout in seconds. - type: int - version_added: 2.3.0 - retries: - description: - - Maximum number of request retries, 0 retries means just a single request. - type: int - version_added: 2.3.0 -''' + host: + description: + - Set to target SNMP server (normally C({{ inventory_hostname }})). + type: str + required: true + version: + description: + - SNMP Version to use, V(v2), V(v2c) or V(v3). + type: str + required: true + choices: [v2, v2c, v3] + community: + description: + - The SNMP community string, required if O(version) is V(v2) or V(v2c). + type: str + level: + description: + - Authentication level. + - Required if O(version=v3). + type: str + choices: [authNoPriv, authPriv] + username: + description: + - Username for SNMPv3. + - Required if O(version=v3). + type: str + integrity: + description: + - Hashing algorithm. + - Required if O(version=v3). + type: str + choices: [md5, sha] + authkey: + description: + - Authentication key. + - Required O(version=v3). + type: str + privacy: + description: + - Encryption algorithm. + - Required if O(level=authPriv). + type: str + choices: [aes, des] + privkey: + description: + - Encryption key. + - Required if O(level=authPriv). + type: str + timeout: + description: + - Response timeout in seconds. + type: int + version_added: 2.3.0 + retries: + description: + - Maximum number of request retries, 0 retries means just a single request. + type: int + version_added: 2.3.0 +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather facts with SNMP version 2 community.general.snmp_facts: host: '{{ inventory_hostname }}' @@ -108,9 +106,9 @@ EXAMPLES = r''' authkey: abc12345 privkey: def6789 delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" ansible_sysdescr: description: A textual description of the entity. 
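# Editor's note — illustrative sketch for community.general.snmp_facts (reformatted above), not part of the patch.
# It combines the documented 'timeout' and 'retries' options with an SNMPv2 community; the values are hypothetical.
- name: Gather facts with SNMP version 2, allowing slow devices extra time
  community.general.snmp_facts:
    host: '{{ inventory_hostname }}'
    version: v2c
    community: public
    timeout: 10
    retries: 2
  delegate_to: localhost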
returned: success @@ -152,39 +150,39 @@ ansible_interfaces: type: dict sample: { "1": { - "adminstatus": "up", - "description": "", - "ifindex": "1", - "ipv4": [ - { - "address": "127.0.0.1", - "netmask": "255.0.0.0" - } - ], - "mac": "", - "mtu": "65536", - "name": "lo", - "operstatus": "up", - "speed": "65536" + "adminstatus": "up", + "description": "", + "ifindex": "1", + "ipv4": [ + { + "address": "127.0.0.1", + "netmask": "255.0.0.0" + } + ], + "mac": "", + "mtu": "65536", + "name": "lo", + "operstatus": "up", + "speed": "65536" }, "2": { - "adminstatus": "up", - "description": "", - "ifindex": "2", - "ipv4": [ - { - "address": "192.168.213.128", - "netmask": "255.255.255.0" - } - ], - "mac": "000a305a52a1", - "mtu": "1500", - "name": "Intel Corporation 82545EM Gigabit Ethernet Controller (Copper)", - "operstatus": "up", - "speed": "1500" + "adminstatus": "up", + "description": "", + "ifindex": "2", + "ipv4": [ + { + "address": "192.168.213.128", + "netmask": "255.255.255.0" + } + ], + "mac": "000a305a52a1", + "mtu": "1500", + "name": "Intel Corporation 82545EM Gigabit Ethernet Controller (Copper)", + "operstatus": "up", + "speed": "1500" } } -''' +""" import binascii from collections import defaultdict @@ -300,13 +298,19 @@ def main(): deps.validate(module) cmdGen = cmdgen.CommandGenerator() - transport_opts = dict((k, m_args[k]) for k in ('timeout', 'retries') if m_args[k] is not None) + transport_opts = { + k: m_args[k] + for k in ('timeout', 'retries') + if m_args[k] is not None + } # Verify that we receive a community when using snmp v2 if m_args['version'] in ("v2", "v2c"): if m_args['community'] is None: module.fail_json(msg='Community not set when using snmp version 2') + integrity_proto = None + privacy_proto = None if m_args['version'] == "v3": if m_args['username'] is None: module.fail_json(msg='Username not set when using snmp version 3') diff --git a/plugins/modules/solaris_zone.py b/plugins/modules/solaris_zone.py index d9f44589dc..9f8f774cbe 100644 --- a/plugins/modules/solaris_zone.py +++ b/plugins/modules/solaris_zone.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: solaris_zone short_description: Manage Solaris zones description: @@ -31,16 +30,15 @@ options: description: - V(present), configure and install the zone. - V(installed), synonym for V(present). - - V(running), if the zone already exists, boot it, otherwise, configure and install - the zone first, then boot it. + - V(running), if the zone already exists, boot it, otherwise, configure and install the zone first, then boot it. - V(started), synonym for V(running). - V(stopped), shutdown a zone. - V(absent), destroy the zone. - V(configured), configure the ready so that it's to be attached. - V(attached), attach a zone, but do not boot it. - - V(detached), shutdown and detach a zone + - V(detached), shutdown and detach a zone. type: str - choices: [ absent, attached, configured, detached, installed, present, running, started, stopped ] + choices: [absent, attached, configured, detached, installed, present, running, started, stopped] default: present name: description: @@ -53,8 +51,7 @@ options: required: true path: description: - - The path where the zone will be created. This is required when the zone is created, but not - used otherwise. + - The path where the zone will be created. This is required when the zone is created, but not used otherwise. 
type: str sparse: description: @@ -63,32 +60,29 @@ options: default: false root_password: description: - - The password hash for the root account. If not specified, the zone's root account - will not have a password. + - The password hash for the root account. If not specified, the zone's root account will not have a password. type: str config: description: - - 'The zonecfg configuration commands for this zone. See zonecfg(1M) for the valid options - and syntax. Typically this is a list of options separated by semi-colons or new lines, e.g. - "set auto-boot=true;add net;set physical=bge0;set address=10.1.1.1;end"' + - The C(zonecfg) configuration commands for this zone. See zonecfg(1M) for the valid options and syntax. Typically this is a list of options + separated by semi-colons or new lines, for example V(set auto-boot=true;add net;set physical=bge0;set address=10.1.1.1;end). type: str default: '' create_options: description: - - 'Extra options to the zonecfg(1M) create command.' + - Extra options to the zonecfg(1M) create command. type: str default: '' install_options: description: - - 'Extra options to the zoneadm(1M) install command. To automate Solaris 11 zone creation, - use this to specify the profile XML file, e.g. install_options="-c sc_profile.xml"' + - Extra options to the zoneadm(1M) install command. To automate Solaris 11 zone creation, use this to specify the profile XML file, for example + O(install_options=-c sc_profile.xml). type: str default: '' attach_options: description: - - 'Extra options to the zoneadm attach command. For example, this can be used to specify - whether a minimum or full update of packages is required and if any packages need to - be deleted. For valid values, see zoneadm(1M)' + - Extra options to the zoneadm attach command. For example, this can be used to specify whether a minimum or full update of packages is + required and if any packages need to be deleted. For valid values, see zoneadm(1M). type: str default: '' timeout: @@ -96,9 +90,9 @@ options: - Timeout, in seconds, for zone to boot. type: int default: 600 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create and install a zone, but don't boot it community.general.solaris_zone: name: zone1 @@ -149,7 +143,7 @@ EXAMPLES = ''' name: zone1 state: attached attach_options: -u -''' +""" import os import platform diff --git a/plugins/modules/sorcery.py b/plugins/modules/sorcery.py index 4fcf46a052..9ad3d30f3b 100644 --- a/plugins/modules/sorcery.py +++ b/plugins/modules/sorcery.py @@ -10,93 +10,86 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: sorcery short_description: Package manager for Source Mage GNU/Linux description: - - Manages "spells" on Source Mage GNU/Linux using I(sorcery) toolchain + - Manages "spells" on Source Mage GNU/Linux using I(sorcery) toolchain. author: "Vlad Glagolev (@vaygr)" notes: - - When all three components are selected, the update goes by the sequence -- - Sorcery -> Grimoire(s) -> Spell(s); you cannot override it. - - Grimoire handling is supported since community.general 7.3.0. + - When all three components are selected, the update goes by the sequence -- Sorcery -> Grimoire(s) -> Spell(s); you cannot override it. + - Grimoire handling is supported since community.general 7.3.0. 
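# Editor's note — illustrative sketch for community.general.solaris_zone (reformatted above), not part of the patch.
# It uses the documented 'install_options' profile file for automated Solaris 11 zone creation;
# the zone name, path, and profile location are hypothetical.
- name: Create and install a Solaris 11 zone using a system configuration profile
  community.general.solaris_zone:
    name: zone11
    state: present
    path: /zones/zone11
    install_options: '-c /root/sc_profile.xml'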
requirements: - - bash + - bash extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - Name of the spell or grimoire. - - Multiple names can be given, separated by commas. - - Special value V(*) in conjunction with states V(latest) or - V(rebuild) will update or rebuild the whole system respectively - - The alias O(grimoire) was added in community.general 7.3.0. - aliases: ["spell", "grimoire"] - type: list - elements: str + name: + description: + - Name of the spell or grimoire. + - Multiple names can be given, separated by commas. + - Special value V(*) in conjunction with states V(latest) or V(rebuild) will update or rebuild the whole system respectively. + - The alias O(grimoire) was added in community.general 7.3.0. + aliases: ["spell", "grimoire"] + type: list + elements: str - repository: - description: - - Repository location. - - If specified, O(name) represents grimoire(s) instead of spell(s). - - Special value V(*) will pull grimoire from the official location. - - Only single item in O(name) in conjunction with V(*) can be used. - - O(state=absent) must be used with a special value V(*). - type: str - version_added: 7.3.0 + repository: + description: + - Repository location. + - If specified, O(name) represents grimoire(s) instead of spell(s). + - Special value V(*) will pull grimoire from the official location. + - Only single item in O(name) in conjunction with V(*) can be used. + - O(state=absent) must be used with a special value V(*). + type: str + version_added: 7.3.0 - state: - description: - - Whether to cast, dispel or rebuild a package. - - State V(cast) is an equivalent of V(present), not V(latest). - - State V(rebuild) implies cast of all specified spells, not only - those existed before. - choices: ["present", "latest", "absent", "cast", "dispelled", "rebuild"] - default: "present" - type: str + state: + description: + - Whether to cast, dispel or rebuild a package. + - State V(cast) is an equivalent of V(present), not V(latest). + - State V(rebuild) implies cast of all specified spells, not only those existed before. + choices: ["present", "latest", "absent", "cast", "dispelled", "rebuild"] + default: "present" + type: str - depends: - description: - - Comma-separated list of _optional_ dependencies to build a spell - (or make sure it is built) with; use V(+)/V(-) in front of dependency - to turn it on/off (V(+) is optional though). - - This option is ignored if O(name) parameter is equal to V(*) or - contains more than one spell. - - Providers must be supplied in the form recognized by Sorcery, - for example 'V(openssl(SSL\))'. - type: str + depends: + description: + - Comma-separated list of _optional_ dependencies to build a spell (or make sure it is built) with; use V(+)/V(-) in front of dependency + to turn it on/off (V(+) is optional though). + - This option is ignored if O(name) parameter is equal to V(*) or contains more than one spell. + - Providers must be supplied in the form recognized by Sorcery, for example 'V(openssl(SSL\))'. + type: str - update: - description: - - Whether or not to update sorcery scripts at the very first stage. - type: bool - default: false + update: + description: + - Whether or not to update sorcery scripts at the very first stage. 
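# Editor's note — illustrative sketch for community.general.sorcery (reformatted above), not part of the patch.
# It manages a grimoire through the documented 'repository' option; the grimoire name and repository URL are hypothetical.
- name: Make sure the stable grimoire is present from a custom repository
  community.general.sorcery:
    grimoire: stable
    repository: rsync://download.sourcemage.org::codex/stable
    state: present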
+ type: bool + default: false - update_cache: - description: - - Whether or not to update grimoire collection before casting spells. - type: bool - default: false - aliases: ["update_codex"] + update_cache: + description: + - Whether or not to update grimoire collection before casting spells. + type: bool + default: false + aliases: ["update_codex"] - cache_valid_time: - description: - - Time in seconds to invalidate grimoire collection on update. - - Especially useful for SCM and rsync grimoires. - - Makes sense only in pair with O(update_cache). - type: int - default: 0 -''' + cache_valid_time: + description: + - Time in seconds to invalidate grimoire collection on update. + - Especially useful for SCM and rsync grimoires. + - Makes sense only in pair with O(update_cache). + type: int + default: 0 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Make sure spell foo is installed community.general.sorcery: spell: foo @@ -131,9 +124,9 @@ EXAMPLES = ''' depends: "{{ item.depends | default(None) }}" state: present loop: - - { spell: 'vifm', depends: '+file,-gtk+2' } - - { spell: 'fwknop', depends: 'gpgme' } - - { spell: 'pv,tnftp,tor' } + - {spell: 'vifm', depends: '+file,-gtk+2'} + - {spell: 'fwknop', depends: 'gpgme'} + - {spell: 'pv,tnftp,tor'} - name: Install the latest version of spell foo using regular glossary community.general.sorcery: @@ -184,11 +177,11 @@ EXAMPLES = ''' - name: Update only Sorcery itself community.general.sorcery: update: true -''' +""" -RETURN = ''' -''' +RETURN = r""" +""" import datetime @@ -280,7 +273,7 @@ def codex_list(module, skip_new=False): # return only specified grimoires unless requested to skip new if params['repository'] and not skip_new: - codex = dict((x, codex.get(x, NA)) for x in params['name']) + codex = {x: codex.get(x, NA) for x in params['name']} if not codex: module.fail_json(msg="no grimoires to operate on; add at least one") diff --git a/plugins/modules/spectrum_device.py b/plugins/modules/spectrum_device.py index 7cf7cf9150..bb9761d37d 100644 --- a/plugins/modules/spectrum_device.py +++ b/plugins/modules/spectrum_device.py @@ -9,88 +9,86 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: spectrum_device short_description: Creates/deletes devices in CA Spectrum description: - - This module allows you to create and delete devices in CA Spectrum U(https://www.ca.com/us/products/ca-spectrum.html). - - Tested on CA Spectrum 9.4.2, 10.1.1 and 10.2.1 + - This module allows you to create and delete devices in CA Spectrum U(https://www.ca.com/us/products/ca-spectrum.html). + - Tested on CA Spectrum 9.4.2, 10.1.1 and 10.2.1. author: "Renato Orgito (@orgito)" extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - device: - type: str - aliases: [ host, name ] - required: true - description: - - IP address of the device. - - If a hostname is given, it will be resolved to the IP address. - community: - type: str - description: - - SNMP community used for device discovery. - - Required when O(state=present). - required: true - landscape: - type: str - required: true - description: - - Landscape handle of the SpectroServer to which add or remove the device. - state: - type: str - description: - - On V(present) creates the device when it does not exist. 
- - On V(absent) removes the device when it exists. - choices: ['present', 'absent'] - default: 'present' - url: - type: str - aliases: [ oneclick_url ] - required: true - description: - - HTTP, HTTPS URL of the Oneclick server in the form V((http|https\)://host.domain[:port]). - url_username: - type: str - aliases: [ oneclick_user ] - required: true - description: - - Oneclick user name. - url_password: - type: str - aliases: [ oneclick_password ] - required: true - description: - - Oneclick user password. - use_proxy: - description: - - if V(false), it will not use a proxy, even if one is defined in an environment variable on the target hosts. - default: true - type: bool - validate_certs: - description: - - If V(false), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - default: true - type: bool - agentport: - type: int - required: false - description: - - UDP port used for SNMP discovery. - default: 161 + device: + type: str + aliases: [host, name] + required: true + description: + - IP address of the device. + - If a hostname is given, it will be resolved to the IP address. + community: + type: str + description: + - SNMP community used for device discovery. + - Required when O(state=present). + required: true + landscape: + type: str + required: true + description: + - Landscape handle of the SpectroServer to which add or remove the device. + state: + type: str + description: + - On V(present) creates the device when it does not exist. + - On V(absent) removes the device when it exists. + choices: ['present', 'absent'] + default: 'present' + url: + type: str + aliases: [oneclick_url] + required: true + description: + - HTTP, HTTPS URL of the Oneclick server in the form V((http|https\)://host.domain[:port]). + url_username: + type: str + aliases: [oneclick_user] + required: true + description: + - Oneclick user name. + url_password: + type: str + aliases: [oneclick_password] + required: true + description: + - Oneclick user password. + use_proxy: + description: + - If V(false), it will not use a proxy, even if one is defined in an environment variable on the target hosts. + default: true + type: bool + validate_certs: + description: + - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates. + default: true + type: bool + agentport: + type: int + required: false + description: + - UDP port used for SNMP discovery. + default: 161 notes: - - The devices will be created inside the I(Universe) container of the specified landscape. - - All the operations will be performed only on the specified landscape. -''' + - The devices will be created inside the I(Universe) container of the specified landscape. + - All the operations will be performed only on the specified landscape. +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Add device to CA Spectrum local_action: module: spectrum_device @@ -113,15 +111,15 @@ EXAMPLES = ''' oneclick_password: password use_proxy: false state: absent -''' +""" -RETURN = ''' +RETURN = r""" device: - description: device data when state = present + description: Device data when O(state=present). 
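# Editor's note — illustrative sketch for community.general.spectrum_device (reformatted above), not part of the patch.
# It discovers a device that answers SNMP on a non-default UDP port using the documented 'agentport' option;
# the host, community string, landscape handle, and OneClick details are hypothetical.
- name: Add a device that uses a non-default SNMP port to CA Spectrum
  community.general.spectrum_device:
    device: '{{ inventory_hostname }}'
    community: secret
    landscape: '0x100000'
    agentport: 1161
    oneclick_url: http://oneclick.example.com:8080
    oneclick_user: username
    oneclick_password: password
    state: present
  delegate_to: localhost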
returned: success type: dict sample: {'model_handle': '0x1007ab', 'landscape': '0x100000', 'address': '10.10.5.1'} -''' +""" from socket import gethostbyname, gaierror import xml.etree.ElementTree as ET diff --git a/plugins/modules/spectrum_model_attrs.py b/plugins/modules/spectrum_model_attrs.py index 43983a11a5..3057f04c15 100644 --- a/plugins/modules/spectrum_model_attrs.py +++ b/plugins/modules/spectrum_model_attrs.py @@ -9,110 +9,108 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: spectrum_model_attrs short_description: Enforce a model's attributes in CA Spectrum description: - - This module can be used to enforce a model's attributes in CA Spectrum. + - This module can be used to enforce a model's attributes in CA Spectrum. version_added: 2.5.0 author: - - Tyler Gates (@tgates81) + - Tyler Gates (@tgates81) notes: - - Tested on CA Spectrum version 10.4.2.0.189. - - Model creation and deletion are not possible with this module. For that use M(community.general.spectrum_device) instead. + - Tested on CA Spectrum version 10.4.2.0.189. + - Model creation and deletion are not possible with this module. For that use M(community.general.spectrum_device) instead. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - url: - description: - - URL of OneClick server. - type: str - required: true - url_username: - description: - - OneClick username. - type: str - required: true - aliases: [username] - url_password: - description: - - OneClick password. - type: str - required: true - aliases: [password] - use_proxy: - description: - - if V(false), it will not use a proxy, even if one is defined in - an environment variable on the target hosts. - default: true - required: false - type: bool - name: - description: - - Model name. - type: str - required: true - type: - description: - - Model type. - type: str - required: true - validate_certs: - description: - - Validate SSL certificates. Only change this to V(false) if you can guarantee that you are talking to the correct endpoint and there is no - man-in-the-middle attack happening. - type: bool - default: true - required: false - attributes: - description: - - A list of attribute names and values to enforce. - - All values and parameters are case sensitive and must be provided as strings only. - required: true - type: list - elements: dict - suboptions: - name: - description: - - Attribute name OR hex ID. - - 'Currently defined names are:' - - ' C(App_Manufacturer) (C(0x230683))' - - ' C(CollectionsModelNameString) (C(0x12adb))' - - ' C(Condition) (C(0x1000a))' - - ' C(Criticality) (C(0x1290c))' - - ' C(DeviceType) (C(0x23000e))' - - ' C(isManaged) (C(0x1295d))' - - ' C(Model_Class) (C(0x11ee8))' - - ' C(Model_Handle) (C(0x129fa))' - - ' C(Model_Name) (C(0x1006e))' - - ' C(Modeltype_Handle) (C(0x10001))' - - ' C(Modeltype_Name) (C(0x10000))' - - ' C(Network_Address) (C(0x12d7f))' - - ' C(Notes) (C(0x11564))' - - ' C(ServiceDesk_Asset_ID) (C(0x12db9))' - - ' C(TopologyModelNameString) (C(0x129e7))' - - ' C(sysDescr) (C(0x10052))' - - ' C(sysName) (C(0x10b5b))' - - ' C(Vendor_Name) (C(0x11570))' - - ' C(Description) (C(0x230017))' - - Hex IDs are the direct identifiers in Spectrum and will always work. 
- - 'To lookup hex IDs go to the UI: Locator -> Devices -> By Model Name -> -> Attributes tab.' - type: str - required: true - value: - description: - - Attribute value. Empty strings should be V("") or V(null). - type: str - required: true -''' + url: + description: + - URL of OneClick server. + type: str + required: true + url_username: + description: + - OneClick username. + type: str + required: true + aliases: [username] + url_password: + description: + - OneClick password. + type: str + required: true + aliases: [password] + use_proxy: + description: + - If V(false), it will not use a proxy, even if one is defined in an environment variable on the target hosts. + default: true + required: false + type: bool + name: + description: + - Model name. + type: str + required: true + type: + description: + - Model type. + type: str + required: true + validate_certs: + description: + - Validate SSL certificates. Only change this to V(false) if you can guarantee that you are talking to the correct endpoint and there is + no man-in-the-middle attack happening. + type: bool + default: true + required: false + attributes: + description: + - A list of attribute names and values to enforce. + - All values and parameters are case sensitive and must be provided as strings only. + required: true + type: list + elements: dict + suboptions: + name: + description: + - Attribute name OR hex ID. + - 'Currently defined names are:' + - C(App_Manufacturer) (C(0x230683)); + - C(CollectionsModelNameString) (C(0x12adb)); + - C(Condition) (C(0x1000a)); + - C(Criticality) (C(0x1290c)); + - C(DeviceType) (C(0x23000e)); + - C(isManaged) (C(0x1295d)); + - C(Model_Class) (C(0x11ee8)); + - C(Model_Handle) (C(0x129fa)); + - C(Model_Name) (C(0x1006e)); + - C(Modeltype_Handle) (C(0x10001)); + - C(Modeltype_Name) (C(0x10000)); + - C(Network_Address) (C(0x12d7f)); + - C(Notes) (C(0x11564)); + - C(ServiceDesk_Asset_ID) (C(0x12db9)); + - C(TopologyModelNameString) (C(0x129e7)); + - C(sysDescr) (C(0x10052)); + - C(sysName) (C(0x10b5b)); + - C(Vendor_Name) (C(0x11570)); + - C(Description) (C(0x230017)). + - Hex IDs are the direct identifiers in Spectrum and will always work. + - 'To lookup hex IDs go to the UI: Locator -> Devices -> By Model Name -> -> Attributes tab.' + type: str + required: true + value: + description: + - Attribute value. Empty strings should be V("") or V(null). + type: str + required: true +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Enforce maintenance mode for modelxyz01 with a note about why community.general.spectrum_model_attrs: url: "http://oneclick.url.com" @@ -128,23 +126,20 @@ EXAMPLES = r''' value: "MM set on {{ ansible_date_time.iso8601 }} via CO {{ CO }} by {{ tower_user_name | default(ansible_user_id) }}" delegate_to: localhost register: spectrum_model_attrs_status -''' +""" -RETURN = r''' +RETURN = r""" msg: - description: Informational message on the job result. - type: str - returned: always - sample: 'Success' + description: Informational message on the job result. + type: str + returned: always + sample: 'Success' changed_attrs: - description: Dictionary of changed name or hex IDs (whichever was specified) to their new corresponding values. - type: dict - returned: always - sample: { - "Notes": "MM set on 2021-02-03T22:04:02Z via CO CO9999 by tgates", - "isManaged": "true" - } -''' + description: Dictionary of changed name or hex IDs (whichever was specified) to their new corresponding values. 
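# Editor's note — illustrative sketch for community.general.spectrum_model_attrs (reformatted above), not part of the patch.
# It addresses attributes by their hex IDs (always accepted, per the documentation) instead of by name;
# the model name, type, and attribute values are hypothetical.
- name: Enforce the same attributes using hex IDs
  community.general.spectrum_model_attrs:
    url: "http://oneclick.url.com"
    username: "{{ oneclick_username }}"
    password: "{{ oneclick_password }}"
    name: "modelxyz01"
    type: "Host_Device"
    validate_certs: true
    attributes:
      - name: "0x1295d"  # isManaged
        value: "false"
      - name: "0x11564"  # Notes
        value: "Maintenance window CO9999"
  delegate_to: localhost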
+ type: dict + returned: always + sample: {"Notes": "MM set on 2021-02-03T22:04:02Z via CO CO9999 by tgates", "isManaged": "true"} +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/spotinst_aws_elastigroup.py b/plugins/modules/spotinst_aws_elastigroup.py index 45556f621c..d07761ee05 100644 --- a/plugins/modules/spotinst_aws_elastigroup.py +++ b/plugins/modules/spotinst_aws_elastigroup.py @@ -5,19 +5,15 @@ # SPDX-License-Identifier: GPL-3.0-or-later from __future__ import (absolute_import, division, print_function) -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: spotinst_aws_elastigroup short_description: Create, update or delete Spotinst AWS Elastigroups author: Spotinst (@talzur) description: - - Can create, update, or delete Spotinst AWS Elastigroups - Launch configuration is part of the elastigroup configuration, - so no additional modules are necessary for handling the launch configuration. - You will have to have a credentials file in this location - /.spotinst/credentials - The credentials file must contain a row that looks like this - token = - Full documentation available at https://help.spotinst.com/hc/en-us/articles/115003530285-Ansible- + - Can create, update, or delete Spotinst AWS Elastigroups Launch configuration is part of the elastigroup configuration, so no additional modules + are necessary for handling the launch configuration. You will have to have a credentials file in this location - C($HOME/.spotinst/credentials). + The credentials file must contain a row that looks like this C(token = ). + - Full documentation available at U(https://help.spotinst.com/hc/en-us/articles/115003530285-Ansible-). requirements: - spotinst_sdk >= 1.0.38 extends_documentation_fragment: @@ -38,62 +34,43 @@ options: account_id: description: - Optional parameter that allows to set an account-id inside the module configuration. - By default this is retrieved from the credentials path. + - By default this is retrieved from the credentials path. type: str token: description: - A Personal API Access Token issued by Spotinst. - - >- - When not specified, the module will try to obtain it, in that order, from: environment variable E(SPOTINST_TOKEN), or from the credentials path. + - 'When not specified, the module will try to obtain it, in that order, from: environment variable E(SPOTINST_TOKEN), or from the credentials + path.' type: str availability_vs_cost: description: - The strategy orientation. - - "The choices available are: V(availabilityOriented), V(costOriented), V(balanced)." + - 'The choices available are: V(availabilityOriented), V(costOriented), V(balanced).' required: true type: str availability_zones: description: - - A list of hash/dictionaries of Availability Zones that are configured in the elastigroup; - '[{"key":"value", "key":"value"}]'; - keys allowed are - name (String), - subnet_id (String), - placement_group_name (String), + - A list of hash/dictionaries of Availability Zones that are configured in the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed + are name (String), subnet_id (String), placement_group_name (String),. 
required: true type: list elements: dict block_device_mappings: description: - - A list of hash/dictionaries of Block Device Mappings for elastigroup instances; - You can specify virtual devices and EBS volumes.; - '[{"key":"value", "key":"value"}]'; - keys allowed are - device_name (List of Strings), - virtual_name (String), - no_device (String), - ebs (Object, expects the following keys- - delete_on_termination(Boolean), - encrypted(Boolean), - iops (Integer), - snapshot_id(Integer), - volume_type(String), - volume_size(Integer)) + - A list of hash/dictionaries of Block Device Mappings for elastigroup instances; You can specify virtual devices and EBS volumes.; '[{"key":"value", + "key":"value"}]'; keys allowed are device_name (List of Strings), virtual_name (String), no_device (String), ebs (Object, expects the + following keys- delete_on_termination(Boolean), encrypted(Boolean), iops (Integer), snapshot_id(Integer), volume_type(String), volume_size(Integer)). type: list elements: dict chef: description: - - The Chef integration configuration.; - Expects the following keys - chef_server (String), - organization (String), - user (String), - pem_key (String), - chef_version (String) + - The Chef integration configuration.; Expects the following keys - chef_server (String), organization (String), user (String), pem_key + (String), chef_version (String). type: dict draining_timeout: @@ -103,36 +80,30 @@ options: ebs_optimized: description: - - Enable EBS optimization for supported instances which are not enabled by default.; - Note - additional charges will be applied. + - Enable EBS optimization for supported instances which are not enabled by default.; Note - additional charges will be applied. type: bool ebs_volume_pool: description: - - A list of hash/dictionaries of EBS devices to reattach to the elastigroup when available; - '[{"key":"value", "key":"value"}]'; - keys allowed are - - volume_ids (List of Strings), - device_name (String) + - A list of hash/dictionaries of EBS devices to reattach to the elastigroup when available; '[{"key":"value", "key":"value"}]'; keys allowed + are - volume_ids (List of Strings), device_name (String). type: list elements: dict ecs: description: - - The ECS integration configuration.; - Expects the following key - - cluster_name (String) + - The ECS integration configuration.; Expects the following key - cluster_name (String). type: dict elastic_ips: description: - - List of ElasticIps Allocation Ids (example V(eipalloc-9d4e16f8)) to associate to the group instances + - List of ElasticIps Allocation Ids (example V(eipalloc-9d4e16f8)) to associate to the group instances. type: list elements: str fallback_to_od: description: - - In case of no spots available, Elastigroup will launch an On-demand instance instead + - In case of no spots available, Elastigroup will launch an On-demand instance instead. type: bool health_check_grace_period: @@ -149,159 +120,129 @@ options: health_check_type: description: - The service to use for the health check. - - "The choices available are: V(ELB), V(HCS), V(TARGET_GROUP), V(MLB), V(EC2)." + - 'The choices available are: V(ELB), V(HCS), V(TARGET_GROUP), V(MLB), V(EC2).' type: str iam_role_name: description: - - The instance profile iamRole name - - Only use iam_role_arn, or iam_role_name + - The instance profile iamRole name. + - Only use O(iam_role_arn) or O(iam_role_name). 
type: str iam_role_arn: description: - - The instance profile iamRole arn - - Only use iam_role_arn, or iam_role_name + - The instance profile iamRole arn. + - Only use O(iam_role_arn) or O(iam_role_name). type: str id: description: - - The group id if it already exists and you want to update, or delete it. - This will not work unless the uniqueness_by field is set to id. + - The group id if it already exists and you want to update, or delete it. This will not work unless the uniqueness_by field is set to id. When this is set, and the uniqueness_by field is set, the group will either be updated or deleted, but not created. type: str image_id: description: - - The image Id used to launch the instance.; - In case of conflict between Instance type and image type, an error will be returned + - The image Id used to launch the instance.; In case of conflict between Instance type and image type, an error will be returned. required: true type: str key_pair: description: - - Specify a Key Pair to attach to the instances + - Specify a Key Pair to attach to the instances. type: str kubernetes: description: - - The Kubernetes integration configuration. - Expects the following keys - - api_server (String), - token (String) + - The Kubernetes integration configuration. Expects the following keys - api_server (String), token (String). type: dict lifetime_period: description: - - Lifetime period + - Lifetime period. type: int load_balancers: description: - - List of classic ELB names + - List of classic ELB names. type: list elements: str max_size: description: - - The upper limit number of instances that you can scale up to + - The upper limit number of instances that you can scale up to. required: true type: int mesosphere: description: - - The Mesosphere integration configuration. - Expects the following key - - api_server (String) + - The Mesosphere integration configuration. Expects the following key - api_server (String). type: dict min_size: description: - - The lower limit number of instances that you can scale down to + - The lower limit number of instances that you can scale down to. required: true type: int monitoring: description: - - Describes whether instance Enhanced Monitoring is enabled + - Describes whether instance Enhanced Monitoring is enabled. type: str name: description: - - Unique name for elastigroup to be created, updated or deleted + - Unique name for elastigroup to be created, updated or deleted. 
required: true type: str network_interfaces: description: - - A list of hash/dictionaries of network interfaces to add to the elastigroup; - '[{"key":"value", "key":"value"}]'; - keys allowed are - - description (String), - device_index (Integer), - secondary_private_ip_address_count (Integer), - associate_public_ip_address (Boolean), - delete_on_termination (Boolean), - groups (List of Strings), - network_interface_id (String), - private_ip_address (String), - subnet_id (String), - associate_ipv6_address (Boolean), - private_ip_addresses (List of Objects, Keys are privateIpAddress (String, required) and primary (Boolean)) + - A list of hash/dictionaries of network interfaces to add to the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed are - description + (String), device_index (Integer), secondary_private_ip_address_count (Integer), associate_public_ip_address (Boolean), delete_on_termination + (Boolean), groups (List of Strings), network_interface_id (String), private_ip_address (String), subnet_id (String), associate_ipv6_address + (Boolean), private_ip_addresses (List of Objects, Keys are privateIpAddress (String, required) and primary (Boolean)). type: list elements: dict on_demand_count: description: - - Required if risk is not set - - Number of on demand instances to launch. All other instances will be spot instances.; - Either set this parameter or the risk parameter + - Required if risk is not set. + - Number of on demand instances to launch. All other instances will be spot instances.; Either set this parameter or the risk parameter. type: int on_demand_instance_type: description: - - On-demand instance type that will be provisioned + - On-demand instance type that will be provisioned. type: str opsworks: description: - - The elastigroup OpsWorks integration configuration.; - Expects the following key - - layer_id (String) + - The elastigroup OpsWorks integration configuration.; Expects the following key - layer_id (String). type: dict persistence: description: - - The Stateful elastigroup configuration.; - Accepts the following keys - - should_persist_root_device (Boolean), - should_persist_block_devices (Boolean), - should_persist_private_ip (Boolean) + - The Stateful elastigroup configuration.; Accepts the following keys - should_persist_root_device (Boolean), should_persist_block_devices + (Boolean), should_persist_private_ip (Boolean). type: dict product: description: - Operation system type. - - "Available choices are: V(Linux/UNIX), V(SUSE Linux), V(Windows), V(Linux/UNIX (Amazon VPC)), V(SUSE Linux (Amazon VPC))." + - 'Available choices are: V(Linux/UNIX), V(SUSE Linux), V(Windows), V(Linux/UNIX (Amazon VPC)), V(SUSE Linux (Amazon VPC)).' required: true type: str rancher: description: - - The Rancher integration configuration.; - Expects the following keys - - version (String), - access_key (String), - secret_key (String), - master_host (String) + - The Rancher integration configuration.; Expects the following keys - version (String), access_key (String), secret_key (String), master_host + (String). type: dict right_scale: description: - - The Rightscale integration configuration.; - Expects the following keys - - account_id (String), - refresh_token (String) + - The Rightscale integration configuration.; Expects the following keys - account_id (String), refresh_token (String). type: dict risk: @@ -311,59 +252,42 @@ options: roll_config: description: - - Roll configuration.; - If you would like the group to roll after updating, please use this feature. 
- Accepts the following keys - - batch_size_percentage(Integer, Required), - grace_period - (Integer, Required), - health_check_type(String, Optional) + - Roll configuration. + - If you would like the group to roll after updating, please use this feature. + - Accepts the following keys - batch_size_percentage(Integer, Required), grace_period - (Integer, Required), health_check_type(String, Optional). type: dict scheduled_tasks: description: - - A list of hash/dictionaries of scheduled tasks to configure in the elastigroup; - '[{"key":"value", "key":"value"}]'; - keys allowed are - - adjustment (Integer), - scale_target_capacity (Integer), - scale_min_capacity (Integer), - scale_max_capacity (Integer), - adjustment_percentage (Integer), - batch_size_percentage (Integer), - cron_expression (String), - frequency (String), - grace_period (Integer), - task_type (String, required), - is_enabled (Boolean) + - A list of hash/dictionaries of scheduled tasks to configure in the elastigroup, as in V([{"key":"value", "key":"value"}]). + - 'Keys allowed are: adjustment (Integer), scale_target_capacity (Integer), scale_min_capacity (Integer), scale_max_capacity (Integer), + adjustment_percentage (Integer), batch_size_percentage (Integer), cron_expression (String), frequency (String), grace_period (Integer), + task_type (String, required), is_enabled (Boolean).' type: list elements: dict security_group_ids: description: - - One or more security group IDs. ; - In case of update it will override the existing Security Group with the new given array + - One or more security group IDs. + - In case of update it will override the existing Security Group with the new given array. required: true type: list elements: str shutdown_script: description: - - The Base64-encoded shutdown script that executes prior to instance termination. - Encode before setting. + - The Base64-encoded shutdown script that executes prior to instance termination. Encode before setting. type: str signals: description: - - A list of hash/dictionaries of signals to configure in the elastigroup; - keys allowed are - - name (String, required), - timeout (Integer) + - A list of hash/dictionaries of signals to configure in the elastigroup; keys allowed are - name (String, required), timeout (Integer). type: list elements: dict spin_up_time: description: - - Spin up time, in seconds, for the instance + - Spin up time, in seconds, for the instance. type: int spot_instance_types: @@ -378,108 +302,70 @@ options: - present - absent description: - - Create or delete the elastigroup + - Create or delete the elastigroup. default: present type: str tags: description: - - A list of tags to configure in the elastigroup. Please specify list of keys and values (key colon value); + - A list of tags to configure in the elastigroup. Please specify list of keys and values (key colon value). type: list elements: dict target: description: - - The number of instances to launch + - The number of instances to launch. required: true type: int target_group_arns: description: - - List of target group arns instances should be registered to + - List of target group arns instances should be registered to. type: list elements: str tenancy: description: - Dedicated vs shared tenancy. - - "The available choices are: V(default), V(dedicated)." + - 'The available choices are: V(default), V(dedicated).' type: str terminate_at_end_of_billing_hour: description: - - Terminate at the end of billing hour + - Terminate at the end of billing hour. 
type: bool unit: description: - The capacity unit to launch instances by. - - "The available choices are: V(instance), V(weight)." + - 'The available choices are: V(instance), V(weight).' type: str up_scaling_policies: description: - - A list of hash/dictionaries of scaling policies to configure in the elastigroup; - '[{"key":"value", "key":"value"}]'; - keys allowed are - - policy_name (String, required), - namespace (String, required), - metric_name (String, required), - dimensions (List of Objects, Keys allowed are name (String, required) and value (String)), - statistic (String, required) - evaluation_periods (String, required), - period (String, required), - threshold (String, required), - cooldown (String, required), - unit (String, required), - operator (String, required), - action_type (String, required), - adjustment (String), - min_target_capacity (String), - target (String), - maximum (String), - minimum (String) + - A list of hash/dictionaries of scaling policies to configure in the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed are + - policy_name (String, required), namespace (String, required), metric_name (String, required), dimensions (List of Objects, Keys allowed + are name (String, required) and value (String)), statistic (String, required) evaluation_periods (String, required), period (String, required), + threshold (String, required), cooldown (String, required), unit (String, required), operator (String, required), action_type (String, + required), adjustment (String), min_target_capacity (String), target (String), maximum (String), minimum (String). type: list elements: dict down_scaling_policies: description: - - A list of hash/dictionaries of scaling policies to configure in the elastigroup; - '[{"key":"value", "key":"value"}]'; - keys allowed are - - policy_name (String, required), - namespace (String, required), - metric_name (String, required), - dimensions ((List of Objects), Keys allowed are name (String, required) and value (String)), - statistic (String, required), - evaluation_periods (String, required), - period (String, required), - threshold (String, required), - cooldown (String, required), - unit (String, required), - operator (String, required), - action_type (String, required), - adjustment (String), - max_target_capacity (String), - target (String), - maximum (String), - minimum (String) + - A list of hash/dictionaries of scaling policies to configure in the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed are + - policy_name (String, required), namespace (String, required), metric_name (String, required), dimensions ((List of Objects), Keys allowed + are name (String, required) and value (String)), statistic (String, required), evaluation_periods (String, required), period (String, + required), threshold (String, required), cooldown (String, required), unit (String, required), operator (String, required), action_type + (String, required), adjustment (String), max_target_capacity (String), target (String), maximum (String), minimum (String). 
type: list elements: dict target_tracking_policies: description: - - A list of hash/dictionaries of target tracking policies to configure in the elastigroup; - '[{"key":"value", "key":"value"}]'; - keys allowed are - - policy_name (String, required), - namespace (String, required), - source (String, required), - metric_name (String, required), - statistic (String, required), - unit (String, required), - cooldown (String, required), - target (String, required) + - A list of hash/dictionaries of target tracking policies to configure in the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed + are - policy_name (String, required), namespace (String, required), source (String, required), metric_name (String, required), statistic + (String, required), unit (String, required), cooldown (String, required), target (String, required). type: list elements: dict @@ -488,8 +374,8 @@ options: - id - name description: - - If your group names are not unique, you may use this feature to update or delete a specific group. - Whenever this property is set, you must set a group_id in order to update or delete a group, otherwise a group will be created. + - If your group names are not unique, you may use this feature to update or delete a specific group. Whenever this property is set, you + must set a group_id in order to update or delete a group, otherwise a group will be created. default: name type: str @@ -500,20 +386,19 @@ options: utilize_reserved_instances: description: - - In case of any available Reserved Instances, - Elastigroup will utilize your reservations before purchasing Spot instances. + - In case of any available Reserved Instances, Elastigroup will utilize your reservations before purchasing Spot instances. type: bool wait_for_instances: description: - - Whether or not the elastigroup creation / update actions should wait for the instances to spin + - Whether or not the elastigroup creation / update actions should wait for the instances to spin. type: bool default: false wait_timeout: description: - - How long the module should wait for instances before failing the action.; - Only works if wait_for_instances is True. + - How long the module should wait for instances before failing the action. + - Only works if O(wait_for_instances=true). type: int do_not_update: @@ -538,40 +423,39 @@ options: description: - Placeholder parameter for future implementation of Elastic Beanstalk configurations. 
type: dict - -''' -EXAMPLES = ''' +""" +EXAMPLES = r""" # Basic configuration YAML example - hosts: localhost tasks: - name: Create elastigroup community.general.spotinst_aws_elastigroup: - state: present - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-west-2a - subnet_id: subnet-2b68a15c - image_id: ami-f173cc91 - key_pair: spotinst-oregon - max_size: 15 - min_size: 0 - target: 0 - unit: instance - monitoring: true - name: ansible-group - on_demand_instance_type: c3.large - product: Linux/UNIX - load_balancers: - - test-lb-1 - security_group_ids: - - sg-8f4b8fe9 - spot_instance_types: - - c3.large - do_not_update: - - image_id - - target + state: present + risk: 100 + availability_vs_cost: balanced + availability_zones: + - name: us-west-2a + subnet_id: subnet-2b68a15c + image_id: ami-f173cc91 + key_pair: spotinst-oregon + max_size: 15 + min_size: 0 + target: 0 + unit: instance + monitoring: true + name: ansible-group + on_demand_instance_type: c3.large + product: Linux/UNIX + load_balancers: + - test-lb-1 + security_group_ids: + - sg-8f4b8fe9 + spot_instance_types: + - c3.large + do_not_update: + - image_id + - target register: result - ansible.builtin.debug: var=result @@ -581,39 +465,39 @@ EXAMPLES = ''' tasks: - name: Create elastigroup community.general.spotinst_aws_elastigroup: - state: present - account_id: act-1a9dd2b - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-west-2a - subnet_id: subnet-2b68a15c - tags: - - Environment: someEnvValue - - OtherTagKey: otherValue - image_id: ami-f173cc91 - key_pair: spotinst-oregon - max_size: 5 - min_size: 0 - target: 0 - unit: instance - monitoring: true - name: ansible-group-tal - on_demand_instance_type: c3.large - product: Linux/UNIX - security_group_ids: - - sg-8f4b8fe9 - block_device_mappings: - - device_name: '/dev/sda1' - ebs: - volume_size: 100 - volume_type: gp2 - spot_instance_types: - - c3.large - do_not_update: - - image_id - wait_for_instances: true - wait_timeout: 600 + state: present + account_id: act-1a9dd2b + risk: 100 + availability_vs_cost: balanced + availability_zones: + - name: us-west-2a + subnet_id: subnet-2b68a15c + tags: + - Environment: someEnvValue + - OtherTagKey: otherValue + image_id: ami-f173cc91 + key_pair: spotinst-oregon + max_size: 5 + min_size: 0 + target: 0 + unit: instance + monitoring: true + name: ansible-group-tal + on_demand_instance_type: c3.large + product: Linux/UNIX + security_group_ids: + - sg-8f4b8fe9 + block_device_mappings: + - device_name: '/dev/sda1' + ebs: + volume_size: 100 + volume_type: gp2 + spot_instance_types: + - c3.large + do_not_update: + - image_id + wait_for_instances: true + wait_timeout: 600 register: result - name: Store private ips to file @@ -628,43 +512,43 @@ EXAMPLES = ''' tasks: - name: Create elastigroup community.general.spotinst_aws_elastigroup: - state: present - account_id: act-1a9dd2b - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-west-2a - subnet_id: subnet-2b68a15c - tags: - - Environment: someEnvValue - - OtherTagKey: otherValue - image_id: ami-f173cc91 - key_pair: spotinst-oregon - max_size: 5 - min_size: 0 - target: 0 - unit: instance - monitoring: true - name: ansible-group-tal - on_demand_instance_type: c3.large - product: Linux/UNIX - security_group_ids: - - sg-8f4b8fe9 - block_device_mappings: - - device_name: '/dev/xvda' - ebs: - volume_size: 60 - volume_type: gp2 - - device_name: '/dev/xvdb' - ebs: - volume_size: 120 - volume_type: gp2 - spot_instance_types: 
- - c3.large - do_not_update: - - image_id - wait_for_instances: true - wait_timeout: 600 + state: present + account_id: act-1a9dd2b + risk: 100 + availability_vs_cost: balanced + availability_zones: + - name: us-west-2a + subnet_id: subnet-2b68a15c + tags: + - Environment: someEnvValue + - OtherTagKey: otherValue + image_id: ami-f173cc91 + key_pair: spotinst-oregon + max_size: 5 + min_size: 0 + target: 0 + unit: instance + monitoring: true + name: ansible-group-tal + on_demand_instance_type: c3.large + product: Linux/UNIX + security_group_ids: + - sg-8f4b8fe9 + block_device_mappings: + - device_name: '/dev/xvda' + ebs: + volume_size: 60 + volume_type: gp2 + - device_name: '/dev/xvdb' + ebs: + volume_size: 120 + volume_type: gp2 + spot_instance_types: + - c3.large + do_not_update: + - image_id + wait_for_instances: true + wait_timeout: 600 register: result - name: Store private ips to file @@ -678,36 +562,36 @@ EXAMPLES = ''' tasks: - name: Create elastigroup community.general.spotinst_aws_elastigroup: - state: present - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-west-2a - subnet_id: subnet-2b68a15c - image_id: ami-f173cc91 - key_pair: spotinst-oregon - max_size: 15 - min_size: 0 - target: 0 - unit: instance - block_device_mappings: - - device_name: '/dev/xvda' - virtual_name: ephemeral0 - - device_name: '/dev/xvdb/' - virtual_name: ephemeral1 - monitoring: true - name: ansible-group - on_demand_instance_type: c3.large - product: Linux/UNIX - load_balancers: - - test-lb-1 - security_group_ids: - - sg-8f4b8fe9 - spot_instance_types: - - c3.large - do_not_update: - - image_id - - target + state: present + risk: 100 + availability_vs_cost: balanced + availability_zones: + - name: us-west-2a + subnet_id: subnet-2b68a15c + image_id: ami-f173cc91 + key_pair: spotinst-oregon + max_size: 15 + min_size: 0 + target: 0 + unit: instance + block_device_mappings: + - device_name: '/dev/xvda' + virtual_name: ephemeral0 + - device_name: '/dev/xvdb/' + virtual_name: ephemeral1 + monitoring: true + name: ansible-group + on_demand_instance_type: c3.large + product: Linux/UNIX + load_balancers: + - test-lb-1 + security_group_ids: + - sg-8f4b8fe9 + spot_instance_types: + - c3.large + do_not_update: + - image_id + - target register: result - ansible.builtin.debug: var=result @@ -718,34 +602,34 @@ EXAMPLES = ''' tasks: - name: Create elastigroup community.general.spotinst_aws_elastigroup: - state: present - risk: 100 - availability_vs_cost: balanced - network_interfaces: - - associate_public_ip_address: true - device_index: 0 - availability_zones: - - name: us-west-2a - subnet_id: subnet-2b68a15c - image_id: ami-f173cc91 - key_pair: spotinst-oregon - max_size: 15 - min_size: 0 - target: 0 - unit: instance - monitoring: true - name: ansible-group - on_demand_instance_type: c3.large - product: Linux/UNIX - load_balancers: - - test-lb-1 - security_group_ids: - - sg-8f4b8fe9 - spot_instance_types: - - c3.large - do_not_update: - - image_id - - target + state: present + risk: 100 + availability_vs_cost: balanced + network_interfaces: + - associate_public_ip_address: true + device_index: 0 + availability_zones: + - name: us-west-2a + subnet_id: subnet-2b68a15c + image_id: ami-f173cc91 + key_pair: spotinst-oregon + max_size: 15 + min_size: 0 + target: 0 + unit: instance + monitoring: true + name: ansible-group + on_demand_instance_type: c3.large + product: Linux/UNIX + load_balancers: + - test-lb-1 + security_group_ids: + - sg-8f4b8fe9 + spot_instance_types: + - c3.large + 
do_not_update: + - image_id + - target register: result - ansible.builtin.debug: var=result @@ -756,70 +640,68 @@ EXAMPLES = ''' tasks: - name: Create elastigroup community.general.spotinst_aws_elastigroup: - account_id: act-92d45673 - state: present - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-west-2a - subnet_id: subnet-79da021e - image_id: ami-f173cc91 - fallback_to_od: true - tags: - - Creator: ValueOfCreatorTag - - Environment: ValueOfEnvironmentTag - key_pair: spotinst-labs-oregon - max_size: 10 - min_size: 0 - target: 2 - unit: instance - monitoring: true - name: ansible-group-1 - on_demand_instance_type: c3.large - product: Linux/UNIX - security_group_ids: - - sg-46cdc13d - spot_instance_types: - - c3.large - target_tracking_policies: - - policy_name: target-tracking-1 - namespace: AWS/EC2 - metric_name: CPUUtilization - statistic: average - unit: percent - target: 50 - cooldown: 120 - do_not_update: - - image_id + account_id: act-92d45673 + state: present + risk: 100 + availability_vs_cost: balanced + availability_zones: + - name: us-west-2a + subnet_id: subnet-79da021e + image_id: ami-f173cc91 + fallback_to_od: true + tags: + - Creator: ValueOfCreatorTag + - Environment: ValueOfEnvironmentTag + key_pair: spotinst-labs-oregon + max_size: 10 + min_size: 0 + target: 2 + unit: instance + monitoring: true + name: ansible-group-1 + on_demand_instance_type: c3.large + product: Linux/UNIX + security_group_ids: + - sg-46cdc13d + spot_instance_types: + - c3.large + target_tracking_policies: + - policy_name: target-tracking-1 + namespace: AWS/EC2 + metric_name: CPUUtilization + statistic: average + unit: percent + target: 50 + cooldown: 120 + do_not_update: + - image_id register: result - ansible.builtin.debug: var=result -''' +""" -RETURN = ''' ---- +RETURN = r""" instances: - description: List of active elastigroup instances and their details. - returned: success - type: dict - sample: [ - { - "spotInstanceRequestId": "sir-regs25zp", - "instanceId": "i-09640ad8678234c", - "instanceType": "m4.large", - "product": "Linux/UNIX", - "availabilityZone": "us-west-2b", - "privateIp": "180.0.2.244", - "createdAt": "2017-07-17T12:46:18.000Z", - "status": "fulfilled" - } - ] + description: List of active elastigroup instances and their details. + returned: success + type: dict + sample: [ + { + "spotInstanceRequestId": "sir-regs25zp", + "instanceId": "i-09640ad8678234c", + "instanceType": "m4.large", + "product": "Linux/UNIX", + "availabilityZone": "us-west-2b", + "privateIp": "180.0.2.244", + "createdAt": "2017-07-17T12:46:18.000Z", + "status": "fulfilled" + } + ] group_id: - description: Created / Updated group's ID. - returned: success - type: str - sample: "sig-12345" - -''' + description: Created / Updated group's ID. + returned: success + type: str + sample: "sig-12345" +""" HAS_SPOTINST_SDK = False __metaclass__ = type diff --git a/plugins/modules/ss_3par_cpg.py b/plugins/modules/ss_3par_cpg.py index 32c1cd443f..c9c9b4bd90 100644 --- a/plugins/modules/ss_3par_cpg.py +++ b/plugins/modules/ss_3par_cpg.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" short_description: Manage HPE StoreServ 3PAR CPG author: - Farhan Nomani (@farhan7500) @@ -43,18 +42,15 @@ options: type: str growth_increment: description: - - Specifies the growth increment(in MiB, GiB or TiB) the amount of logical disk storage - created on each auto-grow operation. 
+ - Specifies the growth increment(in MiB, GiB or TiB) the amount of logical disk storage created on each auto-grow operation. type: str growth_limit: description: - - Specifies that the autogrow operation is limited to the specified - storage amount that sets the growth limit(in MiB, GiB or TiB). + - Specifies that the autogrow operation is limited to the specified storage amount that sets the growth limit(in MiB, GiB or TiB). type: str growth_warning: description: - - Specifies that the threshold(in MiB, GiB or TiB) of used logical disk space when exceeded - results in a warning alert. + - Specifies that the threshold(in MiB, GiB or TiB) of used logical disk space when exceeded results in a warning alert. type: str high_availability: choices: @@ -62,8 +58,7 @@ options: - CAGE - MAG description: - - Specifies that the layout must support the failure of one port pair, - one cage, or one magazine. + - Specifies that the layout must support the failure of one port pair, one cage, or one magazine. type: str raid_type: choices: @@ -92,13 +87,12 @@ options: type: bool default: false extends_documentation_fragment: -- community.general.hpe3par -- community.general.attributes - -''' + - community.general.hpe3par + - community.general.attributes +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create CPG sample_cpg community.general.ss_3par_cpg: storage_system_ip: 10.10.10.1 @@ -124,10 +118,10 @@ EXAMPLES = r''' state: absent cpg_name: sample_cpg secure: false -''' +""" -RETURN = r''' -''' +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.storage.hpe3par import hpe3par diff --git a/plugins/modules/ssh_config.py b/plugins/modules/ssh_config.py index d974f45373..4e090b5457 100644 --- a/plugins/modules/ssh_config.py +++ b/plugins/modules/ssh_config.py @@ -11,18 +11,17 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ssh_config short_description: Manage SSH config for user version_added: '2.0.0' description: - - Configures SSH hosts with special C(IdentityFile)s and hostnames. + - Configures SSH hosts with special C(IdentityFile)s and hostnames. author: - - Björn Andersson (@gaqzi) - - Abhijeet Kasurde (@Akasurde) + - Björn Andersson (@gaqzi) + - Abhijeet Kasurde (@Akasurde) extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: check_mode: support: full @@ -33,7 +32,7 @@ options: description: - Whether a host entry should exist or not. default: present - choices: [ 'present', 'absent' ] + choices: ['present', 'absent'] type: str user: description: @@ -50,8 +49,7 @@ options: host: description: - The endpoint this configuration is valid for. - - Can be an actual address on the internet or an alias that will - connect to the value of O(hostname). + - Can be an actual address on the internet or an alias that will connect to the value of O(hostname). required: true type: str hostname: @@ -68,17 +66,14 @@ options: type: str identity_file: description: - - The path to an identity file (SSH private key) that will be used - when connecting to this host. + - The path to an identity file (SSH private key) that will be used when connecting to this host. - File need to exist and have mode V(0600) to be valid. 
type: path identities_only: description: - - Specifies that SSH should only use the configured authentication - identity and certificate files (either the default files, or - those explicitly configured in the C(ssh_config) files or passed on - the ssh command-line), even if ssh-agent or a PKCS11Provider or - SecurityKeyProvider offers more identities. + - Specifies that SSH should only use the configured authentication identity and certificate files (either the default files, or those explicitly + configured in the C(ssh_config) files or passed on the ssh command-line), even if ssh-agent or a PKCS11Provider or SecurityKeyProvider + offers more identities. type: bool version_added: 8.2.0 user_known_hosts_file: @@ -89,7 +84,7 @@ options: description: - Whether to strictly check the host key when doing connections to the remote host. - The value V(accept-new) is supported since community.general 8.6.0. - choices: [ 'yes', 'no', 'ask', 'accept-new' ] + choices: ['yes', 'no', 'ask', 'accept-new'] type: str proxycommand: description: @@ -126,7 +121,7 @@ options: controlmaster: description: - Sets the C(ControlMaster) option. - choices: [ 'yes', 'no', 'ask', 'auto', 'autoask' ] + choices: ['yes', 'no', 'ask', 'auto', 'autoask'] type: str version_added: 8.1.0 controlpath: @@ -140,10 +135,10 @@ options: type: str version_added: 8.1.0 requirements: -- paramiko -''' + - paramiko +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Add a host in the configuration community.general.ssh_config: user: akasurde @@ -158,9 +153,9 @@ EXAMPLES = r''' ssh_config_file: "{{ ssh_config_test }}" host: "example.com" state: absent -''' +""" -RETURN = r''' +RETURN = r""" hosts_added: description: A list of host added. returned: success @@ -196,7 +191,7 @@ hosts_change_diff: } } ] -''' +""" import os diff --git a/plugins/modules/stacki_host.py b/plugins/modules/stacki_host.py index 57440a24d0..4b37d256cb 100644 --- a/plugins/modules/stacki_host.py +++ b/plugins/modules/stacki_host.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: stacki_host short_description: Add or remove host to stacki front-end description: @@ -35,8 +34,7 @@ options: type: str stacki_password: description: - - Password for authenticating with Stacki API, but if not - specified, the environment variable E(stacki_password) is used instead. + - Password for authenticating with Stacki API, but if not specified, the environment variable E(stacki_password) is used instead. required: true type: str stacki_endpoint: @@ -68,7 +66,7 @@ options: description: - Set value to the desired state for the specified host. type: str - choices: [ absent, present ] + choices: [absent, present] default: present appliance: description: @@ -96,10 +94,10 @@ options: type: str default: private author: -- Hugh Ma (@bbyhuy) -''' + - Hugh Ma (@bbyhuy) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Add a host named test-1 community.general.stacki_host: name: test-1 @@ -117,27 +115,27 @@ EXAMPLES = ''' stacki_password: pwd stacki_endpoint: url state: absent -''' +""" -RETURN = ''' +RETURN = r""" changed: - description: response to whether or not the api call completed successfully + description: Response to whether or not the api call completed successfully. returned: always type: bool sample: true stdout: - description: the set of responses from the commands + description: The set of responses from the commands. 
returned: always type: list sample: ['...', '...'] stdout_lines: - description: the value of stdout split into a list + description: The value of stdout split into a list. returned: always type: list sample: [['...', '...'], ['...'], ['...']] -''' +""" import json diff --git a/plugins/modules/statsd.py b/plugins/modules/statsd.py index 8bc0f0b187..dcb3f0252e 100644 --- a/plugins/modules/statsd.py +++ b/plugins/modules/statsd.py @@ -7,15 +7,14 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: statsd short_description: Send metrics to StatsD version_added: 2.1.0 description: - The C(statsd) module sends metrics to StatsD. - For more information, see U(https://statsd-metrics.readthedocs.io/en/latest/). - - Supported metric types are V(counter) and V(gauge). - Currently unupported metric types are V(timer), V(set), and V(gaugedelta). + - Supported metric types are V(counter) and V(gauge). Currently unupported metric types are V(timer), V(set), and V(gaugedelta). author: "Mark Mercado (@mamercad)" requirements: - statsd @@ -80,9 +79,9 @@ options: default: false description: - If the metric is of type V(gauge), change the value by O(delta). -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Increment the metric my_counter by 1 community.general.statsd: host: localhost @@ -100,7 +99,7 @@ EXAMPLES = ''' metric: my_gauge metric_type: gauge value: 7 -''' +""" from ansible.module_utils.basic import (AnsibleModule, missing_required_lib) diff --git a/plugins/modules/statusio_maintenance.py b/plugins/modules/statusio_maintenance.py index 0a96d0fb41..6f17523e25 100644 --- a/plugins/modules/statusio_maintenance.py +++ b/plugins/modules/statusio_maintenance.py @@ -9,127 +9,123 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' - +DOCUMENTATION = r""" module: statusio_maintenance short_description: Create maintenance windows for your status.io dashboard description: - - Creates a maintenance window for status.io - - Deletes a maintenance window for status.io + - Creates or deletes a maintenance window for status.io. notes: - - You can use the apiary API url (http://docs.statusio.apiary.io/) to - capture API traffic - - Use start_date and start_time with minutes to set future maintenance window + - You can use the apiary API URL (U(http://docs.statusio.apiary.io/)) to capture API traffic. + - Use start_date and start_time with minutes to set future maintenance window. author: Benjamin Copeland (@bhcopeland) extends_documentation_fragment: - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - title: - type: str - description: - - A descriptive title for the maintenance window - default: "A new maintenance window" - desc: - type: str - description: - - Message describing the maintenance window - default: "Created by Ansible" - state: - type: str - description: - - Desired state of the package. - default: "present" - choices: ["present", "absent"] - api_id: - type: str - description: - - Your unique API ID from status.io - required: true - api_key: - type: str - description: - - Your unique API Key from status.io - required: true - statuspage: - type: str - description: - - Your unique StatusPage ID from status.io - required: true - url: - type: str - description: - - Status.io API URL. A private apiary can be used instead. 
- default: "https://api.status.io" - components: - type: list - elements: str - description: - - The given name of your component (server name) - aliases: ['component'] - containers: - type: list - elements: str - description: - - The given name of your container (data center) - aliases: ['container'] - all_infrastructure_affected: - description: - - If it affects all components and containers - type: bool - default: false - automation: - description: - - Automatically start and end the maintenance window - type: bool - default: false - maintenance_notify_now: - description: - - Notify subscribers now - type: bool - default: false - maintenance_notify_72_hr: - description: - - Notify subscribers 72 hours before maintenance start time - type: bool - default: false - maintenance_notify_24_hr: - description: - - Notify subscribers 24 hours before maintenance start time - type: bool - default: false - maintenance_notify_1_hr: - description: - - Notify subscribers 1 hour before maintenance start time - type: bool - default: false - maintenance_id: - type: str - description: - - The maintenance id number when deleting a maintenance window - minutes: - type: int - description: - - The length of time in UTC that the maintenance will run - (starting from playbook runtime) - default: 10 - start_date: - type: str - description: - - Date maintenance is expected to start (Month/Day/Year) (UTC) - - End Date is worked out from start_date + minutes - start_time: - type: str - description: - - Time maintenance is expected to start (Hour:Minutes) (UTC) - - End Time is worked out from start_time + minutes -''' + title: + type: str + description: + - A descriptive title for the maintenance window. + default: "A new maintenance window" + desc: + type: str + description: + - Message describing the maintenance window. + default: "Created by Ansible" + state: + type: str + description: + - Desired state of the package. + default: "present" + choices: ["present", "absent"] + api_id: + type: str + description: + - Your unique API ID from status.io. + required: true + api_key: + type: str + description: + - Your unique API Key from status.io. + required: true + statuspage: + type: str + description: + - Your unique StatusPage ID from status.io. + required: true + url: + type: str + description: + - Status.io API URL. A private apiary can be used instead. + default: "https://api.status.io" + components: + type: list + elements: str + description: + - The given name of your component (server name). + aliases: ['component'] + containers: + type: list + elements: str + description: + - The given name of your container (data center). + aliases: ['container'] + all_infrastructure_affected: + description: + - If it affects all components and containers. + type: bool + default: false + automation: + description: + - Automatically start and end the maintenance window. + type: bool + default: false + maintenance_notify_now: + description: + - Notify subscribers now. + type: bool + default: false + maintenance_notify_72_hr: + description: + - Notify subscribers 72 hours before maintenance start time. + type: bool + default: false + maintenance_notify_24_hr: + description: + - Notify subscribers 24 hours before maintenance start time. + type: bool + default: false + maintenance_notify_1_hr: + description: + - Notify subscribers 1 hour before maintenance start time. + type: bool + default: false + maintenance_id: + type: str + description: + - The maintenance id number when deleting a maintenance window. 
+ minutes: + type: int + description: + - The length of time in UTC that the maintenance will run (starting from playbook runtime). + default: 10 + start_date: + type: str + description: + - Date maintenance is expected to start (Month/Day/Year) (UTC). + - End Date is worked out from O(start_date) + O(minutes). + start_time: + type: str + description: + - Time maintenance is expected to start (Hour:Minutes) (UTC). + - End Time is worked out from O(start_time) + O(minutes). +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a maintenance window for 10 minutes on server1, with automation to stop the maintenance community.general.statusio_maintenance: title: Router Upgrade from ansible @@ -176,10 +172,9 @@ EXAMPLES = ''' api_id: api_id api_key: api_key state: absent - -''' +""" # TODO: Add RETURN documentation. -RETURN = ''' # ''' +RETURN = """ # """ import datetime import json diff --git a/plugins/modules/sudoers.py b/plugins/modules/sudoers.py index a392b4adfa..2735ce72cd 100644 --- a/plugins/modules/sudoers.py +++ b/plugins/modules/sudoers.py @@ -10,8 +10,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: sudoers short_description: Manage sudoers files version_added: "4.3.0" @@ -98,11 +97,11 @@ options: - If V(required), visudo must be available to validate the sudoers rule. type: str default: detect - choices: [ absent, detect, required ] + choices: [absent, detect, required] version_added: 5.2.0 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Allow the backup user to sudo /usr/local/bin/backup community.general.sudoers: name: allow-backup @@ -158,7 +157,7 @@ EXAMPLES = ''' user: alice commands: /usr/bin/less noexec: true -''' +""" import os from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/supervisorctl.py b/plugins/modules/supervisorctl.py index e8d9c89a65..9e6900e234 100644 --- a/plugins/modules/supervisorctl.py +++ b/plugins/modules/supervisorctl.py @@ -9,12 +9,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: supervisorctl short_description: Manage the state of a program or group of programs running via supervisord description: - - Manage the state of a program or group of programs running via supervisord + - Manage the state of a program or group of programs running via supervisord. extends_documentation_fragment: - community.general.attributes attributes: @@ -33,29 +32,29 @@ options: config: type: path description: - - The supervisor configuration file path + - The supervisor configuration file path. server_url: type: str description: - - URL on which supervisord server is listening + - URL on which supervisord server is listening. username: type: str description: - - username to use for authentication + - Username to use for authentication. password: type: str description: - - password to use for authentication + - Password to use for authentication. state: type: str description: - The desired state of program/group. required: true - choices: [ "present", "started", "stopped", "restarted", "absent", "signalled" ] + choices: ["present", "started", "stopped", "restarted", "absent", "signalled"] stop_before_removing: type: bool description: - - Use O(stop_before_removing=true) to stop the program/group before removing it + - Use O(stop_before_removing=true) to stop the program/group before removing it. 
required: false default: false version_added: 7.5.0 @@ -66,19 +65,19 @@ options: supervisorctl_path: type: path description: - - path to supervisorctl executable + - Path to C(supervisorctl) executable. notes: - When O(state=present), the module will call C(supervisorctl reread) then C(supervisorctl add) if the program/group does not exist. - When O(state=restarted), the module will call C(supervisorctl update) then call C(supervisorctl restart). - - When O(state=absent), the module will call C(supervisorctl reread) then C(supervisorctl remove) to remove the target program/group. - If the program/group is still running, the action will fail. If you want to stop the program/group before removing, use O(stop_before_removing=true). -requirements: [ "supervisorctl" ] + - When O(state=absent), the module will call C(supervisorctl reread) then C(supervisorctl remove) to remove the target program/group. If the + program/group is still running, the action will fail. If you want to stop the program/group before removing, use O(stop_before_removing=true). +requirements: ["supervisorctl"] author: - - "Matt Wright (@mattupstate)" - - "Aaron Wang (@inetfuture) " -''' + - "Matt Wright (@mattupstate)" + - "Aaron Wang (@inetfuture) " +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Manage the state of program to be in started state community.general.supervisorctl: name: my_app @@ -113,7 +112,7 @@ EXAMPLES = ''' community.general.supervisorctl: name: all state: restarted -''' +""" import os from ansible.module_utils.basic import AnsibleModule, is_executable diff --git a/plugins/modules/svc.py b/plugins/modules/svc.py index b327ddfd60..110b61856a 100644 --- a/plugins/modules/svc.py +++ b/plugins/modules/svc.py @@ -8,60 +8,56 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: svc author: -- Brian Coca (@bcoca) -short_description: Manage daemontools services + - Brian Coca (@bcoca) +short_description: Manage C(daemontools) services description: - - Controls daemontools services on remote hosts using the svc utility. + - Controls C(daemontools) services on remote hosts using the C(svc) utility. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - Name of the service to manage. - type: str - required: true - state: - description: - - V(started)/V(stopped) are idempotent actions that will not run - commands unless necessary. V(restarted) will always bounce the - svc (svc -t) and V(killed) will always bounce the svc (svc -k). - V(reloaded) will send a sigusr1 (svc -1). - V(once) will run a normally downed svc once (svc -o), not really - an idempotent operation. - type: str - choices: [ killed, once, reloaded, restarted, started, stopped ] - downed: - description: - - Should a 'down' file exist or not, if it exists it disables auto startup. - Defaults to no. Downed does not imply stopped. - type: bool - enabled: - description: - - Whether the service is enabled or not, if disabled it also implies stopped. - Take note that a service can be enabled and downed (no auto restart). - type: bool - service_dir: - description: - - Directory svscan watches for services - type: str - default: /service - service_src: - description: - - Directory where services are defined, the source of symlinks to service_dir. 
- type: str - default: /etc/service -''' + name: + description: + - Name of the service to manage. + type: str + required: true + state: + description: + - V(started)/V(stopped) are idempotent actions that will not run commands unless necessary. + - V(restarted) will always bounce the svc (svc -t) and V(killed) will always bounce the svc (svc -k). + - V(reloaded) will send a sigusr1 (svc -1). + - V(once) will run a normally downed svc once (svc -o), not really an idempotent operation. + type: str + choices: [killed, once, reloaded, restarted, started, stopped] + downed: + description: + - Should a C(down) file exist or not, if it exists it disables auto startup. Defaults to V(false). Downed does not imply stopped. + type: bool + enabled: + description: + - Whether the service is enabled or not, if disabled it also implies O(state=stopped). Take note that a service can be enabled and downed (no auto + restart). + type: bool + service_dir: + description: + - Directory C(svscan) watches for services. + type: str + default: /service + service_src: + description: + - Directory where services are defined, the source of symlinks to O(service_dir). + type: str + default: /etc/service +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Start svc dnscache, if not running community.general.svc: name: dnscache @@ -92,7 +88,7 @@ EXAMPLES = ''' name: dnscache state: reloaded service_dir: /var/service -''' +""" import os import re diff --git a/plugins/modules/svr4pkg.py b/plugins/modules/svr4pkg.py index 56ded66e62..ac919d749b 100644 --- a/plugins/modules/svr4pkg.py +++ b/plugins/modules/svr4pkg.py @@ -10,19 +10,16 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: svr4pkg short_description: Manage Solaris SVR4 packages description: - - Manages SVR4 packages on Solaris 10 and 11. - - These were the native packages on Solaris <= 10 and are available - as a legacy feature in Solaris 11. - - Note that this is a very basic packaging system. It will not enforce - dependencies on install or remove. + - Manages SVR4 packages on Solaris 10 and 11. + - These were the native packages on Solaris <= 10 and are available as a legacy feature in Solaris 11. + - Note that this is a very basic packaging system. It will not enforce dependencies on install or remove. author: "Boyd Adamson (@brontitall)" extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: check_mode: support: full @@ -47,8 +44,9 @@ options: src: description: - Specifies the location to install the package from. Required when O(state=present). - - "Can be any path acceptable to the C(pkgadd) command's C(-d) option. For example: V(somefile.pkg), V(/dir/with/pkgs), V(http:/server/mypkgs.pkg)." - - If using a file or directory, they must already be accessible by the host. See the M(ansible.builtin.copy) module for a way to get them there. + - "Can be any path acceptable to the C(pkgadd) command's C(-d) option. For example: V(somefile.pkg), V(/dir/with/pkgs), V(http://server/mypkgs.pkg)." + - If using a file or directory, they must already be accessible by the host. See the M(ansible.builtin.copy) module for a way to get them + there. 
type: str proxy: description: @@ -73,9 +71,9 @@ options: required: false type: bool default: false -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install a package from an already copied file community.general.svr4pkg: name: CSWcommon @@ -106,7 +104,7 @@ EXAMPLES = ''' name: FIREFOX state: absent category: true -''' +""" import os diff --git a/plugins/modules/swdepot.py b/plugins/modules/swdepot.py index 9ba1b02b30..628c63f810 100644 --- a/plugins/modules/swdepot.py +++ b/plugins/modules/swdepot.py @@ -12,41 +12,40 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: swdepot short_description: Manage packages with swdepot package manager (HP-UX) description: - - Will install, upgrade and remove packages with swdepot package manager (HP-UX) + - Will install, upgrade and remove packages with swdepot package manager (HP-UX). notes: [] author: "Raul Melo (@melodous)" extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - package name. - aliases: [pkg] - required: true - type: str - state: - description: - - whether to install (V(present), V(latest)), or remove (V(absent)) a package. - required: true - choices: [ 'present', 'latest', 'absent'] - type: str - depot: - description: - - The source repository from which install or upgrade a package. - type: str -''' + name: + description: + - Package name. + aliases: [pkg] + required: true + type: str + state: + description: + - Whether to install (V(present), V(latest)), or remove (V(absent)) a package. + required: true + choices: ['present', 'latest', 'absent'] + type: str + depot: + description: + - The source repository from which install or upgrade a package. + type: str +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install a package community.general.swdepot: name: unzip-6.0 @@ -63,7 +62,7 @@ EXAMPLES = ''' community.general.swdepot: name: unzip state: absent -''' +""" import re diff --git a/plugins/modules/swupd.py b/plugins/modules/swupd.py index 16738c8cb8..9b13a4e658 100644 --- a/plugins/modules/swupd.py +++ b/plugins/modules/swupd.py @@ -10,13 +10,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: swupd short_description: Manages updates and bundles in ClearLinux systems description: - - Manages updates and bundles with the swupd bundle manager, which is used by the - Clear Linux Project for Intel Architecture. + - Manages updates and bundles with the swupd bundle manager, which is used by the Clear Linux Project for Intel Architecture. author: Alberto Murillo (@albertomurillo) extends_documentation_fragment: - community.general.attributes @@ -28,19 +26,16 @@ attributes: options: contenturl: description: - - URL pointing to the contents of available bundles. - If not specified, the contents are retrieved from clearlinux.org. + - URL pointing to the contents of available bundles. If not specified, the contents are retrieved from clearlinux.org. type: str format: description: - - The format suffix for version file downloads. For example [1,2,3,staging,etc]. - If not specified, the default format is used. + - The format suffix for version file downloads. For example [1,2,3,staging,etc]. If not specified, the default format is used. 
type: str manifest: description: - - The manifest contains information about the bundles at certain version of the OS. - Specify a Manifest version to verify against that version or leave unspecified to - verify against the current version. + - The manifest contains information about the bundles at certain version of the OS. Specify a Manifest version to verify against that version + or leave unspecified to verify against the current version. aliases: [release, version] type: int name: @@ -50,8 +45,7 @@ options: type: str state: description: - - Indicates the desired (I)bundle state. V(present) ensures the bundle - is installed while V(absent) ensures the (I)bundle is not installed. + - Indicates the desired (I)bundle state. V(present) ensures the bundle is installed while V(absent) ensures the (I)bundle is not installed. default: present choices: [present, absent] type: str @@ -73,9 +67,9 @@ options: description: - URL for version string download. type: str -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Update the OS to the latest version community.general.swupd: update: true @@ -98,18 +92,18 @@ EXAMPLES = ''' community.general.swupd: verify: true manifest: 12920 -''' +""" -RETURN = ''' +RETURN = r""" stdout: - description: stdout of swupd + description: C(stdout) of C(swupd). returned: always type: str stderr: - description: stderr of swupd + description: C(stderr) of C(swupd). returned: always type: str -''' +""" import os from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/syslogger.py b/plugins/modules/syslogger.py index 3a7abf4fbe..ca9aebfcfc 100644 --- a/plugins/modules/syslogger.py +++ b/plugins/modules/syslogger.py @@ -7,55 +7,53 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: syslogger short_description: Log messages in the syslog description: - - Uses syslog to add log entries to the host. + - Uses syslog to add log entries to the host. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - msg: - type: str - description: - - This is the message to place in syslog. - required: true - priority: - type: str - description: - - Set the log priority. - choices: [ "emerg", "alert", "crit", "err", "warning", "notice", "info", "debug" ] - default: "info" - facility: - type: str - description: - - Set the log facility. - choices: [ "kern", "user", "mail", "daemon", "auth", "lpr", "news", - "uucp", "cron", "syslog", "local0", "local1", "local2", - "local3", "local4", "local5", "local6", "local7" ] - default: "daemon" - log_pid: - description: - - Log the PID in brackets. - type: bool - default: false - ident: - description: - - Specify the name of application name which is sending the log to syslog. - type: str - default: 'ansible_syslogger' - version_added: '0.2.0' + msg: + type: str + description: + - This is the message to place in syslog. + required: true + priority: + type: str + description: + - Set the log priority. + choices: ["emerg", "alert", "crit", "err", "warning", "notice", "info", "debug"] + default: "info" + facility: + type: str + description: + - Set the log facility. 
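Besides O(update) and O(verify), the swupd options above support installing a single bundle by O(name). A minimal sketch with an illustrative bundle name:

- name: Ensure a bundle is installed
  community.general.swupd:
    name: editors   # illustrative bundle name
    state: present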
+ choices: ["kern", "user", "mail", "daemon", "auth", "lpr", "news", "uucp", "cron", "syslog", "local0", "local1", "local2", "local3", "local4", + "local5", "local6", "local7"] + default: "daemon" + log_pid: + description: + - Log the PID in brackets. + type: bool + default: false + ident: + description: + - Specify the name of application name which is sending the log to syslog. + type: str + default: 'ansible_syslogger' + version_added: '0.2.0' author: - - Tim Rightnour (@garbled1) -''' + - Tim Rightnour (@garbled1) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Simple Usage community.general.syslogger: msg: "I will end up as daemon.info" @@ -72,36 +70,36 @@ EXAMPLES = r''' ident: "MyApp" msg: "I want to believe" priority: "alert" -''' +""" -RETURN = r''' +RETURN = r""" ident: - description: Name of application sending the message to log + description: Name of application sending the message to log. returned: always type: str sample: "ansible_syslogger" version_added: '0.2.0' priority: - description: Priority level + description: Priority level. returned: always type: str sample: "daemon" facility: - description: Syslog facility + description: Syslog facility. returned: always type: str sample: "info" log_pid: - description: Log PID status + description: Log PID status. returned: always type: bool sample: true msg: - description: Message sent to syslog + description: Message sent to syslog. returned: always type: str sample: "Hello from Ansible" -''' +""" import syslog import traceback diff --git a/plugins/modules/syspatch.py b/plugins/modules/syspatch.py index c90ef0d227..3cedc220f7 100644 --- a/plugins/modules/syspatch.py +++ b/plugins/modules/syspatch.py @@ -8,37 +8,35 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: syspatch short_description: Manage OpenBSD system patches description: - - "Manage OpenBSD system patches using syspatch." - + - Manage OpenBSD system patches using syspatch. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - revert: - description: - - Revert system patches. - type: str - choices: [ all, one ] + revert: + description: + - Revert system patches. + type: str + choices: [all, one] author: - - Andrew Klaus (@precurse) -''' + - Andrew Klaus (@precurse) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Apply all available system patches community.general.syspatch: @@ -58,20 +56,20 @@ EXAMPLES = ''' - name: Reboot if patch requires it ansible.builtin.reboot: when: syspatch.reboot_needed -''' +""" -RETURN = r''' +RETURN = r""" rc: - description: The command return code (0 means success) + description: The command return code (0 means success). returned: always type: int stdout: - description: syspatch standard output. + description: C(syspatch) standard output. returned: always type: str sample: "001_rip6cksum" stderr: - description: syspatch standard error. + description: C(syspatch) standard error. 
returned: always type: str sample: "syspatch: need root privileges" @@ -80,7 +78,7 @@ reboot_needed: returned: always type: bool sample: true -''' +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/sysrc.py b/plugins/modules/sysrc.py index 6780975d4f..d93bccd620 100644 --- a/plugins/modules/sysrc.py +++ b/plugins/modules/sysrc.py @@ -9,64 +9,62 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" author: - - David Lundgren (@dlundgren) + - David Lundgren (@dlundgren) module: sysrc short_description: Manage FreeBSD using sysrc version_added: '2.0.0' description: - - Manages C(/etc/rc.conf) for FreeBSD. + - Manages C(/etc/rc.conf) for FreeBSD. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: none + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - Name of variable in C(/etc/rc.conf) to manage. - type: str - required: true - value: - description: - - The value to set when O(state=present). - - The value to add when O(state=value_present). - - The value to remove when O(state=value_absent). - type: str - state: - description: - - Use V(present) to add the variable. - - Use V(absent) to remove the variable. - - Use V(value_present) to add the value to the existing variable. - - Use V(value_absent) to remove the value from the existing variable. - type: str - default: "present" - choices: [ absent, present, value_present, value_absent ] - path: - description: - - Path to file to use instead of V(/etc/rc.conf). - type: str - default: "/etc/rc.conf" - delim: - description: - - Delimiter to be used instead of V(" ") (space). - - Only used when O(state=value_present) or O(state=value_absent). - default: " " - type: str - jail: - description: - - Name or ID of the jail to operate on. - type: str + name: + description: + - Name of variable in C(/etc/rc.conf) to manage. + type: str + required: true + value: + description: + - The value to set when O(state=present). + - The value to add when O(state=value_present). + - The value to remove when O(state=value_absent). + type: str + state: + description: + - Use V(present) to add the variable. + - Use V(absent) to remove the variable. + - Use V(value_present) to add the value to the existing variable. + - Use V(value_absent) to remove the value from the existing variable. + type: str + default: "present" + choices: [absent, present, value_present, value_absent] + path: + description: + - Path to file to use instead of V(/etc/rc.conf). + type: str + default: "/etc/rc.conf" + delim: + description: + - Delimiter to be used instead of V(" ") (space). + - Only used when O(state=value_present) or O(state=value_absent). + default: " " + type: str + jail: + description: + - Name or ID of the jail to operate on. + type: str notes: - The O(name) cannot contain periods as sysrc does not support OID style names. -''' +""" -EXAMPLES = r''' ---- +EXAMPLES = r""" # enable mysql in the /etc/rc.conf - name: Configure mysql pid file community.general.sysrc: @@ -94,15 +92,15 @@ EXAMPLES = r''' name: nginx_enable value: "YES" jail: testjail -''' +""" -RETURN = r''' +RETURN = r""" changed: description: Return changed for sysrc actions. 
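The syspatch O(revert) option above accepts V(one) or V(all). A minimal sketch reverting the most recently applied patch:

- name: Revert the most recently applied system patch
  community.general.syspatch:
    revert: one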
returned: always type: bool sample: true -''' +""" from ansible.module_utils.basic import AnsibleModule import re diff --git a/plugins/modules/sysupgrade.py b/plugins/modules/sysupgrade.py index 639fa345ad..26232cd98d 100644 --- a/plugins/modules/sysupgrade.py +++ b/plugins/modules/sysupgrade.py @@ -8,54 +8,53 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: sysupgrade short_description: Manage OpenBSD system upgrades version_added: 1.1.0 description: - - Manage OpenBSD system upgrades using sysupgrade. + - Manage OpenBSD system upgrades using C(sysupgrade). extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - snapshot: - description: - - Apply the latest snapshot. - - Otherwise release will be applied. - default: false - type: bool - force: - description: - - Force upgrade (for snapshots only). - default: false - type: bool - keep_files: - description: - - Keep the files under /home/_sysupgrade. - - By default, the files will be deleted after the upgrade. - default: false - type: bool - fetch_only: - description: - - Fetch and verify files and create /bsd.upgrade but do not reboot. - - Set to V(false) if you want sysupgrade to reboot. This will cause Ansible to error, as it expects the module to exit gracefully. See the examples. - default: true - type: bool - installurl: - description: - - OpenBSD mirror top-level URL for fetching an upgrade. - - By default, the mirror URL is pulled from /etc/installurl. - type: str + snapshot: + description: + - Apply the latest snapshot. + - Otherwise release will be applied. + default: false + type: bool + force: + description: + - Force upgrade (for snapshots only). + default: false + type: bool + keep_files: + description: + - Keep the files under C(/home/_sysupgrade). + - By default, the files will be deleted after the upgrade. + default: false + type: bool + fetch_only: + description: + - Fetch and verify files and create C(/bsd.upgrade) but do not reboot. + - Set to V(false) if you want C(sysupgrade) to reboot. This will cause Ansible to error, as it expects the module to exit gracefully. See the examples. + default: true + type: bool + installurl: + description: + - OpenBSD mirror top-level URL for fetching an upgrade. + - By default, the mirror URL is pulled from C(/etc/installurl). + type: str author: - - Andrew Klaus (@precurse) -''' + - Andrew Klaus (@precurse) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Upgrade to latest release community.general.sysupgrade: register: sysupgrade @@ -77,9 +76,9 @@ EXAMPLES = r''' community.general.sysupgrade: fetch_only: false ignore_errors: true -''' +""" -RETURN = r''' +RETURN = r""" rc: description: The command return code (0 means success). 
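The sysrc O(state=value_present) mode above appends a value to an existing variable using the default space delimiter. A small sketch; the variable name and value are illustrative:

- name: Ensure lo1 is listed in cloned_interfaces
  community.general.sysrc:
    name: cloned_interfaces   # illustrative rc.conf variable
    state: value_present
    value: "lo1"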
returned: always @@ -93,7 +92,7 @@ stderr: returned: always type: str sample: "sysupgrade: need root privileges" -''' +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/taiga_issue.py b/plugins/modules/taiga_issue.py index e80ff43b89..b66db29dba 100644 --- a/plugins/modules/taiga_issue.py +++ b/plugins/modules/taiga_issue.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: taiga_issue short_description: Creates/deletes an issue in a Taiga Project Management Platform description: @@ -89,10 +88,11 @@ options: author: Alejandro Guirao (@lekum) requirements: [python-taiga] notes: -- The authentication is achieved either by the environment variable TAIGA_TOKEN or by the pair of environment variables TAIGA_USERNAME and TAIGA_PASSWORD -''' + - The authentication is achieved either by the environment variable E(TAIGA_TOKEN) or by the pair + of environment variables E(TAIGA_USERNAME) and E(TAIGA_PASSWORD). +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create an issue in the my hosted Taiga environment and attach an error log community.general.taiga_issue: taiga_host: https://mytaigahost.example.com @@ -117,9 +117,9 @@ EXAMPLES = ''' subject: An error has been found issue_type: Bug state: absent -''' +""" -RETURN = '''# ''' +RETURN = """# """ import traceback from os import getenv diff --git a/plugins/modules/telegram.py b/plugins/modules/telegram.py index 963c66353f..c2fee153ff 100644 --- a/plugins/modules/telegram.py +++ b/plugins/modules/telegram.py @@ -9,22 +9,21 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' - +DOCUMENTATION = r""" module: telegram author: - - "Artem Feofanov (@tyouxa)" - - "Nikolai Lomov (@lomserman)" + - "Artem Feofanov (@tyouxa)" + - "Nikolai Lomov (@lomserman)" short_description: Send notifications via telegram description: - - Send notifications via telegram bot, to a verified group or user. - - Also, the user may try to use any other telegram bot API method, if you specify O(api_method) argument. + - Send notifications via telegram bot, to a verified group or user. + - Also, the user may try to use any other telegram bot API method, if you specify O(api_method) argument. notes: - - You will require a telegram account and create telegram bot to use this module. + - You will require a telegram account and create telegram bot to use this module. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: check_mode: support: full @@ -49,11 +48,9 @@ options: - Any parameters for the method. - For reference to default method, V(SendMessage), see U(https://core.telegram.org/bots/api#sendmessage). version_added: 2.0.0 +""" -''' - -EXAMPLES = """ - +EXAMPLES = r""" - name: Send notify to Telegram community.general.telegram: token: '9999999:XXXXXXXXXXXXXXXXXXXXXXX' @@ -75,15 +72,14 @@ EXAMPLES = """ message_id: '{{ saved_msg_id }}' """ -RETURN = """ - +RETURN = r""" msg: - description: The message you attempted to send + description: The message you attempted to send. returned: success type: str sample: "Ansible task finished" telegram_error: - description: Error message gotten from Telegram API + description: Error message gotten from Telegram API. 
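The sysupgrade options above can combine a snapshot upgrade with O(fetch_only) and an explicit mirror. A minimal sketch; the mirror URL is illustrative:

- name: Fetch a snapshot upgrade from an explicit mirror without rebooting
  community.general.sysupgrade:
    snapshot: true
    fetch_only: true
    installurl: https://cdn.openbsd.org/pub/OpenBSD   # illustrative mirror URL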
returned: failure type: str sample: "Bad Request: message text is empty" diff --git a/plugins/modules/terraform.py b/plugins/modules/terraform.py index 5906657c66..cf2d07fc71 100644 --- a/plugins/modules/terraform.py +++ b/plugins/modules/terraform.py @@ -8,13 +8,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: terraform short_description: Manages a Terraform deployment (and plans) description: - - Provides support for deploying resources with Terraform and pulling - resource information back into Ansible. + - Provides support for deploying resources with Terraform and pulling resource information back into Ansible. extends_documentation_fragment: - community.general.attributes attributes: @@ -27,18 +25,16 @@ options: state: choices: ['planned', 'present', 'absent'] description: - - Goal state of given stage/project + - Goal state of given stage/project. type: str default: present binary_path: description: - - The path of a terraform binary to use, relative to the 'service_path' - unless you supply an absolute path. + - The path of a C(terraform) binary to use, relative to the 'service_path' unless you supply an absolute path. type: path project_path: description: - - The path to the root of the Terraform directory with the - vars.tf/main.tf/etc to use. + - The path to the root of the Terraform directory with the C(vars.tf)/C(main.tf)/etc to use. type: path required: true plugin_paths: @@ -48,88 +44,80 @@ options: - When set, the plugin discovery and auto-download behavior of Terraform is disabled. - The directory structure in the plugin path can be tricky. The Terraform docs U(https://learn.hashicorp.com/tutorials/terraform/automate-terraform#pre-installed-plugins) - show a simple directory of files, but actually, the directory structure - has to follow the same structure you would see if Terraform auto-downloaded the plugins. - See the examples below for a tree output of an example plugin directory. + show a simple directory of files, but actually, the directory structure has to follow the same structure you would see if Terraform auto-downloaded + the plugins. See the examples below for a tree output of an example plugin directory. type: list elements: path version_added: 3.0.0 workspace: description: - - The terraform workspace to work with. This sets the E(TF_WORKSPACE) environmental variable - that is used to override workspace selection. For more information about workspaces - have a look at U(https://developer.hashicorp.com/terraform/language/state/workspaces). + - The terraform workspace to work with. This sets the E(TF_WORKSPACE) environmental variable that is used to override workspace selection. + For more information about workspaces have a look at U(https://developer.hashicorp.com/terraform/language/state/workspaces). type: str default: default purge_workspace: description: - - Only works with state = absent + - Only works with state = absent. - If true, the workspace will be deleted after the "terraform destroy" action. - The 'default' workspace will not be deleted. default: false type: bool plan_file: description: - - The path to an existing Terraform plan file to apply. If this is not - specified, Ansible will build a new TF plan and execute it. - Note that this option is required if 'state' has the 'planned' value. + - The path to an existing Terraform plan file to apply. If this is not specified, Ansible will build a new TF plan and execute it. 
Note + that this option is required if 'state' has the 'planned' value. type: path state_file: description: - - The path to an existing Terraform state file to use when building plan. - If this is not specified, the default C(terraform.tfstate) will be used. + - The path to an existing Terraform state file to use when building plan. If this is not specified, the default C(terraform.tfstate) will + be used. - This option is ignored when plan is specified. type: path variables_files: description: - - The path to a variables file for Terraform to fill into the TF - configurations. This can accept a list of paths to multiple variables files. + - The path to a variables file for Terraform to fill into the TF configurations. This can accept a list of paths to multiple variables files. type: list elements: path - aliases: [ 'variables_file' ] + aliases: ['variables_file'] variables: description: - - A group of key-values pairs to override template variables or those in variables files. - By default, only string and number values are allowed, which are passed on unquoted. + - A group of key-values pairs to override template variables or those in variables files. By default, only string and number values are + allowed, which are passed on unquoted. - Support complex variable structures (lists, dictionaries, numbers, and booleans) to reflect terraform variable syntax when O(complex_vars=true). - Ansible integers or floats are mapped to terraform numbers. - Ansible strings are mapped to terraform strings. - Ansible dictionaries are mapped to terraform objects. - Ansible lists are mapped to terraform lists. - Ansible booleans are mapped to terraform booleans. - - "B(Note) passwords passed as variables will be visible in the log output. Make sure to use C(no_log=true) in production!" + - B(Note) passwords passed as variables will be visible in the log output. Make sure to use C(no_log=true) in production!. type: dict complex_vars: description: - Enable/disable capability to handle complex variable structures for C(terraform). - - If V(true) the O(variables) also accepts dictionaries, lists, and booleans to be passed to C(terraform). - Strings that are passed are correctly quoted. + - If V(true) the O(variables) also accepts dictionaries, lists, and booleans to be passed to C(terraform). Strings that are passed are correctly + quoted. - When disabled, supports only simple variables (strings, integers, and floats), and passes them on unquoted. type: bool default: false version_added: 5.7.0 targets: description: - - A list of specific resources to target in this plan/application. The - resources selected here will also auto-include any dependencies. + - A list of specific resources to target in this plan/application. The resources selected here will also auto-include any dependencies. type: list elements: str default: [] lock: description: - - Enable statefile locking, if you use a service that accepts locks (such - as S3+DynamoDB) to store your statefile. + - Enable statefile locking, if you use a service that accepts locks (such as S3+DynamoDB) to store your statefile. type: bool default: true lock_timeout: description: - - How long to maintain the lock on the statefile, if you use a service - that accepts locks (such as S3+DynamoDB). + - How long to maintain the lock on the statefile, if you use a service that accepts locks (such as S3+DynamoDB). type: int force_init: description: - - To avoid duplicating infra, if a state file can't be found this will - force a C(terraform init). 
Generally, this should be turned off unless + - To avoid duplicating infra, if a state file can't be found this will force a C(terraform init). Generally, this should be turned off unless you intend to provision an entirely new Terraform deployment. default: false type: bool @@ -145,8 +133,8 @@ options: type: dict backend_config_files: description: - - The path to a configuration file to provide at init state to the -backend-config parameter. - This can accept a list of paths to multiple configuration files. + - The path to a configuration file to provide at init state to the -backend-config parameter. This can accept a list of paths to multiple + configuration files. type: list elements: path version_added: '0.2.0' @@ -164,8 +152,8 @@ options: version_added: '1.3.0' check_destroy: description: - - Apply only when no resources are destroyed. Note that this only prevents "destroy" actions, - but not "destroy and re-create" actions. This option is ignored when O(state=absent). + - Apply only when no resources are destroyed. Note that this only prevents "destroy" actions, but not "destroy and re-create" actions. This + option is ignored when O(state=absent). type: bool default: false version_added: '3.3.0' @@ -175,12 +163,12 @@ options: type: int version_added: '3.8.0' notes: - - To just run a C(terraform plan), use check mode. -requirements: [ "terraform" ] + - To just run a C(terraform plan), use check mode. +requirements: ["terraform"] author: "Ryan Scott Brown (@ryansb)" -''' +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Basic deploy of a service community.general.terraform: project_path: '{{ project_dir }}' @@ -248,7 +236,7 @@ EXAMPLES = """ # └── terraform-provider-vsphere_v1.26.0_x4 """ -RETURN = """ +RETURN = r""" outputs: type: complex description: A dictionary of all the TF outputs by their assigned name. Use RV(ignore:outputs.MyOutputName.value) to access the value. @@ -258,18 +246,18 @@ outputs: sensitive: type: bool returned: always - description: Whether Terraform has marked this value as sensitive + description: Whether Terraform has marked this value as sensitive. type: type: str returned: always - description: The type of the value (string, int, etc) + description: The type of the value (string, int, etc). value: type: str returned: always - description: The value of the output as interpolated by Terraform + description: The value of the output as interpolated by Terraform. stdout: type: str - description: Full C(terraform) command stdout, in case you want to display it or examine the event log + description: Full C(terraform) command stdout, in case you want to display it or examine the event log. returned: always sample: '' command: diff --git a/plugins/modules/timezone.py b/plugins/modules/timezone.py index e027290e86..78df0cd45f 100644 --- a/plugins/modules/timezone.py +++ b/plugins/modules/timezone.py @@ -8,20 +8,18 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: timezone short_description: Configure timezone setting description: - - This module configures the timezone setting, both of the system clock and of the hardware clock. - If you want to set up the NTP, use M(ansible.builtin.service) module. + - This module configures the timezone setting, both of the system clock and of the hardware clock. If you want to set up the NTP, use + M(ansible.builtin.service) module. 
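With O(complex_vars=true), the terraform options above accept nested variable structures (dictionaries, lists, numbers, booleans). A small sketch assuming a hypothetical project path and variable names:

- name: Apply a Terraform project with typed variables
  community.general.terraform:
    project_path: /srv/tf/myproject   # hypothetical project path
    state: present
    force_init: true
    complex_vars: true
    variables:
      region: us-east-1          # illustrative values
      instance_count: 2
      tags:
        env: staging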
- It is recommended to restart C(crond) after changing the timezone, otherwise the jobs may run at the wrong time. - - Several different tools are used depending on the OS/Distribution involved. - For Linux it can use C(timedatectl) or edit C(/etc/sysconfig/clock) or C(/etc/timezone) and C(hwclock). - On SmartOS, C(sm-set-timezone), for macOS, C(systemsetup), for BSD, C(/etc/localtime) is modified. - On AIX, C(chtz) is used. - - Make sure that the zoneinfo files are installed with the appropriate OS package, like C(tzdata) (usually always installed, - when not using a minimal installation like Alpine Linux). + - Several different tools are used depending on the OS/Distribution involved. For Linux it can use C(timedatectl) or edit C(/etc/sysconfig/clock) + or C(/etc/timezone) and C(hwclock). On SmartOS, C(sm-set-timezone), for macOS, C(systemsetup), for BSD, C(/etc/localtime) is modified. On + AIX, C(chtz) is used. + - Make sure that the zoneinfo files are installed with the appropriate OS package, like C(tzdata) (usually always installed, when not using + a minimal installation like Alpine Linux). - Windows and HPUX are not supported, please let us know if you find any other OS/distro in which this fails. extends_documentation_fragment: - community.general.attributes @@ -35,49 +33,49 @@ options: description: - Name of the timezone for the system clock. - Default is to keep current setting. - - B(At least one of name and hwclock are required.) + - B(At least one) of O(name) and O(hwclock) are required. type: str hwclock: description: - Whether the hardware clock is in UTC or in local timezone. - Default is to keep current setting. - - Note that this option is recommended not to change and may fail - to configure, especially on virtual environments such as AWS. - - B(At least one of name and hwclock are required.) - - I(Only used on Linux.) + - Note that this option is recommended not to change and may fail to configure, especially on virtual environments such as AWS. + - B(At least one) of O(name) and O(hwclock) are required. + - I(Only used on Linux). type: str - aliases: [ rtc ] - choices: [ local, UTC ] + aliases: [rtc] + choices: [local, UTC] notes: - - On SmartOS the C(sm-set-timezone) utility (part of the smtools package) is required to set the zone timezone - - On AIX only Olson/tz database timezones are usable (POSIX is not supported). - An OS reboot is also required on AIX for the new timezone setting to take effect. - Note that AIX 6.1+ is needed (OS level 61 or newer). + - On Ubuntu 24.04 the C(util-linux-extra) package is required to provide the C(hwclock) command. + - On SmartOS the C(sm-set-timezone) utility (part of the smtools package) is required to set the zone timezone. + - On AIX only Olson/tz database timezones are usable (POSIX is not supported). An OS reboot is also required on AIX for the new timezone setting + to take effect. Note that AIX 6.1+ is needed (OS level 61 or newer). author: - Shinichi TAMURA (@tmshn) - Jasper Lievisse Adriaanse (@jasperla) - Indrajit Raychaudhuri (@indrajitr) -''' +""" -RETURN = r''' +RETURN = r""" diff: description: The differences about the given arguments. returned: success type: complex contains: before: - description: The values before change + description: The values before change. type: dict after: - description: The values after change + description: The values after change. 
type: dict -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Set timezone to Asia/Tokyo + become: true community.general.timezone: name: Asia/Tokyo -''' +""" import errno import os diff --git a/plugins/modules/twilio.py b/plugins/modules/twilio.py index 270320c465..dc397d4831 100644 --- a/plugins/modules/twilio.py +++ b/plugins/modules/twilio.py @@ -9,20 +9,17 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: twilio short_description: Sends a text message to a mobile phone through Twilio description: - - Sends a text message to a phone number through the Twilio messaging API. + - Sends a text message to a phone number through the Twilio messaging API. notes: - - This module is non-idempotent because it sends an email through the - external API. It is idempotent only in the case that the module fails. - - Like the other notification modules, this one requires an external - dependency to work. In this case, you'll need a Twilio account with - a purchased or verified phone number to send the text message. + - This module is non-idempotent because it sends an email through the external API. It is idempotent only in the case that the module fails. + - Like the other notification modules, this one requires an external dependency to work. In this case, you'll need a Twilio account with a purchased + or verified phone number to send the text message. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: check_mode: support: full @@ -50,7 +47,7 @@ options: description: - One or more phone numbers to send the text message to, format C(+15551112222). required: true - aliases: [ to_number ] + aliases: [to_number] from_number: type: str description: @@ -59,14 +56,13 @@ options: media_url: type: str description: - - A URL with a picture, video or sound clip to send with an MMS - (multimedia message) instead of a plain SMS. + - A URL with a picture, video or sound clip to send with an MMS (multimedia message) instead of a plain SMS. required: false author: "Matt Makai (@makaimc)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # send an SMS about the build status to (555) 303 5681 # note: replace account_sid and auth_token values with your credentials # and you have to have the 'from_number' on your Twilio account @@ -75,8 +71,8 @@ EXAMPLES = ''' msg: All servers with webserver role are now configured. account_sid: ACXXXXXXXXXXXXXXXXX auth_token: ACXXXXXXXXXXXXXXXXX - from_number: +15552014545 - to_number: +15553035681 + from_number: "+15552014545" + to_number: "+15553035681" delegate_to: localhost # send an SMS to multiple phone numbers about the deployment @@ -87,11 +83,11 @@ EXAMPLES = ''' msg: This server configuration is now complete. account_sid: ACXXXXXXXXXXXXXXXXX auth_token: ACXXXXXXXXXXXXXXXXX - from_number: +15553258899 + from_number: "+15553258899" to_numbers: - - +15551113232 - - +12025551235 - - +19735559010 + - "+15551113232" + - "+12025551235" + - "+19735559010" delegate_to: localhost # send an MMS to a single recipient with an update on the deployment @@ -103,11 +99,11 @@ EXAMPLES = ''' msg: Deployment complete! 
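The timezone O(hwclock) option above can be set independently of O(name), since at least one of the two is required. A minimal sketch for Linux hosts:

- name: Ensure the hardware clock is kept in UTC
  become: true
  community.general.timezone:
    hwclock: UTC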
account_sid: ACXXXXXXXXXXXXXXXXX auth_token: ACXXXXXXXXXXXXXXXXX - from_number: +15552014545 - to_number: +15553035681 + from_number: "+15552014545" + to_number: "+15553035681" media_url: https://demo.twilio.com/logo.png delegate_to: localhost -''' +""" # ======================================= # twilio module support methods diff --git a/plugins/modules/typetalk.py b/plugins/modules/typetalk.py index ddf9f35605..6364cdc45b 100644 --- a/plugins/modules/typetalk.py +++ b/plugins/modules/typetalk.py @@ -9,12 +9,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: typetalk short_description: Send a message to typetalk description: - - Send a message to typetalk using typetalk API + - Send a message to typetalk using typetalk API. extends_documentation_fragment: - community.general.attributes attributes: @@ -26,35 +25,35 @@ options: client_id: type: str description: - - OAuth2 client ID + - OAuth2 client ID. required: true client_secret: type: str description: - - OAuth2 client secret + - OAuth2 client secret. required: true topic: type: int description: - - topic id to post message + - Topic id to post message. required: true msg: type: str description: - - message body + - Message body. required: true -requirements: [ json ] +requirements: [json] author: "Takashi Someda (@tksmd)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Send a message to typetalk community.general.typetalk: client_id: 12345 client_secret: 12345 topic: 1 msg: install completed -''' +""" import json diff --git a/plugins/modules/udm_dns_record.py b/plugins/modules/udm_dns_record.py index 99fe10c63e..a87ce5fede 100644 --- a/plugins/modules/udm_dns_record.py +++ b/plugins/modules/udm_dns_record.py @@ -10,63 +10,60 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: udm_dns_record author: - - Tobias Rüetschi (@keachi) + - Tobias Rüetschi (@keachi) short_description: Manage dns entries on a univention corporate server description: - - "This module allows to manage dns records on a univention corporate server (UCS). - It uses the python API of the UCS to create a new object or edit it." + - This module allows to manage dns records on a univention corporate server (UCS). It uses the Python API of the UCS to create a new object + or edit it. requirements: - - Univention - - ipaddress (for O(type=ptr_record)) + - Univention + - ipaddress (for O(type=ptr_record)) extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: partial + check_mode: + support: full + diff_mode: + support: partial options: - state: - type: str - default: "present" - choices: [ present, absent ] - description: - - Whether the dns record is present or not. - name: - type: str - required: true - description: - - "Name of the record, this is also the DNS record. E.g. www for - www.example.com." - - For PTR records this has to be the IP address. - zone: - type: str - required: true - description: - - Corresponding DNS zone for this record, e.g. example.com. - - For PTR records this has to be the full reverse zone (for example V(1.1.192.in-addr.arpa)). - type: - type: str - required: true - description: - - "Define the record type. 
V(host_record) is a A or AAAA record, - V(alias) is a CNAME, V(ptr_record) is a PTR record, V(srv_record) - is a SRV record and V(txt_record) is a TXT record." - - "The available choices are: V(host_record), V(alias), V(ptr_record), V(srv_record), V(txt_record)." - data: - type: dict - default: {} - description: - - "Additional data for this record, for example V({'a': '192.0.2.1'})." - - Required if O(state=present). -''' + state: + type: str + default: "present" + choices: [present, absent] + description: + - Whether the dns record is present or not. + name: + type: str + required: true + description: + - Name of the record, this is also the DNS record. For example V(www) for www.example.com. + - For PTR records this has to be the IP address. + zone: + type: str + required: true + description: + - Corresponding DNS zone for this record, for example V(example.com). + - For PTR records this has to be the full reverse zone (for example V(1.1.192.in-addr.arpa)). + type: + type: str + required: true + description: + - Define the record type. V(host_record) is a A or AAAA record, V(alias) is a CNAME, V(ptr_record) is a PTR record, V(srv_record) is a SRV + record and V(txt_record) is a TXT record. + - 'The available choices are: V(host_record), V(alias), V(ptr_record), V(srv_record), V(txt_record).' + data: + type: dict + default: {} + description: + - "Additional data for this record, for example V({'a': '192.0.2.1'})." + - Required if O(state=present). +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a DNS record on a UCS community.general.udm_dns_record: name: www @@ -74,8 +71,8 @@ EXAMPLES = ''' type: host_record data: a: - - 192.0.2.1 - - 2001:0db8::42 + - 192.0.2.1 + - 2001:0db8::42 - name: Create a DNS v4 PTR record on a UCS community.general.udm_dns_record: @@ -92,10 +89,10 @@ EXAMPLES = ''' type: ptr_record data: ptr_record: "www.example.com." -''' +""" -RETURN = '''#''' +RETURN = """#""" from ansible.module_utils.basic import AnsibleModule @@ -196,8 +193,7 @@ def main(): else: obj['name'] = name - for k, v in data.items(): - obj[k] = v + obj.update(data) diff = obj.diff() changed = obj.diff() != [] if not module.check_mode: diff --git a/plugins/modules/udm_dns_zone.py b/plugins/modules/udm_dns_zone.py index 387d5cc45b..765f996aba 100644 --- a/plugins/modules/udm_dns_zone.py +++ b/plugins/modules/udm_dns_zone.py @@ -10,91 +10,87 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: udm_dns_zone author: - - Tobias Rüetschi (@keachi) + - Tobias Rüetschi (@keachi) short_description: Manage dns zones on a univention corporate server description: - - "This module allows to manage dns zones on a univention corporate server (UCS). - It uses the python API of the UCS to create a new object or edit it." + - This module allows to manage dns zones on a univention corporate server (UCS). It uses the Python API of the UCS to create a new object or + edit it. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: partial + check_mode: + support: full + diff_mode: + support: partial options: - state: - type: str - default: "present" - choices: [ present, absent ] - description: - - Whether the dns zone is present or not. - type: - type: str - required: true - description: - - Define if the zone is a forward or reverse DNS zone. - - "The available choices are: V(forward_zone), V(reverse_zone)." 
- zone: - type: str - required: true - description: - - DNS zone name, for example V(example.com). - aliases: [name] - nameserver: - type: list - elements: str - default: [] - description: - - List of appropriate name servers. Required if O(state=present). - interfaces: - type: list - elements: str - default: [] - description: - - List of interface IP addresses, on which the server should - response this zone. Required if O(state=present). - - refresh: - type: int - default: 3600 - description: - - Interval before the zone should be refreshed. - retry: - type: int - default: 1800 - description: - - Interval that should elapse before a failed refresh should be retried. - expire: - type: int - default: 604800 - description: - - Specifies the upper limit on the time interval that can elapse before the zone is no longer authoritative. - ttl: - type: int - default: 600 - description: - - Minimum TTL field that should be exported with any RR from this zone. - - contact: - type: str - default: '' - description: - - Contact person in the SOA record. - mx: - type: list - elements: str - default: [] - description: - - List of MX servers. (Must declared as A or AAAA records). -''' + state: + type: str + default: "present" + choices: [present, absent] + description: + - Whether the DNS zone is present or not. + type: + type: str + required: true + description: + - Define if the zone is a forward or reverse DNS zone. + - 'The available choices are: V(forward_zone), V(reverse_zone).' + zone: + type: str + required: true + description: + - DNS zone name, for example V(example.com). + aliases: [name] + nameserver: + type: list + elements: str + default: [] + description: + - List of appropriate name servers. Required if O(state=present). + interfaces: + type: list + elements: str + default: [] + description: + - List of interface IP addresses, on which the server should response this zone. Required if O(state=present). + refresh: + type: int + default: 3600 + description: + - Interval before the zone should be refreshed. + retry: + type: int + default: 1800 + description: + - Interval that should elapse before a failed refresh should be retried. + expire: + type: int + default: 604800 + description: + - Specifies the upper limit on the time interval that can elapse before the zone is no longer authoritative. + ttl: + type: int + default: 600 + description: + - Minimum TTL field that should be exported with any RR from this zone. + contact: + type: str + default: '' + description: + - Contact person in the SOA record. + mx: + type: list + elements: str + default: [] + description: + - List of MX servers. (Must declared as A or AAAA records). 
+""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a DNS zone on a UCS community.general.udm_dns_zone: zone: example.com @@ -103,10 +99,10 @@ EXAMPLES = ''' - ucs.example.com interfaces: - 192.0.2.1 -''' +""" -RETURN = '''# ''' +RETURN = """# """ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.univention_umc import ( diff --git a/plugins/modules/udm_group.py b/plugins/modules/udm_group.py index 5fe2422f8b..238b0182ed 100644 --- a/plugins/modules/udm_group.py +++ b/plugins/modules/udm_group.py @@ -10,63 +10,61 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: udm_group author: - - Tobias Rüetschi (@keachi) + - Tobias Rüetschi (@keachi) short_description: Manage of the posix group description: - - "This module allows to manage user groups on a univention corporate server (UCS). - It uses the python API of the UCS to create a new object or edit it." + - This module allows to manage user groups on a univention corporate server (UCS). It uses the Python API of the UCS to create a new object + or edit it. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: partial + check_mode: + support: full + diff_mode: + support: partial options: - state: - required: false - default: "present" - choices: [ present, absent ] - description: - - Whether the group is present or not. - type: str - name: - required: true - description: - - Name of the posix group. - type: str + state: + required: false + default: "present" + choices: [present, absent] description: - required: false - description: - - Group description. - type: str - position: - required: false - description: - - define the whole ldap position of the group, e.g. - C(cn=g123m-1A,cn=classes,cn=schueler,cn=groups,ou=schule,dc=example,dc=com). - type: str - default: '' - ou: - required: false - description: - - LDAP OU, e.g. school for LDAP OU C(ou=school,dc=example,dc=com). - type: str - default: '' - subpath: - required: false - description: - - Subpath inside the OU, e.g. C(cn=classes,cn=students,cn=groups). - type: str - default: "cn=groups" -''' + - Whether the group is present or not. + type: str + name: + required: true + description: + - Name of the POSIX group. + type: str + description: + required: false + description: + - Group description. + type: str + position: + required: false + description: + - Define the whole LDAP position of the group, for example V(cn=g123m-1A,cn=classes,cn=schueler,cn=groups,ou=schule,dc=example,dc=com). + type: str + default: '' + ou: + required: false + description: + - LDAP OU, for example V(school) for LDAP OU V(ou=school,dc=example,dc=com). + type: str + default: '' + subpath: + required: false + description: + - Subpath inside the OU, for example V(cn=classes,cn=students,cn=groups). 
+ type: str + default: "cn=groups" +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a POSIX group community.general.udm_group: name: g123m-1A @@ -84,10 +82,10 @@ EXAMPLES = ''' community.general.udm_group: name: g123m-1A position: 'cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com' -''' +""" -RETURN = '''# ''' +RETURN = """# """ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.univention_umc import ( diff --git a/plugins/modules/udm_share.py b/plugins/modules/udm_share.py index 8ae243b3de..3489607b09 100644 --- a/plugins/modules/udm_share.py +++ b/plugins/modules/udm_share.py @@ -10,339 +10,337 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: udm_share author: - - Tobias Rüetschi (@keachi) + - Tobias Rüetschi (@keachi) short_description: Manage samba shares on a univention corporate server description: - - "This module allows to manage samba shares on a univention corporate - server (UCS). - It uses the python API of the UCS to create a new object or edit it." + - This module allows to manage samba shares on a univention corporate server (UCS). It uses the Python API of the UCS to create a new object + or edit it. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: partial + check_mode: + support: full + diff_mode: + support: partial options: - state: - default: "present" - choices: [ present, absent ] - description: - - Whether the share is present or not. - type: str - name: - required: true - description: - - Name - type: str - host: - required: false - description: - - Host FQDN (server which provides the share), for example V({{ ansible_fqdn }}). Required if O(state=present). - type: str - path: - required: false - description: - - Directory on the providing server, for example V(/home). Required if O(state=present). - type: path - sambaName: - required: false - description: - - Windows name. Required if O(state=present). - type: str - aliases: [ samba_name ] - ou: - required: true - description: - - Organisational unit, inside the LDAP Base DN. - type: str - owner: - default: '0' - description: - - Directory owner of the share's root directory. - type: str - group: - default: '0' - description: - - Directory owner group of the share's root directory. - type: str - directorymode: - default: '00755' - description: - - Permissions for the share's root directory. - type: str - root_squash: - default: true - description: - - Modify user ID for root user (root squashing). - type: bool - subtree_checking: - default: true - description: - - Subtree checking. - type: bool - sync: - default: 'sync' - description: - - NFS synchronisation. - type: str - writeable: - default: true - description: - - NFS write access. - type: bool - sambaBlockSize: - description: - - Blocking size. - type: str - aliases: [ samba_block_size ] - sambaBlockingLocks: - default: true - description: - - Blocking locks. - type: bool - aliases: [ samba_blocking_locks ] - sambaBrowseable: - description: - - Show in Windows network environment. - type: bool - default: true - aliases: [ samba_browsable ] - sambaCreateMode: - default: '0744' - description: - - File mode. - type: str - aliases: [ samba_create_mode ] - sambaCscPolicy: - default: 'manual' - description: - - Client-side caching policy. 
- type: str - aliases: [ samba_csc_policy ] - sambaCustomSettings: - default: [] - description: - - Option name in smb.conf and its value. - type: list - elements: dict - aliases: [ samba_custom_settings ] - sambaDirectoryMode: - default: '0755' - description: - - Directory mode. - type: str - aliases: [ samba_directory_mode ] - sambaDirectorySecurityMode: - default: '0777' - description: - - Directory security mode. - type: str - aliases: [ samba_directory_security_mode ] - sambaDosFilemode: - default: false - description: - - Users with write access may modify permissions. - type: bool - aliases: [ samba_dos_filemode ] - sambaFakeOplocks: - default: false - description: - - Fake oplocks. - type: bool - aliases: [ samba_fake_oplocks ] - sambaForceCreateMode: - default: false - description: - - Force file mode. - type: bool - aliases: [ samba_force_create_mode ] - sambaForceDirectoryMode: - default: false - description: - - Force directory mode. - type: bool - aliases: [ samba_force_directory_mode ] - sambaForceDirectorySecurityMode: - default: false - description: - - Force directory security mode. - type: bool - aliases: [ samba_force_directory_security_mode ] - sambaForceGroup: - description: - - Force group. - type: str - aliases: [ samba_force_group ] - sambaForceSecurityMode: - default: false - description: - - Force security mode. - type: bool - aliases: [ samba_force_security_mode ] - sambaForceUser: - description: - - Force user. - type: str - aliases: [ samba_force_user ] - sambaHideFiles: - description: - - Hide files. - type: str - aliases: [ samba_hide_files ] - sambaHideUnreadable: - default: false - description: - - Hide unreadable files/directories. - type: bool - aliases: [ samba_hide_unreadable ] - sambaHostsAllow: - default: [] - description: - - Allowed host/network. - type: list - elements: str - aliases: [ samba_hosts_allow ] - sambaHostsDeny: - default: [] - description: - - Denied host/network. - type: list - elements: str - aliases: [ samba_hosts_deny ] - sambaInheritAcls: - default: true - description: - - Inherit ACLs. - type: bool - aliases: [ samba_inherit_acls ] - sambaInheritOwner: - default: false - description: - - Create files/directories with the owner of the parent directory. - type: bool - aliases: [ samba_inherit_owner ] - sambaInheritPermissions: - default: false - description: - - Create files/directories with permissions of the parent directory. - type: bool - aliases: [ samba_inherit_permissions ] - sambaInvalidUsers: - description: - - Invalid users or groups. - type: str - aliases: [ samba_invalid_users ] - sambaLevel2Oplocks: - default: true - description: - - Level 2 oplocks. - type: bool - aliases: [ samba_level_2_oplocks ] - sambaLocking: - default: true - description: - - Locking. - type: bool - aliases: [ samba_locking ] - sambaMSDFSRoot: - default: false - description: - - MSDFS root. - type: bool - aliases: [ samba_msdfs_root ] - sambaNtAclSupport: - default: true - description: - - NT ACL support. - type: bool - aliases: [ samba_nt_acl_support ] - sambaOplocks: - default: true - description: - - Oplocks. - type: bool - aliases: [ samba_oplocks ] - sambaPostexec: - description: - - Postexec script. - type: str - aliases: [ samba_postexec ] - sambaPreexec: - description: - - Preexec script. - type: str - aliases: [ samba_preexec ] - sambaPublic: - default: false - description: - - Allow anonymous read-only access with a guest user. 
- type: bool - aliases: [ samba_public ] - sambaSecurityMode: - default: '0777' - description: - - Security mode. - type: str - aliases: [ samba_security_mode ] - sambaStrictLocking: - default: 'Auto' - description: - - Strict locking. - type: str - aliases: [ samba_strict_locking ] - sambaVFSObjects: - description: - - VFS objects. - type: str - aliases: [ samba_vfs_objects ] - sambaValidUsers: - description: - - Valid users or groups. - type: str - aliases: [ samba_valid_users ] - sambaWriteList: - description: - - Restrict write access to these users/groups. - type: str - aliases: [ samba_write_list ] - sambaWriteable: - default: true - description: - - Samba write access. - type: bool - aliases: [ samba_writeable ] - nfs_hosts: - default: [] - description: - - Only allow access for this host, IP address or network. - type: list - elements: str - nfsCustomSettings: - default: [] - description: - - Option name in exports file. - type: list - elements: str - aliases: [ nfs_custom_settings ] -''' + state: + default: "present" + choices: [present, absent] + description: + - Whether the share is present or not. + type: str + name: + required: true + description: + - Name. + type: str + host: + required: false + description: + - Host FQDN (server which provides the share), for example V({{ ansible_fqdn }}). Required if O(state=present). + type: str + path: + required: false + description: + - Directory on the providing server, for example V(/home). Required if O(state=present). + type: path + sambaName: + required: false + description: + - Windows name. Required if O(state=present). + type: str + aliases: [samba_name] + ou: + required: true + description: + - Organisational unit, inside the LDAP Base DN. + type: str + owner: + default: '0' + description: + - Directory owner of the share's root directory. + type: str + group: + default: '0' + description: + - Directory owner group of the share's root directory. + type: str + directorymode: + default: '00755' + description: + - Permissions for the share's root directory. + type: str + root_squash: + default: true + description: + - Modify user ID for root user (root squashing). + type: bool + subtree_checking: + default: true + description: + - Subtree checking. + type: bool + sync: + default: 'sync' + description: + - NFS synchronisation. + type: str + writeable: + default: true + description: + - NFS write access. + type: bool + sambaBlockSize: + description: + - Blocking size. + type: str + aliases: [samba_block_size] + sambaBlockingLocks: + default: true + description: + - Blocking locks. + type: bool + aliases: [samba_blocking_locks] + sambaBrowseable: + description: + - Show in Windows network environment. + type: bool + default: true + aliases: [samba_browsable] + sambaCreateMode: + default: '0744' + description: + - File mode. + type: str + aliases: [samba_create_mode] + sambaCscPolicy: + default: 'manual' + description: + - Client-side caching policy. + type: str + aliases: [samba_csc_policy] + sambaCustomSettings: + default: [] + description: + - Option name in smb.conf and its value. + type: list + elements: dict + aliases: [samba_custom_settings] + sambaDirectoryMode: + default: '0755' + description: + - Directory mode. + type: str + aliases: [samba_directory_mode] + sambaDirectorySecurityMode: + default: '0777' + description: + - Directory security mode. + type: str + aliases: [samba_directory_security_mode] + sambaDosFilemode: + default: false + description: + - Users with write access may modify permissions. 
+ type: bool + aliases: [samba_dos_filemode] + sambaFakeOplocks: + default: false + description: + - Fake oplocks. + type: bool + aliases: [samba_fake_oplocks] + sambaForceCreateMode: + default: false + description: + - Force file mode. + type: bool + aliases: [samba_force_create_mode] + sambaForceDirectoryMode: + default: false + description: + - Force directory mode. + type: bool + aliases: [samba_force_directory_mode] + sambaForceDirectorySecurityMode: + default: false + description: + - Force directory security mode. + type: bool + aliases: [samba_force_directory_security_mode] + sambaForceGroup: + description: + - Force group. + type: str + aliases: [samba_force_group] + sambaForceSecurityMode: + default: false + description: + - Force security mode. + type: bool + aliases: [samba_force_security_mode] + sambaForceUser: + description: + - Force user. + type: str + aliases: [samba_force_user] + sambaHideFiles: + description: + - Hide files. + type: str + aliases: [samba_hide_files] + sambaHideUnreadable: + default: false + description: + - Hide unreadable files/directories. + type: bool + aliases: [samba_hide_unreadable] + sambaHostsAllow: + default: [] + description: + - Allowed host/network. + type: list + elements: str + aliases: [samba_hosts_allow] + sambaHostsDeny: + default: [] + description: + - Denied host/network. + type: list + elements: str + aliases: [samba_hosts_deny] + sambaInheritAcls: + default: true + description: + - Inherit ACLs. + type: bool + aliases: [samba_inherit_acls] + sambaInheritOwner: + default: false + description: + - Create files/directories with the owner of the parent directory. + type: bool + aliases: [samba_inherit_owner] + sambaInheritPermissions: + default: false + description: + - Create files/directories with permissions of the parent directory. + type: bool + aliases: [samba_inherit_permissions] + sambaInvalidUsers: + description: + - Invalid users or groups. + type: str + aliases: [samba_invalid_users] + sambaLevel2Oplocks: + default: true + description: + - Level 2 oplocks. + type: bool + aliases: [samba_level_2_oplocks] + sambaLocking: + default: true + description: + - Locking. + type: bool + aliases: [samba_locking] + sambaMSDFSRoot: + default: false + description: + - MSDFS root. + type: bool + aliases: [samba_msdfs_root] + sambaNtAclSupport: + default: true + description: + - NT ACL support. + type: bool + aliases: [samba_nt_acl_support] + sambaOplocks: + default: true + description: + - Oplocks. + type: bool + aliases: [samba_oplocks] + sambaPostexec: + description: + - Postexec script. + type: str + aliases: [samba_postexec] + sambaPreexec: + description: + - Preexec script. + type: str + aliases: [samba_preexec] + sambaPublic: + default: false + description: + - Allow anonymous read-only access with a guest user. + type: bool + aliases: [samba_public] + sambaSecurityMode: + default: '0777' + description: + - Security mode. + type: str + aliases: [samba_security_mode] + sambaStrictLocking: + default: 'Auto' + description: + - Strict locking. + type: str + aliases: [samba_strict_locking] + sambaVFSObjects: + description: + - VFS objects. + type: str + aliases: [samba_vfs_objects] + sambaValidUsers: + description: + - Valid users or groups. + type: str + aliases: [samba_valid_users] + sambaWriteList: + description: + - Restrict write access to these users/groups. + type: str + aliases: [samba_write_list] + sambaWriteable: + default: true + description: + - Samba write access. 
+ type: bool + aliases: [samba_writeable] + nfs_hosts: + default: [] + description: + - Only allow access for this host, IP address or network. + type: list + elements: str + nfsCustomSettings: + default: [] + description: + - Option name in exports file. + type: list + elements: str + aliases: [nfs_custom_settings] +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a share named home on the server ucs.example.com with the path /home community.general.udm_share: name: home path: /home host: ucs.example.com sambaName: Home -''' +""" -RETURN = '''# ''' +RETURN = """# """ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.univention_umc import ( diff --git a/plugins/modules/udm_user.py b/plugins/modules/udm_user.py index dcbf0ec85e..bb431ca75f 100644 --- a/plugins/modules/udm_user.py +++ b/plugins/modules/udm_user.py @@ -10,291 +10,285 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: udm_user author: - - Tobias Rüetschi (@keachi) + - Tobias Rüetschi (@keachi) short_description: Manage posix users on a univention corporate server description: - - "This module allows to manage posix users on a univention corporate - server (UCS). - It uses the python API of the UCS to create a new object or edit it." + - This module allows to manage posix users on a univention corporate server (UCS). It uses the Python API of the UCS to create a new object + or edit it. +notes: + - This module requires the deprecated L(crypt Python module, https://docs.python.org/3.12/library/crypt.html) library which was removed from + Python 3.13. For Python 3.13 or newer, you need to install L(legacycrypt, https://pypi.org/project/legacycrypt/). +requirements: + - legacycrypt (on Python 3.13 or newer) extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: full - diff_mode: - support: partial + check_mode: + support: full + diff_mode: + support: partial options: - state: - default: "present" - choices: [ present, absent ] - description: - - Whether the user is present or not. - type: str - username: - required: true - description: - - User name - aliases: ['name'] - type: str - firstname: - description: - - First name. Required if O(state=present). - type: str - lastname: - description: - - Last name. Required if O(state=present). - type: str - password: - description: - - Password. Required if O(state=present). - type: str - birthday: - description: - - Birthday - type: str - city: - description: - - City of users business address. - type: str - country: - description: - - Country of users business address. - type: str - department_number: - description: - - Department number of users business address. - aliases: [ departmentNumber ] - type: str + state: + default: "present" + choices: [present, absent] description: - description: - - Description (not gecos) - type: str - display_name: - description: - - Display name (not gecos) - aliases: [ displayName ] - type: str - email: - default: [''] - description: - - A list of e-mail addresses. 
- type: list - elements: str - employee_number: - description: - - Employee number - aliases: [ employeeNumber ] - type: str - employee_type: - description: - - Employee type - aliases: [ employeeType ] - type: str - gecos: - description: - - GECOS - type: str - groups: - default: [] - description: - - "POSIX groups, the LDAP DNs of the groups will be found with the - LDAP filter for each group as $GROUP: - V((&(objectClass=posixGroup\\)(cn=$GROUP\\)\\))." - type: list - elements: str - home_share: - description: - - "Home NFS share. Must be a LDAP DN, e.g. - V(cn=home,cn=shares,ou=school,dc=example,dc=com)." - aliases: [ homeShare ] - type: str - home_share_path: - description: - - Path to home NFS share, inside the homeShare. - aliases: [ homeSharePath ] - type: str - home_telephone_number: - default: [] - description: - - List of private telephone numbers. - aliases: [ homeTelephoneNumber ] - type: list - elements: str - homedrive: - description: - - Windows home drive, for example V("H:"). - type: str - mail_alternative_address: - default: [] - description: - - List of alternative e-mail addresses. - aliases: [ mailAlternativeAddress ] - type: list - elements: str - mail_home_server: - description: - - FQDN of mail server - aliases: [ mailHomeServer ] - type: str - mail_primary_address: - description: - - Primary e-mail address - aliases: [ mailPrimaryAddress ] - type: str - mobile_telephone_number: - default: [] - description: - - Mobile phone number - aliases: [ mobileTelephoneNumber ] - type: list - elements: str - organisation: - description: - - Organisation - aliases: [ organization ] - type: str - overridePWHistory: - type: bool - default: false - description: - - Override password history - aliases: [ override_pw_history ] - overridePWLength: - type: bool - default: false - description: - - Override password check - aliases: [ override_pw_length ] - pager_telephonenumber: - default: [] - description: - - List of pager telephone numbers. - aliases: [ pagerTelephonenumber ] - type: list - elements: str - phone: - description: - - List of telephone numbers. - type: list - elements: str - default: [] - postcode: - description: - - Postal code of users business address. - type: str - primary_group: - description: - - Primary group. This must be the group LDAP DN. - - If not specified, it defaults to V(cn=Domain Users,cn=groups,$LDAP_BASE_DN). - aliases: [ primaryGroup ] - type: str - profilepath: - description: - - Windows profile directory - type: str - pwd_change_next_login: - choices: [ '0', '1' ] - description: - - Change password on next login. - aliases: [ pwdChangeNextLogin ] - type: str - room_number: - description: - - Room number of users business address. - aliases: [ roomNumber ] - type: str - samba_privileges: - description: - - "Samba privilege, like allow printer administration, do domain - join." - aliases: [ sambaPrivileges ] - type: list - elements: str - default: [] - samba_user_workstations: - description: - - Allow the authentication only on this Microsoft Windows host. - aliases: [ sambaUserWorkstations ] - type: list - elements: str - default: [] - sambahome: - description: - - Windows home path, for example V('\\\\$FQDN\\$USERNAME'). - type: str - scriptpath: - description: - - Windows logon script. - type: str - secretary: - default: [] - description: - - A list of superiors as LDAP DNs. - type: list - elements: str - serviceprovider: - default: [''] - description: - - Enable user for the following service providers. 
- type: list - elements: str - shell: - default: '/bin/bash' - description: - - Login shell - type: str - street: - description: - - Street of users business address. - type: str - title: - description: - - Title, for example V(Prof.). - type: str - unixhome: - description: - - Unix home directory - - If not specified, it defaults to C(/home/$USERNAME). - type: str - userexpiry: - description: - - Account expiry date, for example V(1999-12-31). - - If not specified, it defaults to the current day plus one year. - type: str - position: - default: '' - description: - - "Define the whole position of users object inside the LDAP tree, - for example V(cn=employee,cn=users,ou=school,dc=example,dc=com)." - type: str - update_password: - default: always - choices: [ always, on_create ] - description: - - "V(always) will update passwords if they differ. - V(on_create) will only set the password for newly created users." - type: str - ou: - default: '' - description: - - "Organizational Unit inside the LDAP Base DN, for example V(school) for - LDAP OU C(ou=school,dc=example,dc=com)." - type: str - subpath: - default: 'cn=users' - description: - - "LDAP subpath inside the organizational unit, for example - V(cn=teachers,cn=users) for LDAP container - C(cn=teachers,cn=users,dc=example,dc=com)." - type: str -''' + - Whether the user is present or not. + type: str + username: + required: true + description: + - User name. + aliases: ['name'] + type: str + firstname: + description: + - First name. Required if O(state=present). + type: str + lastname: + description: + - Last name. Required if O(state=present). + type: str + password: + description: + - Password. Required if O(state=present). + type: str + birthday: + description: + - Birthday. + type: str + city: + description: + - City of users business address. + type: str + country: + description: + - Country of users business address. + type: str + department_number: + description: + - Department number of users business address. + aliases: [departmentNumber] + type: str + description: + description: + - Description (not gecos). + type: str + display_name: + description: + - Display name (not gecos). + aliases: [displayName] + type: str + email: + default: [''] + description: + - A list of e-mail addresses. + type: list + elements: str + employee_number: + description: + - Employee number. + aliases: [employeeNumber] + type: str + employee_type: + description: + - Employee type. + aliases: [employeeType] + type: str + gecos: + description: + - GECOS. + type: str + groups: + default: [] + description: + - 'POSIX groups, the LDAP DNs of the groups will be found with the LDAP filter for each group as $GROUP: V((&(objectClass=posixGroup\)(cn=$GROUP\)\)).' + type: list + elements: str + home_share: + description: + - Home NFS share. Must be a LDAP DN, for example V(cn=home,cn=shares,ou=school,dc=example,dc=com). + aliases: [homeShare] + type: str + home_share_path: + description: + - Path to home NFS share, inside the homeShare. + aliases: [homeSharePath] + type: str + home_telephone_number: + default: [] + description: + - List of private telephone numbers. + aliases: [homeTelephoneNumber] + type: list + elements: str + homedrive: + description: + - Windows home drive, for example V("H:"). + type: str + mail_alternative_address: + default: [] + description: + - List of alternative e-mail addresses. + aliases: [mailAlternativeAddress] + type: list + elements: str + mail_home_server: + description: + - FQDN of mail server. 
+ aliases: [mailHomeServer] + type: str + mail_primary_address: + description: + - Primary e-mail address. + aliases: [mailPrimaryAddress] + type: str + mobile_telephone_number: + default: [] + description: + - Mobile phone number. + aliases: [mobileTelephoneNumber] + type: list + elements: str + organisation: + description: + - Organisation. + aliases: [organization] + type: str + overridePWHistory: + type: bool + default: false + description: + - Override password history. + aliases: [override_pw_history] + overridePWLength: + type: bool + default: false + description: + - Override password check. + aliases: [override_pw_length] + pager_telephonenumber: + default: [] + description: + - List of pager telephone numbers. + aliases: [pagerTelephonenumber] + type: list + elements: str + phone: + description: + - List of telephone numbers. + type: list + elements: str + default: [] + postcode: + description: + - Postal code of users business address. + type: str + primary_group: + description: + - Primary group. This must be the group LDAP DN. + - If not specified, it defaults to V(cn=Domain Users,cn=groups,$LDAP_BASE_DN). + aliases: [primaryGroup] + type: str + profilepath: + description: + - Windows profile directory. + type: str + pwd_change_next_login: + choices: ['0', '1'] + description: + - Change password on next login. + aliases: [pwdChangeNextLogin] + type: str + room_number: + description: + - Room number of users business address. + aliases: [roomNumber] + type: str + samba_privileges: + description: + - Samba privilege, like allow printer administration, do domain join. + aliases: [sambaPrivileges] + type: list + elements: str + default: [] + samba_user_workstations: + description: + - Allow the authentication only on this Microsoft Windows host. + aliases: [sambaUserWorkstations] + type: list + elements: str + default: [] + sambahome: + description: + - Windows home path, for example V('\\\\$FQDN\\$USERNAME'). + type: str + scriptpath: + description: + - Windows logon script. + type: str + secretary: + default: [] + description: + - A list of superiors as LDAP DNs. + type: list + elements: str + serviceprovider: + default: [''] + description: + - Enable user for the following service providers. + type: list + elements: str + shell: + default: '/bin/bash' + description: + - Login shell. + type: str + street: + description: + - Street of users business address. + type: str + title: + description: + - Title, for example V(Prof.). + type: str + unixhome: + description: + - Unix home directory. + - If not specified, it defaults to C(/home/$USERNAME). + type: str + userexpiry: + description: + - Account expiry date, for example V(1999-12-31). + - If not specified, it defaults to the current day plus one year. + type: str + position: + default: '' + description: + - Define the whole position of users object inside the LDAP tree, for example V(cn=employee,cn=users,ou=school,dc=example,dc=com). + type: str + update_password: + default: always + choices: [always, on_create] + description: + - V(always) will update passwords if they differ. V(on_create) will only set the password for newly created users. + type: str + ou: + default: '' + description: + - Organizational Unit inside the LDAP Base DN, for example V(school) for LDAP OU C(ou=school,dc=example,dc=com). + type: str + subpath: + default: 'cn=users' + description: + - LDAP subpath inside the organizational unit, for example V(cn=teachers,cn=users) for LDAP container C(cn=teachers,cn=users,dc=example,dc=com). 
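The groups option earlier in this option list documents that each group name is expanded into an LDAP search filter of the form (&(objectClass=posixGroup)(cn=$GROUP)). A minimal sketch of that substitution is below; the helper name and the RFC 4515 escaping are assumptions for illustration and are not lifted from udm_user, which may resolve group DNs differently:

```python
def posix_group_filter(group_name):
    """Build the LDAP filter documented for the udm_user 'groups' option."""
    # Escape the characters that are special in LDAP filters (RFC 4515).
    escaped = (
        group_name.replace("\\", "\\5c")
                  .replace("*", "\\2a")
                  .replace("(", "\\28")
                  .replace(")", "\\29")
    )
    return "(&(objectClass=posixGroup)(cn=%s))" % escaped


print(posix_group_filter("teachers"))  # (&(objectClass=posixGroup)(cn=teachers))
```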
+ type: str +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a user on a UCS community.general.udm_user: name: FooBar @@ -319,15 +313,15 @@ EXAMPLES = ''' firstname: Foo lastname: Bar position: 'cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com' -''' +""" -RETURN = '''# ''' +RETURN = """# """ -import crypt from datetime import date, timedelta +import traceback -from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible_collections.community.general.plugins.module_utils.univention_umc import ( umc_module_for_add, umc_module_for_edit, @@ -335,6 +329,26 @@ from ansible_collections.community.general.plugins.module_utils.univention_umc i base_dn, ) +try: + import crypt +except ImportError: + HAS_CRYPT = False + CRYPT_IMPORT_ERROR = traceback.format_exc() +else: + HAS_CRYPT = True + CRYPT_IMPORT_ERROR = None + +try: + import legacycrypt + if not HAS_CRYPT: + crypt = legacycrypt +except ImportError: + HAS_LEGACYCRYPT = False + LEGACYCRYPT_IMPORT_ERROR = traceback.format_exc() +else: + HAS_LEGACYCRYPT = True + LEGACYCRYPT_IMPORT_ERROR = None + def main(): expiry = date.strftime(date.today() + timedelta(days=365), "%Y-%m-%d") @@ -451,6 +465,13 @@ def main(): ('state', 'present', ['firstname', 'lastname', 'password']) ]) ) + + if not HAS_CRYPT and not HAS_LEGACYCRYPT: + module.fail_json( + msg=missing_required_lib('crypt (part of standard library up to Python 3.12) or legacycrypt (PyPI)'), + exception=LEGACYCRYPT_IMPORT_ERROR, + ) + username = module.params['username'] position = module.params['position'] ou = module.params['ou'] diff --git a/plugins/modules/ufw.py b/plugins/modules/ufw.py index 5d187793bd..e0d765eeac 100644 --- a/plugins/modules/ufw.py +++ b/plugins/modules/ufw.py @@ -11,22 +11,24 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ufw short_description: Manage firewall with UFW description: - - Manage firewall with UFW. + - Manage firewall with UFW. author: - - Aleksey Ovcharenko (@ovcharenko) - - Jarno Keskikangas (@pyykkis) - - Ahti Kitsik (@ahtik) + - Aleksey Ovcharenko (@ovcharenko) + - Jarno Keskikangas (@pyykkis) + - Ahti Kitsik (@ahtik) notes: - - See C(man ufw) for more examples. + - See C(man ufw) for more examples. + - "B(Warning:) Whilst the module itself can be run using concurrent strategies, C(ufw) does not support concurrency, as firewall rules are meant + to be ordered and parallel executions do not guarantee order. B(Do not use concurrency:) The results are unpredictable and the module may + fail silently if you do." requirements: - - C(ufw) package + - C(ufw) package extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: check_mode: support: full @@ -40,59 +42,52 @@ options: - V(reloaded) reloads firewall. - V(reset) disables and resets firewall to installation defaults. type: str - choices: [ disabled, enabled, reloaded, reset ] + choices: [disabled, enabled, reloaded, reset] default: description: - Change the default policy for incoming or outgoing traffic. type: str - choices: [ allow, deny, reject ] - aliases: [ policy ] + choices: [allow, deny, reject] + aliases: [policy] direction: description: - - Select direction for a rule or default policy command. Mutually - exclusive with O(interface_in) and O(interface_out). + - Select direction for a rule or default policy command. 
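Stepping back to the udm_user hunk above for a moment: the module now prefers the standard-library crypt and only falls back to legacycrypt where crypt is gone (Python 3.13+). A minimal standalone sketch of that pattern follows; the salt generation and the SHA-512 ("$6$") prefix are illustrative assumptions rather than the module's exact hashing code, and legacycrypt is assumed to expose the same crypt() call as the old standard-library module:

```python
import secrets
import string

try:
    import crypt  # standard library up to Python 3.12
except ImportError:
    import legacycrypt as crypt  # assumed drop-in replacement on Python 3.13+


def sha512_crypt(password):
    # "$6$" selects SHA-512 in glibc's crypt(3); 16 random salt characters.
    salt = "$6$" + "".join(
        secrets.choice(string.ascii_letters + string.digits) for _ in range(16)
    )
    return crypt.crypt(password, salt)


if __name__ == "__main__":
    print(sha512_crypt("s3cr3t"))  # e.g. $6$Ab12Cd34Ef56Gh78$<hash>
```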
Mutually exclusive with O(interface_in) and O(interface_out). type: str - choices: [ in, incoming, out, outgoing, routed ] + choices: [in, incoming, out, outgoing, routed] logging: description: - Toggles logging. Logged packets use the LOG_KERN syslog facility. type: str - choices: [ 'on', 'off', low, medium, high, full ] + choices: ['on', 'off', low, medium, high, full] insert: description: - Insert the corresponding rule as rule number NUM. - Note that ufw numbers rules starting with 1. - - If O(delete=true) and a value is provided for O(insert), - then O(insert) is ignored. + - If O(delete=true) and a value is provided for O(insert), then O(insert) is ignored. type: int insert_relative_to: description: - Allows to interpret the index in O(insert) relative to a position. - - V(zero) interprets the rule number as an absolute index (i.e. 1 is - the first rule). - - V(first-ipv4) interprets the rule number relative to the index of the - first IPv4 rule, or relative to the position where the first IPv4 rule + - V(zero) interprets the rule number as an absolute index (that is, 1 is the first rule). + - V(first-ipv4) interprets the rule number relative to the index of the first IPv4 rule, or relative to the position where the first IPv4 + rule would be if there is currently none. + - V(last-ipv4) interprets the rule number relative to the index of the last IPv4 rule, or relative to the position where the last IPv4 rule would be if there is currently none. - - V(last-ipv4) interprets the rule number relative to the index of the - last IPv4 rule, or relative to the position where the last IPv4 rule - would be if there is currently none. - - V(first-ipv6) interprets the rule number relative to the index of the - first IPv6 rule, or relative to the position where the first IPv6 rule - would be if there is currently none. - - V(last-ipv6) interprets the rule number relative to the index of the - last IPv6 rule, or relative to the position where the last IPv6 rule + - V(first-ipv6) interprets the rule number relative to the index of the first IPv6 rule, or relative to the position where the first IPv6 + rule would be if there is currently none. + - V(last-ipv6) interprets the rule number relative to the index of the last IPv6 rule, or relative to the position where the last IPv6 rule would be if there is currently none. type: str - choices: [ first-ipv4, first-ipv6, last-ipv4, last-ipv6, zero ] + choices: [first-ipv4, first-ipv6, last-ipv4, last-ipv6, zero] default: zero rule: description: - - Add firewall rule + - Add firewall rule. type: str - choices: [ allow, deny, limit, reject ] + choices: [allow, deny, limit, reject] log: description: - - Log new connections matched to this rule + - Log new connections matched to this rule. type: bool default: false from_ip: @@ -100,7 +95,7 @@ options: - Source IP address. type: str default: any - aliases: [ from, src ] + aliases: [from, src] from_port: description: - Source port. @@ -110,54 +105,49 @@ options: - Destination IP address. type: str default: any - aliases: [ dest, to] + aliases: [dest, to] to_port: description: - Destination port. type: str - aliases: [ port ] + aliases: [port] proto: description: - TCP/IP protocol. type: str - choices: [ any, tcp, udp, ipv6, esp, ah, gre, igmp ] - aliases: [ protocol ] + choices: [any, tcp, udp, ipv6, esp, ah, gre, igmp] + aliases: [protocol] name: description: - Use profile located in C(/etc/ufw/applications.d). type: str - aliases: [ app ] + aliases: [app] delete: description: - Delete rule. 
- - If O(delete=true) and a value is provided for O(insert), - then O(insert) is ignored. + - If O(delete=true) and a value is provided for O(insert), then O(insert) is ignored. type: bool default: false interface: description: - - Specify interface for the rule. The direction (in or out) used - for the interface depends on the value of O(direction). See - O(interface_in) and O(interface_out) for routed rules that needs - to supply both an input and output interface. Mutually - exclusive with O(interface_in) and O(interface_out). + - Specify interface for the rule. The direction (in or out) used for the interface depends on the value of O(direction). See O(interface_in) + and O(interface_out) for routed rules that needs to supply both an input and output interface. Mutually exclusive with O(interface_in) + and O(interface_out). type: str - aliases: [ if ] + aliases: [if] interface_in: description: - - Specify input interface for the rule. This is mutually - exclusive with O(direction) and O(interface). However, it is - compatible with O(interface_out) for routed rules. + - Specify input interface for the rule. This is mutually exclusive with O(direction) and O(interface). However, it is compatible with O(interface_out) + for routed rules. type: str - aliases: [ if_in ] + aliases: [if_in] version_added: '0.2.0' interface_out: description: - - Specify output interface for the rule. This is mutually - exclusive with O(direction) and O(interface). However, it is - compatible with O(interface_in) for routed rules. + - Specify output interface for the rule. This is mutually exclusive with O(direction) and O(interface). However, it is compatible with O(interface_in) + for routed rules. type: str - aliases: [ if_out ] + aliases: [if_out] version_added: '0.2.0' route: description: @@ -168,9 +158,9 @@ options: description: - Add a comment to the rule. Requires UFW version >=0.35. type: str -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Allow everything and enable UFW community.general.ufw: state: enabled @@ -295,7 +285,7 @@ EXAMPLES = r''' route: true src: 192.0.2.0/24 dest: 198.51.100.0/24 -''' +""" import re @@ -446,7 +436,7 @@ def main(): params = module.params - commands = dict((key, params[key]) for key in command_keys if params[key]) + commands = {key: params[key] for key in command_keys if params[key]} # Ensure ufw is available ufw_bin = module.get_bin_path('ufw', True) diff --git a/plugins/modules/uptimerobot.py b/plugins/modules/uptimerobot.py index c1894e90a0..ed6b6431f1 100644 --- a/plugins/modules/uptimerobot.py +++ b/plugins/modules/uptimerobot.py @@ -8,44 +8,43 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' - +DOCUMENTATION = r""" module: uptimerobot short_description: Pause and start Uptime Robot monitoring description: - - This module will let you start and pause Uptime Robot Monitoring + - This module will let you start and pause Uptime Robot Monitoring. author: "Nate Kingsley (@nate-kingsley)" requirements: - - Valid Uptime Robot API Key + - Valid Uptime Robot API Key extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - state: - type: str - description: - - Define whether or not the monitor should be running or paused. 
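The ufw module builds its commands mapping with a dict comprehension that keeps only the parameters the user actually set (visible in the hunk above), so unset options never leak into the generated ufw invocation. A tiny self-contained illustration of that filtering idiom; the sample values and the final command rendering are assumptions for demonstration only:

```python
# Parameters as an Ansible module would receive them; unset ones are falsy.
params = {
    "rule": "allow",
    "proto": "tcp",
    "to_port": "22",
    "from_ip": "",      # not set by the user
    "delete": False,    # not set by the user
}
command_keys = ["rule", "proto", "to_port", "from_ip", "delete"]

# Same idiom as in the module: keep only keys whose value is truthy.
commands = {key: params[key] for key in command_keys if params[key]}
print(commands)  # {'rule': 'allow', 'proto': 'tcp', 'to_port': '22'}

# Hypothetical rendering into a ufw command line, for illustration only.
cmd = ["ufw", commands["rule"], commands["to_port"] + "/" + commands["proto"]]
print(" ".join(cmd))  # ufw allow 22/tcp
```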
- required: true - choices: [ "started", "paused" ] - monitorid: - type: str - description: - - ID of the monitor to check. - required: true - apikey: - type: str - description: - - Uptime Robot API key. - required: true + state: + type: str + description: + - Define whether or not the monitor should be running or paused. + required: true + choices: ["started", "paused"] + monitorid: + type: str + description: + - ID of the monitor to check. + required: true + apikey: + type: str + description: + - Uptime Robot API key. + required: true notes: - - Support for adding and removing monitors and alert contacts has not yet been implemented. -''' + - Support for adding and removing monitors and alert contacts has not yet been implemented. +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Pause the monitor with an ID of 12345 community.general.uptimerobot: monitorid: 12345 @@ -57,7 +56,7 @@ EXAMPLES = ''' monitorid: 12345 apikey: 12345-1234512345 state: started -''' +""" import json diff --git a/plugins/modules/urpmi.py b/plugins/modules/urpmi.py index 75c0af90fc..9c08a22c21 100644 --- a/plugins/modules/urpmi.py +++ b/plugins/modules/urpmi.py @@ -11,12 +11,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: urpmi short_description: Urpmi manager description: - - Manages packages with C(urpmi) (such as for Mageia or Mandriva) + - Manages packages with C(urpmi) (such as for Mageia or Mandriva). extends_documentation_fragment: - community.general.attributes attributes: @@ -29,13 +28,13 @@ options: description: - A list of package names to install, upgrade or remove. required: true - aliases: [ package, pkg ] + aliases: [package, pkg] type: list elements: str state: description: - Indicates the desired package state. - choices: [ absent, present, installed, removed ] + choices: [absent, present, installed, removed] default: present type: str update_cache: @@ -50,21 +49,19 @@ options: default: true force: description: - - Assume "yes" is the answer to any question urpmi has to ask. - Corresponds to the C(--force) option for C(urpmi). + - Assume "yes" is the answer to any question urpmi has to ask. Corresponds to the C(--force) option for C(urpmi). type: bool default: true root: description: - - Specifies an alternative install root, relative to which all packages will be installed. - Corresponds to the C(--root) option for C(urpmi). - aliases: [ installroot ] + - Specifies an alternative install root, relative to which all packages will be installed. Corresponds to the C(--root) option for C(urpmi). 
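The urpmi options above map almost one-to-one onto command-line switches (C(--force), C(--root)). For readers scripting around urpmi directly, here is a rough, hypothetical sketch of composing such an invocation from module-style parameters; the helper is illustrative only and deliberately limited to the switches documented above:

```python
def build_urpmi_install(packages, force=True, root=None):
    """Compose an urpmi install command from module-style parameters.

    Only the --force and --root switches documented above are covered;
    the real module also handles removal, cache updates, and so on.
    """
    cmd = ["urpmi"]
    if force:
        cmd.append("--force")
    if root:
        cmd.extend(["--root", root])
    cmd.extend(packages)
    return cmd


print(build_urpmi_install(["foo", "bar"], root="/mnt/target"))
# ['urpmi', '--force', '--root', '/mnt/target', 'foo', 'bar']
```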
+ aliases: [installroot] type: str author: -- Philippe Makowski (@pmakowski) -''' + - Philippe Makowski (@pmakowski) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install package foo community.general.urpmi: pkg: foo @@ -85,7 +82,7 @@ EXAMPLES = ''' name: bar state: present update_cache: true -''' +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/usb_facts.py b/plugins/modules/usb_facts.py index 340c71ee54..4f0195bde3 100644 --- a/plugins/modules/usb_facts.py +++ b/plugins/modules/usb_facts.py @@ -10,8 +10,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: usb_facts short_description: Allows listing information about USB devices version_added: 8.5.0 @@ -25,9 +24,9 @@ extends_documentation_fragment: - community.general.attributes.facts_module requirements: - lsusb binary on PATH (usually installed through the package usbutils and preinstalled on many systems) -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Get information about USB devices community.general.usb_facts: @@ -35,9 +34,9 @@ EXAMPLES = ''' ansible.builtin.debug: msg: "On bus {{ item.bus }} device {{ item.device }} with id {{ item.id }} is {{ item.name }}" loop: "{{ ansible_facts.usb_devices }}" -''' +""" -RETURN = r''' +RETURN = r""" ansible_facts: description: Dictionary containing details of connected USB devices. returned: always @@ -69,7 +68,7 @@ ansible_facts: returned: always type: str sample: Linux Foundation 2.0 root hub -''' +""" import re from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/utm_aaa_group.py b/plugins/modules/utm_aaa_group.py index 9c595284da..b29f3d50af 100644 --- a/plugins/modules/utm_aaa_group.py +++ b/plugins/modules/utm_aaa_group.py @@ -8,120 +8,117 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: utm_aaa_group author: - - Johannes Brunswicker (@MatrixCrawler) + - Johannes Brunswicker (@MatrixCrawler) short_description: Create, update or destroy an aaa group object in Sophos UTM description: - - Create, update or destroy an aaa group object in Sophos UTM. - - This module needs to have the REST Ability of the UTM to be activated. - + - Create, update or destroy an aaa group object in Sophos UTM. + - This module needs to have the REST Ability of the UTM to be activated. attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - name: - description: - - The name of the object. Will be used to identify the entry. - type: str - required: true - adirectory_groups: - description: - - List of adirectory group strings. - type: list - elements: str - default: [] - adirectory_groups_sids: - description: - - Dictionary of group sids. - type: dict - default: {} - backend_match: - description: - - The backend for the group. - type: str - choices: - - none - - adirectory - - edirectory - - radius - - tacacs - - ldap - default: none - comment: - description: - - Comment that describes the AAA group. - type: str - default: '' - dynamic: - description: - - Group type. Is static if none is selected. - type: str - default: none - choices: - - none - - ipsec_dn - - directory_groups - edirectory_groups: - description: - - List of edirectory group strings. - type: list - elements: str - default: [] - ipsec_dn: - description: - - The ipsec dn string. 
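The usb_facts RETURN block above describes one dictionary per lsusb output line, with bus, device, id, and name fields. A minimal standalone parser for that line format is sketched below; the regular expression is an assumption based on the documented sample output, not necessarily the one the module itself uses:

```python
import re

# Matches lines such as:
# Bus 001 Device 002: ID 1d6b:0002 Linux Foundation 2.0 root hub
LSUSB_LINE = re.compile(
    r"^Bus (\d{3}) Device (\d{3}): ID ([0-9a-fA-F]{4}:[0-9a-fA-F]{4}) (.+)$"
)


def parse_lsusb(output):
    devices = []
    for line in output.splitlines():
        match = LSUSB_LINE.match(line.strip())
        if match:
            bus, device, usb_id, name = match.groups()
            devices.append({"bus": bus, "device": device, "id": usb_id, "name": name})
    return devices


sample = "Bus 001 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub"
print(parse_lsusb(sample))
# [{'bus': '001', 'device': '001', 'id': '1d6b:0002',
#   'name': 'Linux Foundation 2.0 root hub'}]
```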
- type: str - default: '' - ldap_attribute: - description: - - The ldap attribute to check against. - type: str - default: '' - ldap_attribute_value: - description: - - The ldap attribute value to check against. - type: str - default: '' - members: - description: - - A list of user ref names (aaa/user). - type: list - elements: str - default: [] - network: - description: - - The network reference name. The objects contains the known ip addresses for the authentication object (network/aaa). - type: str - default: "" - radius_groups: - description: - - A list of radius group strings. - type: list - elements: str - default: [] - tacacs_groups: - description: - - A list of tacacs group strings. - type: list - elements: str - default: [] + name: + description: + - The name of the object. Will be used to identify the entry. + type: str + required: true + adirectory_groups: + description: + - List of adirectory group strings. + type: list + elements: str + default: [] + adirectory_groups_sids: + description: + - Dictionary of group sids. + type: dict + default: {} + backend_match: + description: + - The backend for the group. + type: str + choices: + - none + - adirectory + - edirectory + - radius + - tacacs + - ldap + default: none + comment: + description: + - Comment that describes the AAA group. + type: str + default: '' + dynamic: + description: + - Group type. Is static if none is selected. + type: str + default: none + choices: + - none + - ipsec_dn + - directory_groups + edirectory_groups: + description: + - List of edirectory group strings. + type: list + elements: str + default: [] + ipsec_dn: + description: + - The ipsec dn string. + type: str + default: '' + ldap_attribute: + description: + - The ldap attribute to check against. + type: str + default: '' + ldap_attribute_value: + description: + - The ldap attribute value to check against. + type: str + default: '' + members: + description: + - A list of user ref names (aaa/user). + type: list + elements: str + default: [] + network: + description: + - The network reference name. The objects contains the known IP addresses for the authentication object (network/aaa). + type: str + default: "" + radius_groups: + description: + - A list of radius group strings. + type: list + elements: str + default: [] + tacacs_groups: + description: + - A list of tacacs group strings. + type: list + elements: str + default: [] extends_documentation_fragment: -- community.general.utm -- community.general.attributes + - community.general.utm + - community.general.attributes +""" -''' - -EXAMPLES = """ +EXAMPLES = r""" - name: Create UTM aaa_group community.general.utm_aaa_group: utm_host: sophos.host.name @@ -142,63 +139,63 @@ EXAMPLES = """ state: absent """ -RETURN = """ +RETURN = r""" result: - description: The utm object that was created. - returned: success - type: complex - contains: - _ref: - description: The reference name of the object. - type: str - _locked: - description: Whether or not the object is currently locked. - type: bool - _type: - description: The type of the object. - type: str - name: - description: The name of the object. - type: str - adirectory_groups: - description: List of Active Directory Groups. - type: str - adirectory_groups_sids: - description: List of Active Directory Groups SIDS. - type: list - backend_match: - description: The backend to use. - type: str - comment: - description: The comment string. - type: str - dynamic: - description: Whether the group match is ipsec_dn or directory_group. 
- type: str - edirectory_groups: - description: List of eDirectory Groups. - type: str - ipsec_dn: - description: ipsec_dn identifier to match. - type: str - ldap_attribute: - description: The LDAP Attribute to match against. - type: str - ldap_attribute_value: - description: The LDAP Attribute Value to match against. - type: str - members: - description: List of member identifiers of the group. - type: list - network: - description: The identifier of the network (network/aaa). - type: str - radius_group: - description: The radius group identifier. - type: str - tacacs_group: - description: The tacacs group identifier. - type: str + description: The utm object that was created. + returned: success + type: complex + contains: + _ref: + description: The reference name of the object. + type: str + _locked: + description: Whether or not the object is currently locked. + type: bool + _type: + description: The type of the object. + type: str + name: + description: The name of the object. + type: str + adirectory_groups: + description: List of Active Directory Groups. + type: str + adirectory_groups_sids: + description: List of Active Directory Groups SIDS. + type: list + backend_match: + description: The backend to use. + type: str + comment: + description: The comment string. + type: str + dynamic: + description: Whether the group match is ipsec_dn or directory_group. + type: str + edirectory_groups: + description: List of eDirectory Groups. + type: str + ipsec_dn: + description: Ipsec_dn identifier to match. + type: str + ldap_attribute: + description: The LDAP Attribute to match against. + type: str + ldap_attribute_value: + description: The LDAP Attribute Value to match against. + type: str + members: + description: List of member identifiers of the group. + type: list + network: + description: The identifier of the network (network/aaa). + type: str + radius_group: + description: The radius group identifier. + type: str + tacacs_group: + description: The tacacs group identifier. + type: str """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule diff --git a/plugins/modules/utm_aaa_group_info.py b/plugins/modules/utm_aaa_group_info.py index 37e01c736c..4f073176f2 100644 --- a/plugins/modules/utm_aaa_group_info.py +++ b/plugins/modules/utm_aaa_group_info.py @@ -10,38 +10,35 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: utm_aaa_group_info author: - - Johannes Brunswicker (@MatrixCrawler) + - Johannes Brunswicker (@MatrixCrawler) short_description: Get info for reverse_proxy frontend entry in Sophos UTM description: - - get info for a reverse_proxy frontend entry in SOPHOS UTM. - + - Get info for a reverse_proxy frontend entry in SOPHOS UTM. attributes: - check_mode: - version_added: 3.3.0 - # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - name: - type: str - description: - - The name of the object. Will be used to identify the entry - required: true + name: + type: str + description: + - The name of the object. Will be used to identify the entry. 
+ required: true extends_documentation_fragment: - - community.general.utm - - community.general.attributes - - community.general.attributes.info_module + - community.general.utm + - community.general.attributes + - community.general.attributes.info_module +""" -''' - -EXAMPLES = """ +EXAMPLES = r""" - name: Remove UTM aaa_group community.general.utm_aaa_group_info: utm_host: sophos.host.name @@ -49,63 +46,63 @@ EXAMPLES = """ name: TestAAAGroupEntry """ -RETURN = """ +RETURN = r""" result: - description: The utm object that was created - returned: success - type: complex - contains: - _ref: - description: The reference name of the object - type: str - _locked: - description: Whether or not the object is currently locked - type: bool - _type: - description: The type of the object - type: str - name: - description: The name of the object - type: str - adirectory_groups: - description: List of Active Directory Groups - type: str - adirectory_groups_sids: - description: List of Active Directory Groups SIDS - type: list - backend_match: - description: The backend to use - type: str - comment: - description: The comment string - type: str - dynamic: - description: Whether the group match is ipsec_dn or directory_group - type: str - edirectory_groups: - description: List of eDirectory Groups - type: str - ipsec_dn: - description: ipsec_dn identifier to match - type: str - ldap_attribute: - description: The LDAP Attribute to match against - type: str - ldap_attribute_value: - description: The LDAP Attribute Value to match against - type: str - members: - description: List of member identifiers of the group - type: list - network: - description: The identifier of the network (network/aaa) - type: str - radius_group: - description: The radius group identifier - type: str - tacacs_group: - description: The tacacs group identifier - type: str + description: The utm object that was created. + returned: success + type: complex + contains: + _ref: + description: The reference name of the object. + type: str + _locked: + description: Whether or not the object is currently locked. + type: bool + _type: + description: The type of the object. + type: str + name: + description: The name of the object. + type: str + adirectory_groups: + description: List of Active Directory Groups. + type: str + adirectory_groups_sids: + description: List of Active Directory Groups SIDS. + type: list + backend_match: + description: The backend to use. + type: str + comment: + description: The comment string. + type: str + dynamic: + description: Whether the group match is ipsec_dn or directory_group. + type: str + edirectory_groups: + description: List of eDirectory Groups. + type: str + ipsec_dn: + description: Ipsec_dn identifier to match. + type: str + ldap_attribute: + description: The LDAP Attribute to match against. + type: str + ldap_attribute_value: + description: The LDAP Attribute Value to match against. + type: str + members: + description: List of member identifiers of the group. + type: list + network: + description: The identifier of the network (network/aaa). + type: str + radius_group: + description: The radius group identifier. + type: str + tacacs_group: + description: The tacacs group identifier. 
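All of the utm_* modules in this section talk to the same Sophos UTM REST interface (the "REST Ability" the descriptions say must be activated), identified by utm_host and utm_token. For orientation only, here is a rough sketch of fetching aaa/group objects over that interface with nothing but the standard library; the port, URL layout, and token-as-basic-auth scheme are assumptions about a typical UTM 9 setup, not code taken from utm_utils, so verify them against your installation (TLS certificate handling is also left at the default here):

```python
import base64
import json
import urllib.request

# Assumed defaults for a Sophos UTM 9 REST endpoint; adjust for your setup.
UTM_HOST = "sophos.host.name"
UTM_PORT = 4444
UTM_TOKEN = "abcdefghijklmno1234"


def get_utm_objects(endpoint):
    """GET all objects of one class, for example 'aaa/group'."""
    url = "https://%s:%d/api/objects/%s/" % (UTM_HOST, UTM_PORT, endpoint)
    # The token is assumed to be sent as HTTP basic auth with user 'token'.
    auth = base64.b64encode(("token:" + UTM_TOKEN).encode()).decode()
    request = urllib.request.Request(url, headers={"Authorization": "Basic " + auth})
    with urllib.request.urlopen(request) as response:
        return json.load(response)


if __name__ == "__main__":
    for group in get_utm_objects("aaa/group"):
        print(group["_ref"], group["name"])
```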
+ type: str """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule diff --git a/plugins/modules/utm_ca_host_key_cert.py b/plugins/modules/utm_ca_host_key_cert.py index b944e83124..b67531c061 100644 --- a/plugins/modules/utm_ca_host_key_cert.py +++ b/plugins/modules/utm_ca_host_key_cert.py @@ -9,67 +9,64 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: utm_ca_host_key_cert author: - - Stephan Schwarz (@stearz) + - Stephan Schwarz (@stearz) short_description: Create, update or destroy ca host_key_cert entry in Sophos UTM description: - - Create, update or destroy a ca host_key_cert entry in SOPHOS UTM. - - This module needs to have the REST Ability of the UTM to be activated. - + - Create, update or destroy a ca host_key_cert entry in SOPHOS UTM. + - This module needs to have the REST Ability of the UTM to be activated. attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - name: - description: - - The name of the object. Will be used to identify the entry. - required: true - type: str - ca: - description: - - A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object. - required: true - type: str - meta: - description: - - A reference to an existing utm_ca_meta_x509 object. - required: true - type: str - certificate: - description: - - The certificate in PEM format. - required: true - type: str - comment: - description: - - Optional comment string. - type: str - encrypted: - description: - - Optionally enable encryption. - default: false - type: bool - key: - description: - - Optional private key in PEM format. - type: str + name: + description: + - The name of the object. Will be used to identify the entry. + required: true + type: str + ca: + description: + - A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object. + required: true + type: str + meta: + description: + - A reference to an existing utm_ca_meta_x509 object. + required: true + type: str + certificate: + description: + - The certificate in PEM format. + required: true + type: str + comment: + description: + - Optional comment string. + type: str + encrypted: + description: + - Optionally enable encryption. + default: false + type: bool + key: + description: + - Optional private key in PEM format. + type: str extends_documentation_fragment: -- community.general.utm -- community.general.attributes + - community.general.utm + - community.general.attributes +""" -''' - -EXAMPLES = """ +EXAMPLES = r""" - name: Create a ca_host_key_cert entry community.general.utm_ca_host_key_cert: utm_host: sophos.host.name @@ -98,45 +95,44 @@ EXAMPLES = """ utm_token: abcdefghijklmno1234 name: TestHostKeyCertEntry state: info - """ -RETURN = """ +RETURN = r""" result: - description: The utm object that was created - returned: success - type: complex - contains: - _ref: - description: The reference name of the object - type: str - _locked: - description: Whether or not the object is currently locked - type: bool - _type: - description: The type of the object - type: str - name: - description: The name of the object - type: str - ca: - description: A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object. - type: str - meta: - description: A reference to an existing utm_ca_meta_x509 object. 
- type: str - certificate: - description: The certificate in PEM format - type: str - comment: - description: Comment string (may be empty string) - type: str - encrypted: - description: If encryption is enabled - type: bool - key: - description: Private key in PEM format (may be empty string) - type: str + description: The utm object that was created. + returned: success + type: complex + contains: + _ref: + description: The reference name of the object. + type: str + _locked: + description: Whether or not the object is currently locked. + type: bool + _type: + description: The type of the object. + type: str + name: + description: The name of the object. + type: str + ca: + description: A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object. + type: str + meta: + description: A reference to an existing utm_ca_meta_x509 object. + type: str + certificate: + description: The certificate in PEM format. + type: str + comment: + description: Comment string (may be empty string). + type: str + encrypted: + description: If encryption is enabled. + type: bool + key: + description: Private key in PEM format (may be empty string). + type: str """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule diff --git a/plugins/modules/utm_ca_host_key_cert_info.py b/plugins/modules/utm_ca_host_key_cert_info.py index d81eede69f..cab6657ab6 100644 --- a/plugins/modules/utm_ca_host_key_cert_info.py +++ b/plugins/modules/utm_ca_host_key_cert_info.py @@ -9,37 +9,35 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: utm_ca_host_key_cert_info author: - - Stephan Schwarz (@stearz) + - Stephan Schwarz (@stearz) short_description: Get info for a ca host_key_cert entry in Sophos UTM description: - - Get info for a ca host_key_cert entry in SOPHOS UTM. - + - Get info for a ca host_key_cert entry in SOPHOS UTM. attributes: - check_mode: - version_added: 3.3.0 - # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - name: - type: str - description: - - The name of the object. Will be used to identify the entry - required: true + name: + type: str + description: + - The name of the object. Will be used to identify the entry. + required: true extends_documentation_fragment: - - community.general.utm - - community.general.attributes - - community.general.attributes.info_module -''' + - community.general.utm + - community.general.attributes + - community.general.attributes.info_module +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Get info for a ca host_key_cert entry community.general.utm_ca_host_key_cert_info: utm_host: sophos.host.name @@ -47,42 +45,42 @@ EXAMPLES = """ name: TestHostKeyCertEntry """ -RETURN = """ +RETURN = r""" result: - description: The utm object that was created - returned: success - type: complex - contains: - _ref: - description: The reference name of the object - type: str - _locked: - description: Whether or not the object is currently locked - type: bool - _type: - description: The type of the object - type: str - name: - description: The name of the object - type: str - ca: - description: A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object. - type: str - meta: - description: A reference to an existing utm_ca_meta_x509 object. 
- type: str - certificate: - description: The certificate in PEM format - type: str - comment: - description: Comment string (may be empty string) - type: str - encrypted: - description: If encryption is enabled - type: bool - key: - description: Private key in PEM format (may be empty string) - type: str + description: The utm object that was created. + returned: success + type: complex + contains: + _ref: + description: The reference name of the object. + type: str + _locked: + description: Whether or not the object is currently locked. + type: bool + _type: + description: The type of the object. + type: str + name: + description: The name of the object. + type: str + ca: + description: A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object. + type: str + meta: + description: A reference to an existing utm_ca_meta_x509 object. + type: str + certificate: + description: The certificate in PEM format. + type: str + comment: + description: Comment string (may be empty string). + type: str + encrypted: + description: If encryption is enabled. + type: bool + key: + description: Private key in PEM format (may be empty string). + type: str """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule diff --git a/plugins/modules/utm_dns_host.py b/plugins/modules/utm_dns_host.py index 6b3725557b..bbb93e9eb0 100644 --- a/plugins/modules/utm_dns_host.py +++ b/plugins/modules/utm_dns_host.py @@ -8,78 +8,75 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: utm_dns_host author: - - Johannes Brunswicker (@MatrixCrawler) + - Johannes Brunswicker (@MatrixCrawler) short_description: Create, update or destroy dns entry in Sophos UTM description: - - Create, update or destroy a dns entry in SOPHOS UTM. - - This module needs to have the REST Ability of the UTM to be activated. - + - Create, update or destroy a dns entry in SOPHOS UTM. + - This module needs to have the REST Ability of the UTM to be activated. attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - name: - type: str - description: - - The name of the object. Will be used to identify the entry - required: true - address: - type: str - description: - - The IPV4 Address of the entry. Can be left empty for automatic resolving. - default: 0.0.0.0 - address6: - type: str - description: - - The IPV6 Address of the entry. Can be left empty for automatic resolving. - default: "::" - comment: - type: str - description: - - An optional comment to add to the dns host object - default: '' - hostname: - type: str - description: - - The hostname for the dns host object - interface: - type: str - description: - - The reference name of the interface to use. If not provided the default interface will be used - default: '' - resolved: - description: - - whether the hostname's ipv4 address is already resolved or not - default: false - type: bool - resolved6: - description: - - whether the hostname's ipv6 address is already resolved or not - default: false - type: bool - timeout: - type: int - description: - - the timeout for the utm to resolve the ip address for the hostname again - default: 0 + name: + type: str + description: + - The name of the object. Will be used to identify the entry. + required: true + address: + type: str + description: + - The IPV4 Address of the entry. Can be left empty for automatic resolving. 
+ default: 0.0.0.0 + address6: + type: str + description: + - The IPV6 Address of the entry. Can be left empty for automatic resolving. + default: "::" + comment: + type: str + description: + - An optional comment to add to the dns host object. + default: '' + hostname: + type: str + description: + - The hostname for the dns host object. + interface: + type: str + description: + - The reference name of the interface to use. If not provided the default interface will be used. + default: '' + resolved: + description: + - Whether the hostname's ipv4 address is already resolved or not. + default: false + type: bool + resolved6: + description: + - Whether the hostname's ipv6 address is already resolved or not. + default: false + type: bool + timeout: + type: int + description: + - The timeout for the utm to resolve the ip address for the hostname again. + default: 0 extends_documentation_fragment: -- community.general.utm -- community.general.attributes + - community.general.utm + - community.general.attributes +""" -''' - -EXAMPLES = """ +EXAMPLES = r""" - name: Create UTM dns host entry community.general.utm_dns_host: utm_host: sophos.host.name @@ -96,45 +93,45 @@ EXAMPLES = """ state: absent """ -RETURN = """ +RETURN = r""" result: - description: The utm object that was created - returned: success - type: complex - contains: - _ref: - description: The reference name of the object - type: str - _locked: - description: Whether or not the object is currently locked - type: bool - name: - description: The name of the object - type: str - address: - description: The ipv4 address of the object - type: str - address6: - description: The ipv6 address of the object - type: str - comment: - description: The comment string - type: str - hostname: - description: The hostname of the object - type: str - interface: - description: The reference name of the interface the object is associated with - type: str - resolved: - description: Whether the ipv4 address is resolved or not - type: bool - resolved6: - description: Whether the ipv6 address is resolved or not - type: bool - timeout: - description: The timeout until a new resolving will be attempted - type: int + description: The utm object that was created. + returned: success + type: complex + contains: + _ref: + description: The reference name of the object. + type: str + _locked: + description: Whether or not the object is currently locked. + type: bool + name: + description: The name of the object. + type: str + address: + description: The ipv4 address of the object. + type: str + address6: + description: The ipv6 address of the object. + type: str + comment: + description: The comment string. + type: str + hostname: + description: The hostname of the object. + type: str + interface: + description: The reference name of the interface the object is associated with. + type: str + resolved: + description: Whether the ipv4 address is resolved or not. + type: bool + resolved6: + description: Whether the ipv6 address is resolved or not. + type: bool + timeout: + description: The timeout until a new resolving will be attempted. 
+ type: int """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule diff --git a/plugins/modules/utm_network_interface_address.py b/plugins/modules/utm_network_interface_address.py index a85a46aeab..1e3d2ee5c3 100644 --- a/plugins/modules/utm_network_interface_address.py +++ b/plugins/modules/utm_network_interface_address.py @@ -8,62 +8,58 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: utm_network_interface_address author: - - Juergen Wiebe (@steamx) + - Juergen Wiebe (@steamx) short_description: Create, update or destroy network/interface_address object description: - - Create, update or destroy a network/interface_address object in SOPHOS UTM. - - This module needs to have the REST Ability of the UTM to be activated. - + - Create, update or destroy a network/interface_address object in SOPHOS UTM. + - This module needs to have the REST Ability of the UTM to be activated. attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - name: - type: str - description: - - The name of the object. Will be used to identify the entry - required: true - address: - type: str - description: - - The ip4 address of the network/interface_address object. - required: true - address6: - type: str - description: - - The ip6 address of the network/interface_address object. - required: false - comment: - type: str - description: - - An optional comment to add to the object - default: '' - resolved: - type: bool - description: - - Whether or not the object is resolved - resolved6: - type: bool - description: - - Whether or not the object is resolved - + name: + type: str + description: + - The name of the object. Will be used to identify the entry. + required: true + address: + type: str + description: + - The ip4 address of the network/interface_address object. + required: true + address6: + type: str + description: + - The ip6 address of the network/interface_address object. + required: false + comment: + type: str + description: + - An optional comment to add to the object. + default: '' + resolved: + type: bool + description: + - Whether or not the object is resolved. + resolved6: + type: bool + description: + - Whether or not the object is resolved. 
extends_documentation_fragment: -- community.general.utm -- community.general.attributes + - community.general.utm + - community.general.attributes +""" -''' - -EXAMPLES = """ +EXAMPLES = r""" - name: Create a network interface address utm_proxy_backend: utm_host: sophos.host.name @@ -81,39 +77,39 @@ EXAMPLES = """ state: absent """ -RETURN = """ +RETURN = r""" result: - description: The utm object that was created - returned: success - type: complex - contains: - _ref: - description: The reference name of the object - type: str - _locked: - description: Whether or not the object is currently locked - type: bool - _type: - description: The type of the object - type: str - name: - description: The name of the object - type: str - address: - description: The ip4 address of the network/interface_address object - type: str - address6: - description: The ip6 address of the network/interface_address object - type: str - comment: - description: The comment string - type: str - resolved: - description: Whether or not the object is resolved - type: bool - resolved6: - description: Whether or not the object is resolved - type: bool + description: The utm object that was created. + returned: success + type: complex + contains: + _ref: + description: The reference name of the object. + type: str + _locked: + description: Whether or not the object is currently locked. + type: bool + _type: + description: The type of the object. + type: str + name: + description: The name of the object. + type: str + address: + description: The ip4 address of the network/interface_address object. + type: str + address6: + description: The ip6 address of the network/interface_address object. + type: str + comment: + description: The comment string. + type: str + resolved: + description: Whether or not the object is resolved. + type: bool + resolved6: + description: Whether or not the object is resolved. + type: bool """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule diff --git a/plugins/modules/utm_network_interface_address_info.py b/plugins/modules/utm_network_interface_address_info.py index 9dc08ad094..b9c394c848 100644 --- a/plugins/modules/utm_network_interface_address_info.py +++ b/plugins/modules/utm_network_interface_address_info.py @@ -8,37 +8,35 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: utm_network_interface_address_info author: - - Juergen Wiebe (@steamx) + - Juergen Wiebe (@steamx) short_description: Get info for a network/interface_address object description: - - Get info for a network/interface_address object in SOPHOS UTM. - + - Get info for a network/interface_address object in SOPHOS UTM. attributes: - check_mode: - version_added: 3.3.0 - # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - name: - type: str - description: - - The name of the object. Will be used to identify the entry - required: true + name: + type: str + description: + - The name of the object. Will be used to identify the entry. 
+ required: true extends_documentation_fragment: - - community.general.utm - - community.general.attributes - - community.general.attributes.info_module -''' + - community.general.utm + - community.general.attributes + - community.general.attributes.info_module +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Get network interface address info utm_proxy_interface_address_info: utm_host: sophos.host.name @@ -46,39 +44,39 @@ EXAMPLES = """ name: TestNetworkInterfaceAddress """ -RETURN = """ +RETURN = r""" result: - description: The utm object that was created - returned: success - type: complex - contains: - _ref: - description: The reference name of the object - type: str - _locked: - description: Whether or not the object is currently locked - type: bool - _type: - description: The type of the object - type: str - name: - description: The name of the object - type: str - address: - description: The ip4 address of the network/interface_address object - type: str - address6: - description: The ip6 address of the network/interface_address object - type: str - comment: - description: The comment string - type: str - resolved: - description: Whether or not the object is resolved - type: bool - resolved6: - description: Whether or not the object is resolved - type: bool + description: The utm object that was created. + returned: success + type: complex + contains: + _ref: + description: The reference name of the object. + type: str + _locked: + description: Whether or not the object is currently locked. + type: bool + _type: + description: The type of the object. + type: str + name: + description: The name of the object. + type: str + address: + description: The ip4 address of the network/interface_address object. + type: str + address6: + description: The ip6 address of the network/interface_address object. + type: str + comment: + description: The comment string. + type: str + resolved: + description: Whether or not the object is resolved. + type: bool + resolved6: + description: Whether or not the object is resolved. + type: bool """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule diff --git a/plugins/modules/utm_proxy_auth_profile.py b/plugins/modules/utm_proxy_auth_profile.py index 3b482483bf..207c4ba156 100644 --- a/plugins/modules/utm_proxy_auth_profile.py +++ b/plugins/modules/utm_proxy_auth_profile.py @@ -9,183 +9,180 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: utm_proxy_auth_profile author: - - Stephan Schwarz (@stearz) + - Stephan Schwarz (@stearz) short_description: Create, update or destroy reverse_proxy auth_profile entry in Sophos UTM description: - - Create, update or destroy a reverse_proxy auth_profile entry in SOPHOS UTM. - - This module needs to have the REST Ability of the UTM to be activated. - + - Create, update or destroy a reverse_proxy auth_profile entry in SOPHOS UTM. + - This module needs to have the REST Ability of the UTM to be activated. attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - name: - type: str - description: - - The name of the object. 
Will be used to identify the entry - required: true - aaa: - type: list - elements: str - description: - - List of references to utm_aaa objects (allowed users or groups) - required: true - basic_prompt: - type: str - description: - - The message in the basic authentication prompt - required: true - backend_mode: - type: str - description: - - Specifies if the backend server needs authentication ([Basic|None]) - default: None - choices: - - Basic - - None - backend_strip_basic_auth: - description: - - Should the login data be stripped when proxying the request to the backend host - type: bool - default: true - backend_user_prefix: - type: str - description: - - Prefix string to prepend to the username for backend authentication - default: "" - backend_user_suffix: - type: str - description: - - Suffix string to append to the username for backend authentication - default: "" - comment: - type: str - description: - - Optional comment string - default: "" - frontend_cookie: - type: str - description: - - Frontend cookie name - frontend_cookie_secret: - type: str - description: - - Frontend cookie secret - frontend_form: - type: str - description: - - Frontend authentication form name - frontend_form_template: - type: str - description: - - Frontend authentication form template - default: "" - frontend_login: - type: str - description: - - Frontend login name - frontend_logout: - type: str - description: - - Frontend logout name - frontend_mode: - type: str - description: - - Frontend authentication mode (Form|Basic) - default: Basic - choices: - - Basic - - Form - frontend_realm: - type: str - description: - - Frontend authentication realm - frontend_session_allow_persistency: - description: - - Allow session persistency - type: bool - default: false - frontend_session_lifetime: - type: int - description: - - session lifetime - required: true - frontend_session_lifetime_limited: - description: - - Specifies if limitation of session lifetime is active - type: bool - default: true - frontend_session_lifetime_scope: - type: str - description: - - scope for frontend_session_lifetime (days|hours|minutes) - default: hours - choices: - - days - - hours - - minutes - frontend_session_timeout: - type: int - description: - - session timeout - required: true - frontend_session_timeout_enabled: - description: - - Specifies if session timeout is active - type: bool - default: true - frontend_session_timeout_scope: - type: str - description: - - scope for frontend_session_timeout (days|hours|minutes) - default: minutes - choices: - - days - - hours - - minutes - logout_delegation_urls: - type: list - elements: str - description: - - List of logout URLs that logouts are delegated to - default: [] - logout_mode: - type: str - description: - - Mode of logout (None|Delegation) - default: None - choices: - - None - - Delegation - redirect_to_requested_url: - description: - - Should a redirect to the requested URL be made - type: bool - default: false + name: + type: str + description: + - The name of the object. Will be used to identify the entry. + required: true + aaa: + type: list + elements: str + description: + - List of references to utm_aaa objects (allowed users or groups). + required: true + basic_prompt: + type: str + description: + - The message in the basic authentication prompt. + required: true + backend_mode: + type: str + description: + - Specifies if the backend server needs authentication ([Basic|None]). 
+ default: None + choices: + - Basic + - None + backend_strip_basic_auth: + description: + - Should the login data be stripped when proxying the request to the backend host. + type: bool + default: true + backend_user_prefix: + type: str + description: + - Prefix string to prepend to the username for backend authentication. + default: "" + backend_user_suffix: + type: str + description: + - Suffix string to append to the username for backend authentication. + default: "" + comment: + type: str + description: + - Optional comment string. + default: "" + frontend_cookie: + type: str + description: + - Frontend cookie name. + frontend_cookie_secret: + type: str + description: + - Frontend cookie secret. + frontend_form: + type: str + description: + - Frontend authentication form name. + frontend_form_template: + type: str + description: + - Frontend authentication form template. + default: "" + frontend_login: + type: str + description: + - Frontend login name. + frontend_logout: + type: str + description: + - Frontend logout name. + frontend_mode: + type: str + description: + - Frontend authentication mode (Form|Basic). + default: Basic + choices: + - Basic + - Form + frontend_realm: + type: str + description: + - Frontend authentication realm. + frontend_session_allow_persistency: + description: + - Allow session persistency. + type: bool + default: false + frontend_session_lifetime: + type: int + description: + - Session lifetime. + required: true + frontend_session_lifetime_limited: + description: + - Specifies if limitation of session lifetime is active. + type: bool + default: true + frontend_session_lifetime_scope: + type: str + description: + - Scope for frontend_session_lifetime (days|hours|minutes). + default: hours + choices: + - days + - hours + - minutes + frontend_session_timeout: + type: int + description: + - Session timeout. + required: true + frontend_session_timeout_enabled: + description: + - Specifies if session timeout is active. + type: bool + default: true + frontend_session_timeout_scope: + type: str + description: + - Scope for frontend_session_timeout (days|hours|minutes). + default: minutes + choices: + - days + - hours + - minutes + logout_delegation_urls: + type: list + elements: str + description: + - List of logout URLs that logouts are delegated to. + default: [] + logout_mode: + type: str + description: + - Mode of logout (None|Delegation). + default: None + choices: + - None + - Delegation + redirect_to_requested_url: + description: + - Should a redirect to the requested URL be made. 
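# A hedged sketch of a form-based community.general.utm_proxy_auth_profile task using the
# options documented above; host, token, the profile name and the aaa reference are placeholders.
- name: Create a form-based UTM proxy_auth_profile
  community.general.utm_proxy_auth_profile:
    utm_host: sophos.host.name
    utm_token: abcdefghijklmno1234
    name: TestAuthProfileEntry
    aaa: [REF_OBJECT_STRING]
    basic_prompt: "Authentication required: Please login"
    frontend_mode: Form
    frontend_session_lifetime: 8
    frontend_session_lifetime_scope: hours
    frontend_session_timeout: 30
    frontend_session_timeout_scope: minutes
    state: present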
+ type: bool + default: false extends_documentation_fragment: -- community.general.utm -- community.general.attributes + - community.general.utm + - community.general.attributes +""" -''' - -EXAMPLES = """ +EXAMPLES = r""" - name: Create UTM proxy_auth_profile community.general.utm_proxy_auth_profile: utm_host: sophos.host.name utm_token: abcdefghijklmno1234 name: TestAuthProfileEntry - aaa: [REF_OBJECT_STRING,REF_ANOTHEROBJECT_STRING] + aaa: [REF_OBJECT_STRING, REF_ANOTHEROBJECT_STRING] basic_prompt: "Authentication required: Please login" frontend_session_lifetime: 1 frontend_session_timeout: 1 @@ -204,99 +201,98 @@ EXAMPLES = """ utm_token: abcdefghijklmno1234 name: TestAuthProfileEntry state: info - """ -RETURN = """ +RETURN = r""" result: - description: The utm object that was created - returned: success - type: complex - contains: - _ref: - description: The reference name of the object - type: str - _locked: - description: Whether or not the object is currently locked - type: bool - _type: - description: The type of the object - type: str - name: - description: The name of the object - type: str - aaa: - description: List of references to utm_aaa objects (allowed users or groups) - type: list - basic_prompt: - description: The message in the basic authentication prompt - type: str - backend_mode: - description: Specifies if the backend server needs authentication ([Basic|None]) - type: str - backend_strip_basic_auth: - description: Should the login data be stripped when proxying the request to the backend host - type: bool - backend_user_prefix: - description: Prefix string to prepend to the username for backend authentication - type: str - backend_user_suffix: - description: Suffix string to append to the username for backend authentication - type: str - comment: - description: Optional comment string - type: str - frontend_cookie: - description: Frontend cookie name - type: str - frontend_form: - description: Frontend authentication form name - type: str - frontend_form_template: - description: Frontend authentication form template - type: str - frontend_login: - description: Frontend login name - type: str - frontend_logout: - description: Frontend logout name - type: str - frontend_mode: - description: Frontend authentication mode (Form|Basic) - type: str - frontend_realm: - description: Frontend authentication realm - type: str - frontend_session_allow_persistency: - description: Allow session persistency - type: bool - frontend_session_lifetime: - description: session lifetime - type: int - frontend_session_lifetime_limited: - description: Specifies if limitation of session lifetime is active - type: bool - frontend_session_lifetime_scope: - description: scope for frontend_session_lifetime (days|hours|minutes) - type: str - frontend_session_timeout: - description: session timeout - type: int - frontend_session_timeout_enabled: - description: Specifies if session timeout is active - type: bool - frontend_session_timeout_scope: - description: scope for frontend_session_timeout (days|hours|minutes) - type: str - logout_delegation_urls: - description: List of logout URLs that logouts are delegated to - type: list - logout_mode: - description: Mode of logout (None|Delegation) - type: str - redirect_to_requested_url: - description: Should a redirect to the requested URL be made - type: bool + description: The utm object that was created. + returned: success + type: complex + contains: + _ref: + description: The reference name of the object. 
+ type: str + _locked: + description: Whether or not the object is currently locked. + type: bool + _type: + description: The type of the object. + type: str + name: + description: The name of the object. + type: str + aaa: + description: List of references to utm_aaa objects (allowed users or groups). + type: list + basic_prompt: + description: The message in the basic authentication prompt. + type: str + backend_mode: + description: Specifies if the backend server needs authentication ([Basic|None]). + type: str + backend_strip_basic_auth: + description: Should the login data be stripped when proxying the request to the backend host. + type: bool + backend_user_prefix: + description: Prefix string to prepend to the username for backend authentication. + type: str + backend_user_suffix: + description: Suffix string to append to the username for backend authentication. + type: str + comment: + description: Optional comment string. + type: str + frontend_cookie: + description: Frontend cookie name. + type: str + frontend_form: + description: Frontend authentication form name. + type: str + frontend_form_template: + description: Frontend authentication form template. + type: str + frontend_login: + description: Frontend login name. + type: str + frontend_logout: + description: Frontend logout name. + type: str + frontend_mode: + description: Frontend authentication mode (Form|Basic). + type: str + frontend_realm: + description: Frontend authentication realm. + type: str + frontend_session_allow_persistency: + description: Allow session persistency. + type: bool + frontend_session_lifetime: + description: Session lifetime. + type: int + frontend_session_lifetime_limited: + description: Specifies if limitation of session lifetime is active. + type: bool + frontend_session_lifetime_scope: + description: Scope for frontend_session_lifetime (days|hours|minutes). + type: str + frontend_session_timeout: + description: Session timeout. + type: int + frontend_session_timeout_enabled: + description: Specifies if session timeout is active. + type: bool + frontend_session_timeout_scope: + description: Scope for frontend_session_timeout (days|hours|minutes). + type: str + logout_delegation_urls: + description: List of logout URLs that logouts are delegated to. + type: list + logout_mode: + description: Mode of logout (None|Delegation). + type: str + redirect_to_requested_url: + description: Should a redirect to the requested URL be made. + type: bool """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule diff --git a/plugins/modules/utm_proxy_exception.py b/plugins/modules/utm_proxy_exception.py index a0a3f85b5b..96cb592e59 100644 --- a/plugins/modules/utm_proxy_exception.py +++ b/plugins/modules/utm_proxy_exception.py @@ -9,130 +9,127 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: utm_proxy_exception author: - - Sebastian Schenzel (@RickS-C137) + - Sebastian Schenzel (@RickS-C137) short_description: Create, update or destroy reverse_proxy exception entry in Sophos UTM description: - - Create, update or destroy a reverse_proxy exception entry in SOPHOS UTM. - - This module needs to have the REST Ability of the UTM to be activated. - + - Create, update or destroy a reverse_proxy exception entry in SOPHOS UTM. + - This module needs to have the REST Ability of the UTM to be activated. 
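# A short illustrative task for community.general.utm_proxy_exception, using options that are
# documented below; the host, token, entry name and path values are placeholders.
- name: Create a reverse_proxy exception that skips antivirus scanning for an API path
  community.general.utm_proxy_exception:
    utm_host: sophos.host.name
    utm_token: abcdefghijklmno1234
    name: TestExceptionEntry
    op: 'AND'
    path: ['/api/']
    skipav: true
    status: true
    state: present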
attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - name: - description: - - The name of the object. Will be used to identify the entry - required: true - type: str - op: - description: - - The operand to be used with the entries of the path parameter - default: 'AND' - choices: - - 'AND' - - 'OR' - required: false - type: str - path: - description: - - The paths the exception in the reverse proxy is defined for - type: list - elements: str - default: [] - required: false - skip_custom_threats_filters: - description: - - A list of threats to be skipped - type: list - elements: str - default: [] - required: false - skip_threats_filter_categories: - description: - - Define which categories of threats are skipped - type: list - elements: str - default: [] - required: false - skipav: - description: - - Skip the Antivirus Scanning - default: false - type: bool - required: false - skipbadclients: - description: - - Block clients with bad reputation - default: false - type: bool - required: false - skipcookie: - description: - - Skip the Cookie Signing check - default: false - type: bool - required: false - skipform: - description: - - Enable form hardening - default: false - type: bool - required: false - skipform_missingtoken: - description: - - Enable form hardening with missing tokens - default: false - type: bool - required: false - skiphtmlrewrite: - description: - - Protection against SQL - default: false - type: bool - required: false - skiptft: - description: - - Enable true file type control - default: false - type: bool - required: false - skipurl: - description: - - Enable static URL hardening - default: false - type: bool - required: false - source: - description: - - Define which categories of threats are skipped - type: list - elements: str - default: [] - required: false - status: - description: - - Status of the exception rule set - default: true - type: bool - required: false + name: + description: + - The name of the object. Will be used to identify the entry. + required: true + type: str + op: + description: + - The operand to be used with the entries of the path parameter. + default: 'AND' + choices: + - 'AND' + - 'OR' + required: false + type: str + path: + description: + - The paths the exception in the reverse proxy is defined for. + type: list + elements: str + default: [] + required: false + skip_custom_threats_filters: + description: + - A list of threats to be skipped. + type: list + elements: str + default: [] + required: false + skip_threats_filter_categories: + description: + - Define which categories of threats are skipped. + type: list + elements: str + default: [] + required: false + skipav: + description: + - Skip the Antivirus Scanning. + default: false + type: bool + required: false + skipbadclients: + description: + - Block clients with bad reputation. + default: false + type: bool + required: false + skipcookie: + description: + - Skip the Cookie Signing check. + default: false + type: bool + required: false + skipform: + description: + - Enable form hardening. + default: false + type: bool + required: false + skipform_missingtoken: + description: + - Enable form hardening with missing tokens. + default: false + type: bool + required: false + skiphtmlrewrite: + description: + - Protection against SQL. + default: false + type: bool + required: false + skiptft: + description: + - Enable true file type control. 
+ default: false + type: bool + required: false + skipurl: + description: + - Enable static URL hardening. + default: false + type: bool + required: false + source: + description: + - Define which categories of threats are skipped. + type: list + elements: str + default: [] + required: false + status: + description: + - Status of the exception rule set. + default: true + type: bool + required: false extends_documentation_fragment: -- community.general.utm -- community.general.attributes + - community.general.utm + - community.general.attributes +""" -''' - -EXAMPLES = """ +EXAMPLES = r""" - name: Create UTM proxy_exception community.general.utm_proxy_exception: utm_host: sophos.host.name @@ -149,66 +146,66 @@ EXAMPLES = """ state: absent """ -RETURN = """ +RETURN = r""" result: - description: The utm object that was created - returned: success - type: complex - contains: - _ref: - description: The reference name of the object - type: str - _locked: - description: Whether or not the object is currently locked - type: bool - _type: - description: The type of the object - type: str - name: - description: The name of the object - type: str - comment: - description: The optional comment string - type: str - op: - description: The operand to be used with the entries of the path parameter - type: str - path: - description: The paths the exception in the reverse proxy is defined for - type: list - skip_custom_threats_filters: - description: A list of threats to be skipped - type: list - skip_threats_filter_categories: - description: Define which categories of threats are skipped - type: list - skipav: - description: Skip the Antivirus Scanning - type: bool - skipbadclients: - description: Block clients with bad reputation - type: bool - skipcookie: - description: Skip the Cookie Signing check - type: bool - skipform: - description: Enable form hardening - type: bool - skipform_missingtoken: - description: Enable form hardening with missing tokens - type: bool - skiphtmlrewrite: - description: Protection against SQL - type: bool - skiptft: - description: Enable true file type control - type: bool - skipurl: - description: Enable static URL hardening - type: bool - source: - description: Define which categories of threats are skipped - type: list + description: The utm object that was created. + returned: success + type: complex + contains: + _ref: + description: The reference name of the object. + type: str + _locked: + description: Whether or not the object is currently locked. + type: bool + _type: + description: The type of the object. + type: str + name: + description: The name of the object. + type: str + comment: + description: The optional comment string. + type: str + op: + description: The operand to be used with the entries of the path parameter. + type: str + path: + description: The paths the exception in the reverse proxy is defined for. + type: list + skip_custom_threats_filters: + description: A list of threats to be skipped. + type: list + skip_threats_filter_categories: + description: Define which categories of threats are skipped. + type: list + skipav: + description: Skip the Antivirus Scanning. + type: bool + skipbadclients: + description: Block clients with bad reputation. + type: bool + skipcookie: + description: Skip the Cookie Signing check. + type: bool + skipform: + description: Enable form hardening. + type: bool + skipform_missingtoken: + description: Enable form hardening with missing tokens. + type: bool + skiphtmlrewrite: + description: Protection against SQL. 
+ type: bool + skiptft: + description: Enable true file type control. + type: bool + skipurl: + description: Enable static URL hardening. + type: bool + source: + description: Define which categories of threats are skipped. + type: list """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule diff --git a/plugins/modules/utm_proxy_frontend.py b/plugins/modules/utm_proxy_frontend.py index 22a773fef8..1c3489f493 100644 --- a/plugins/modules/utm_proxy_frontend.py +++ b/plugins/modules/utm_proxy_frontend.py @@ -9,145 +9,142 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: utm_proxy_frontend author: - - Johannes Brunswicker (@MatrixCrawler) + - Johannes Brunswicker (@MatrixCrawler) short_description: Create, update or destroy reverse_proxy frontend entry in Sophos UTM description: - - Create, update or destroy a reverse_proxy frontend entry in Sophos UTM. - - This module needs to have the REST Ability of the UTM to be activated. - + - Create, update or destroy a reverse_proxy frontend entry in Sophos UTM. + - This module needs to have the REST Ability of the UTM to be activated. attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - name: - type: str - description: - - The name of the object. Will be used to identify the entry - required: true - add_content_type_header : - description: - - Whether to add the content type header or not - type: bool - default: false - address: - type: str - description: - - The reference name of the network/interface_address object. - default: REF_DefaultInternalAddress - allowed_networks: - type: list - elements: str - description: - - A list of reference names for the allowed networks. - default: ['REF_NetworkAny'] - certificate: - type: str - description: - - The reference name of the ca/host_key_cert object. 
- default: "" - comment: - type: str - description: - - An optional comment to add to the object - default: "" - disable_compression: - description: - - Whether to enable the compression - type: bool - default: false - domain: - type: list - elements: str - description: - - A list of domain names for the frontend object - exceptions: - type: list - elements: str - description: - - A list of exception ref names (reverse_proxy/exception) - default: [] - htmlrewrite: - description: - - Whether to enable html rewrite or not - type: bool - default: false - htmlrewrite_cookies: - description: - - Whether to enable html rewrite cookie or not - type: bool - default: false - implicitredirect: - description: - - Whether to enable implicit redirection or not - type: bool - default: false - lbmethod: - type: str - description: - - Which loadbalancer method should be used - choices: - - "" - - bybusyness - - bytraffic - - byrequests - default: bybusyness - locations: - type: list - elements: str - description: - - A list of location ref names (reverse_proxy/location) - default: [] - port: - type: int - description: - - The frontend http port - default: 80 - preservehost: - description: - - Whether to preserve host header - type: bool - default: false - profile: - type: str - description: - - The reference string of the reverse_proxy/profile - default: "" - status: - description: - - Whether to activate the frontend entry or not - type: bool - default: true - type: - type: str - description: - - Which protocol should be used - choices: - - http - - https - default: http - xheaders: - description: - - Whether to pass the host header or not - type: bool - default: false + name: + type: str + description: + - The name of the object. Will be used to identify the entry. + required: true + add_content_type_header: + description: + - Whether to add the content type header or not. + type: bool + default: false + address: + type: str + description: + - The reference name of the network/interface_address object. + default: REF_DefaultInternalAddress + allowed_networks: + type: list + elements: str + description: + - A list of reference names for the allowed networks. + default: ['REF_NetworkAny'] + certificate: + type: str + description: + - The reference name of the ca/host_key_cert object. + default: "" + comment: + type: str + description: + - An optional comment to add to the object. + default: "" + disable_compression: + description: + - Whether to enable the compression. + type: bool + default: false + domain: + type: list + elements: str + description: + - A list of domain names for the frontend object. + exceptions: + type: list + elements: str + description: + - A list of exception ref names (reverse_proxy/exception). + default: [] + htmlrewrite: + description: + - Whether to enable html rewrite or not. + type: bool + default: false + htmlrewrite_cookies: + description: + - Whether to enable html rewrite cookie or not. + type: bool + default: false + implicitredirect: + description: + - Whether to enable implicit redirection or not. + type: bool + default: false + lbmethod: + type: str + description: + - Which loadbalancer method should be used. + choices: + - "" + - bybusyness + - bytraffic + - byrequests + default: bybusyness + locations: + type: list + elements: str + description: + - A list of location ref names (reverse_proxy/location). + default: [] + port: + type: int + description: + - The frontend http port. + default: 80 + preservehost: + description: + - Whether to preserve host header. 
+ type: bool + default: false + profile: + type: str + description: + - The reference string of the reverse_proxy/profile. + default: "" + status: + description: + - Whether to activate the frontend entry or not. + type: bool + default: true + type: + type: str + description: + - Which protocol should be used. + choices: + - http + - https + default: http + xheaders: + description: + - Whether to pass the host header or not. + type: bool + default: false extends_documentation_fragment: -- community.general.utm -- community.general.attributes + - community.general.utm + - community.general.attributes +""" -''' - -EXAMPLES = """ +EXAMPLES = r""" - name: Create utm proxy_frontend community.general.utm_proxy_frontend: utm_host: sophos.host.name @@ -164,81 +161,81 @@ EXAMPLES = """ state: absent """ -RETURN = """ +RETURN = r""" result: - description: The utm object that was created - returned: success - type: complex - contains: - _ref: - description: The reference name of the object - type: str - _locked: - description: Whether or not the object is currently locked - type: bool - _type: - description: The type of the object - type: str - name: - description: The name of the object - type: str - add_content_type_header: - description: Whether to add the content type header - type: bool - address: - description: The reference name of the address - type: str - allowed_networks: - description: List of reference names of networks associated - type: list - certificate: - description: Reference name of certificate (ca/host_key_cert) - type: str - comment: - description: The comment string - type: str - disable_compression: - description: State of compression support - type: bool - domain: - description: List of hostnames - type: list - exceptions: - description: List of associated proxy exceptions - type: list - htmlrewrite: - description: State of html rewrite - type: bool - htmlrewrite_cookies: - description: Whether the html rewrite cookie will be set - type: bool - implicitredirect: - description: Whether to use implicit redirection - type: bool - lbmethod: - description: The method of loadbalancer to use - type: str - locations: - description: The reference names of reverse_proxy/locations associated with the object - type: list - port: - description: The port of the frontend connection - type: int - preservehost: - description: Preserve host header - type: bool - profile: - description: The associated reverse_proxy/profile - type: str - status: - description: Whether the frontend object is active or not - type: bool - type: - description: The connection type - type: str - xheaders: - description: The xheaders state - type: bool + description: The utm object that was created. + returned: success + type: complex + contains: + _ref: + description: The reference name of the object. + type: str + _locked: + description: Whether or not the object is currently locked. + type: bool + _type: + description: The type of the object. + type: str + name: + description: The name of the object. + type: str + add_content_type_header: + description: Whether to add the content type header. + type: bool + address: + description: The reference name of the address. + type: str + allowed_networks: + description: List of reference names of networks associated. + type: list + certificate: + description: Reference name of certificate (ca/host_key_cert). + type: str + comment: + description: The comment string. + type: str + disable_compression: + description: State of compression support. 
+ type: bool + domain: + description: List of hostnames. + type: list + exceptions: + description: List of associated proxy exceptions. + type: list + htmlrewrite: + description: State of html rewrite. + type: bool + htmlrewrite_cookies: + description: Whether the html rewrite cookie will be set. + type: bool + implicitredirect: + description: Whether to use implicit redirection. + type: bool + lbmethod: + description: The method of loadbalancer to use. + type: str + locations: + description: The reference names of reverse_proxy/locations associated with the object. + type: list + port: + description: The port of the frontend connection. + type: int + preservehost: + description: Preserve host header. + type: bool + profile: + description: The associated reverse_proxy/profile. + type: str + status: + description: Whether the frontend object is active or not. + type: bool + type: + description: The connection type. + type: str + xheaders: + description: The xheaders state. + type: bool """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule diff --git a/plugins/modules/utm_proxy_frontend_info.py b/plugins/modules/utm_proxy_frontend_info.py index 0435ef9494..0709cad01e 100644 --- a/plugins/modules/utm_proxy_frontend_info.py +++ b/plugins/modules/utm_proxy_frontend_info.py @@ -9,38 +9,36 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: utm_proxy_frontend_info author: - - Johannes Brunswicker (@MatrixCrawler) + - Johannes Brunswicker (@MatrixCrawler) short_description: Create, update or destroy reverse_proxy frontend entry in Sophos UTM description: - - Create, update or destroy a reverse_proxy frontend entry in SOPHOS UTM. - - This module needs to have the REST Ability of the UTM to be activated. - + - Create, update or destroy a reverse_proxy frontend entry in SOPHOS UTM. + - This module needs to have the REST Ability of the UTM to be activated. attributes: - check_mode: - version_added: 3.3.0 + check_mode: + version_added: 3.3.0 # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - name: - type: str - description: - - The name of the object. Will be used to identify the entry - required: true + name: + type: str + description: + - The name of the object. Will be used to identify the entry. 
+ required: true extends_documentation_fragment: - - community.general.utm - - community.general.attributes - - community.general.attributes.info_module -''' + - community.general.utm + - community.general.attributes + - community.general.attributes.info_module +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Get utm proxy_frontend community.general.utm_proxy_frontend_info: utm_host: sophos.host.name @@ -49,81 +47,81 @@ EXAMPLES = """ host: REF_OBJECT_STRING """ -RETURN = """ +RETURN = r""" result: - description: The utm object that was created - returned: success - type: complex - contains: - _ref: - description: The reference name of the object - type: str - _locked: - description: Whether or not the object is currently locked - type: bool - _type: - description: The type of the object - type: str - name: - description: The name of the object - type: str - add_content_type_header: - description: Whether to add the content type header - type: bool - address: - description: The reference name of the address - type: str - allowed_networks: - description: List of reference names of networks associated - type: list - certificate: - description: Reference name of certificate (ca/host_key_cert) - type: str - comment: - description: The comment string - type: str - disable_compression: - description: State of compression support - type: bool - domain: - description: List of hostnames - type: list - exceptions: - description: List of associated proxy exceptions - type: list - htmlrewrite: - description: State of html rewrite - type: bool - htmlrewrite_cookies: - description: whether the html rewrite cookie will be set - type: bool - implicitredirect: - description: whether to use implicit redirection - type: bool - lbmethod: - description: The method of loadbalancer to use - type: str - locations: - description: The reference names of reverse_proxy/locations associated with the object - type: list - port: - description: The port of the frontend connection - type: int - preservehost: - description: Preserve host header - type: bool - profile: - description: The associated reverse_proxy/profile - type: str - status: - description: Whether the frontend object is active or not - type: bool - type: - description: The connection type - type: str - xheaders: - description: The xheaders state - type: bool + description: The utm object that was created. + returned: success + type: complex + contains: + _ref: + description: The reference name of the object. + type: str + _locked: + description: Whether or not the object is currently locked. + type: bool + _type: + description: The type of the object. + type: str + name: + description: The name of the object. + type: str + add_content_type_header: + description: Whether to add the content type header. + type: bool + address: + description: The reference name of the address. + type: str + allowed_networks: + description: List of reference names of networks associated. + type: list + certificate: + description: Reference name of certificate (ca/host_key_cert). + type: str + comment: + description: The comment string. + type: str + disable_compression: + description: State of compression support. + type: bool + domain: + description: List of hostnames. + type: list + exceptions: + description: List of associated proxy exceptions. + type: list + htmlrewrite: + description: State of html rewrite. + type: bool + htmlrewrite_cookies: + description: Whether the html rewrite cookie will be set. 
+ type: bool + implicitredirect: + description: Whether to use implicit redirection. + type: bool + lbmethod: + description: The method of loadbalancer to use. + type: str + locations: + description: The reference names of reverse_proxy/locations associated with the object. + type: list + port: + description: The port of the frontend connection. + type: int + preservehost: + description: Preserve host header. + type: bool + profile: + description: The associated reverse_proxy/profile. + type: str + status: + description: Whether the frontend object is active or not. + type: bool + type: + description: The connection type. + type: str + xheaders: + description: The xheaders state. + type: bool """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule diff --git a/plugins/modules/utm_proxy_location.py b/plugins/modules/utm_proxy_location.py index 736f564d58..944050bfb6 100644 --- a/plugins/modules/utm_proxy_location.py +++ b/plugins/modules/utm_proxy_location.py @@ -9,111 +9,108 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: utm_proxy_location author: - - Johannes Brunswicker (@MatrixCrawler) + - Johannes Brunswicker (@MatrixCrawler) short_description: Create, update or destroy reverse_proxy location entry in Sophos UTM description: - - Create, update or destroy a reverse_proxy location entry in SOPHOS UTM. - - This module needs to have the REST Ability of the UTM to be activated. - + - Create, update or destroy a reverse_proxy location entry in SOPHOS UTM. + - This module needs to have the REST Ability of the UTM to be activated. attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - name: - type: str - description: - - The name of the object. Will be used to identify the entry - required: true - access_control: - description: - - whether to activate the access control for the location - type: str - default: '0' - choices: - - '0' - - '1' - allowed_networks: - description: - - A list of allowed networks - type: list - elements: str - default: - - REF_NetworkAny - auth_profile: - type: str - description: - - The reference name of the auth profile - default: '' - backend: - type: list - elements: str - description: - - A list of backends that are connected with this location declaration - default: [] - be_path: - type: str - description: - - The path of the backend - default: '' - comment: - type: str - description: - - The optional comment string - default: '' - denied_networks: - type: list - elements: str - description: - - A list of denied network references - default: [] - hot_standby: - description: - - Activate hot standby mode - type: bool - default: false - path: - type: str - description: - - The path of the location - default: "/" - status: - description: - - Whether the location is active or not - type: bool - default: true - stickysession_id: - type: str - description: - - The stickysession id - default: ROUTEID - stickysession_status: - description: - - Enable the stickysession - type: bool - default: false - websocket_passthrough: - description: - - Enable the websocket passthrough - type: bool - default: false + name: + type: str + description: + - The name of the object. Will be used to identify the entry. + required: true + access_control: + description: + - Whether to activate the access control for the location. 
+ type: str + default: '0' + choices: + - '0' + - '1' + allowed_networks: + description: + - A list of allowed networks. + type: list + elements: str + default: + - REF_NetworkAny + auth_profile: + type: str + description: + - The reference name of the auth profile. + default: '' + backend: + type: list + elements: str + description: + - A list of backends that are connected with this location declaration. + default: [] + be_path: + type: str + description: + - The path of the backend. + default: '' + comment: + type: str + description: + - The optional comment string. + default: '' + denied_networks: + type: list + elements: str + description: + - A list of denied network references. + default: [] + hot_standby: + description: + - Activate hot standby mode. + type: bool + default: false + path: + type: str + description: + - The path of the location. + default: "/" + status: + description: + - Whether the location is active or not. + type: bool + default: true + stickysession_id: + type: str + description: + - The stickysession id. + default: ROUTEID + stickysession_status: + description: + - Enable the stickysession. + type: bool + default: false + websocket_passthrough: + description: + - Enable the websocket passthrough. + type: bool + default: false extends_documentation_fragment: -- community.general.utm -- community.general.attributes + - community.general.utm + - community.general.attributes +""" -''' - -EXAMPLES = """ +EXAMPLES = r""" - name: Create UTM proxy_location utm_proxy_backend: utm_host: sophos.host.name @@ -130,63 +127,63 @@ EXAMPLES = """ state: absent """ -RETURN = """ +RETURN = r""" result: - description: The utm object that was created - returned: success - type: complex - contains: - _ref: - description: The reference name of the object - type: str - _locked: - description: Whether or not the object is currently locked - type: bool - _type: - description: The type of the object - type: str - name: - description: The name of the object - type: str - access_control: - description: Whether to use access control state - type: str - allowed_networks: - description: List of allowed network reference names - type: list - auth_profile: - description: The auth profile reference name - type: str - backend: - description: The backend reference name - type: str - be_path: - description: The backend path - type: str - comment: - description: The comment string - type: str - denied_networks: - description: The list of the denied network names - type: list - hot_standby: - description: Use hot standby - type: bool - path: - description: Path name - type: str - status: - description: Whether the object is active or not - type: bool - stickysession_id: - description: The identifier of the stickysession - type: str - stickysession_status: - description: Whether to use stickysession or not - type: bool - websocket_passthrough: - description: Whether websocket passthrough will be used or not - type: bool + description: The utm object that was created. + returned: success + type: complex + contains: + _ref: + description: The reference name of the object. + type: str + _locked: + description: Whether or not the object is currently locked. + type: bool + _type: + description: The type of the object. + type: str + name: + description: The name of the object. + type: str + access_control: + description: Whether to use access control state. + type: str + allowed_networks: + description: List of allowed network reference names. 
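# A minimal sketch for community.general.utm_proxy_location that ties a location to a backend;
# the reference names and the path below are placeholders.
- name: Create a reverse_proxy location for /app
  community.general.utm_proxy_location:
    utm_host: sophos.host.name
    utm_token: abcdefghijklmno1234
    name: TestLocationEntry
    path: /app
    backend: [REF_OBJECT_STRING]
    allowed_networks: [REF_NetworkAny]
    state: present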
+ type: list + auth_profile: + description: The auth profile reference name. + type: str + backend: + description: The backend reference name. + type: str + be_path: + description: The backend path. + type: str + comment: + description: The comment string. + type: str + denied_networks: + description: The list of the denied network names. + type: list + hot_standby: + description: Use hot standby. + type: bool + path: + description: Path name. + type: str + status: + description: Whether the object is active or not. + type: bool + stickysession_id: + description: The identifier of the stickysession. + type: str + stickysession_status: + description: Whether to use stickysession or not. + type: bool + websocket_passthrough: + description: Whether websocket passthrough will be used or not. + type: bool """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule diff --git a/plugins/modules/utm_proxy_location_info.py b/plugins/modules/utm_proxy_location_info.py index 4e4ba9b139..a7ea37ea79 100644 --- a/plugins/modules/utm_proxy_location_info.py +++ b/plugins/modules/utm_proxy_location_info.py @@ -9,38 +9,36 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: utm_proxy_location_info author: - - Johannes Brunswicker (@MatrixCrawler) + - Johannes Brunswicker (@MatrixCrawler) short_description: Create, update or destroy reverse_proxy location entry in Sophos UTM description: - - Create, update or destroy a reverse_proxy location entry in SOPHOS UTM. - - This module needs to have the REST Ability of the UTM to be activated. - + - Create, update or destroy a reverse_proxy location entry in SOPHOS UTM. + - This module needs to have the REST Ability of the UTM to be activated. attributes: - check_mode: - version_added: 3.3.0 + check_mode: + version_added: 3.3.0 # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - name: - type: str - description: - - The name of the object. Will be used to identify the entry - required: true + name: + type: str + description: + - The name of the object. Will be used to identify the entry. 
+ required: true extends_documentation_fragment: - - community.general.utm - - community.general.attributes - - community.general.attributes.info_module -''' + - community.general.utm + - community.general.attributes + - community.general.attributes.info_module +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Remove UTM proxy_location community.general.utm_proxy_location_info: utm_host: sophos.host.name @@ -48,63 +46,63 @@ EXAMPLES = """ name: TestLocationEntry """ -RETURN = """ +RETURN = r""" result: - description: The utm object that was created - returned: success - type: complex - contains: - _ref: - description: The reference name of the object - type: str - _locked: - description: Whether or not the object is currently locked - type: bool - _type: - description: The type of the object - type: str - name: - description: The name of the object - type: str - access_control: - description: Whether to use access control state - type: str - allowed_networks: - description: List of allowed network reference names - type: list - auth_profile: - description: The auth profile reference name - type: str - backend: - description: The backend reference name - type: str - be_path: - description: The backend path - type: str - comment: - description: The comment string - type: str - denied_networks: - description: The list of the denied network names - type: list - hot_standby: - description: Use hot standby - type: bool - path: - description: Path name - type: str - status: - description: Whether the object is active or not - type: bool - stickysession_id: - description: The identifier of the stickysession - type: str - stickysession_status: - description: Whether to use stickysession or not - type: bool - websocket_passthrough: - description: Whether websocket passthrough will be used or not - type: bool + description: The utm object that was created. + returned: success + type: complex + contains: + _ref: + description: The reference name of the object. + type: str + _locked: + description: Whether or not the object is currently locked. + type: bool + _type: + description: The type of the object. + type: str + name: + description: The name of the object. + type: str + access_control: + description: Whether to use access control state. + type: str + allowed_networks: + description: List of allowed network reference names. + type: list + auth_profile: + description: The auth profile reference name. + type: str + backend: + description: The backend reference name. + type: str + be_path: + description: The backend path. + type: str + comment: + description: The comment string. + type: str + denied_networks: + description: The list of the denied network names. + type: list + hot_standby: + description: Use hot standby. + type: bool + path: + description: Path name. + type: str + status: + description: Whether the object is active or not. + type: bool + stickysession_id: + description: The identifier of the stickysession. + type: str + stickysession_status: + description: Whether to use stickysession or not. + type: bool + websocket_passthrough: + description: Whether websocket passthrough will be used or not. 
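# A hedged example of reading a location with community.general.utm_proxy_location_info and
# consuming the documented result fields; the registered variable name is an assumption.
- name: Get UTM proxy_location info
  community.general.utm_proxy_location_info:
    utm_host: sophos.host.name
    utm_token: abcdefghijklmno1234
    name: TestLocationEntry
  register: location_info

- name: Show the backend path of the location
  ansible.builtin.debug:
    msg: "{{ location_info.result.be_path }}"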
+ type: bool """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule diff --git a/plugins/modules/vdo.py b/plugins/modules/vdo.py index 8b0e745960..c7df2d234c 100644 --- a/plugins/modules/vdo.py +++ b/plugins/modules/vdo.py @@ -8,10 +8,9 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" author: - - Bryan Gurney (@bgurney-rh) + - Bryan Gurney (@bgurney-rh) module: vdo @@ -19,277 +18,189 @@ short_description: Module to control VDO description: - - This module controls the VDO dedupe and compression device. - - VDO, or Virtual Data Optimizer, is a device-mapper target that - provides inline block-level deduplication, compression, and - thin provisioning capabilities to primary storage. - + - This module controls the VDO dedupe and compression device. + - VDO, or Virtual Data Optimizer, is a device-mapper target that provides inline block-level deduplication, compression, and thin provisioning + capabilities to primary storage. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none options: - name: - description: - - The name of the VDO volume. - type: str - required: true - state: - description: - - Whether this VDO volume should be "present" or "absent". - If a "present" VDO volume does not exist, it will be - created. If a "present" VDO volume already exists, it - will be modified, by updating the configuration, which - will take effect when the VDO volume is restarted. - Not all parameters of an existing VDO volume can be - modified; the "statusparamkeys" list contains the - parameters that can be modified after creation. If an - "absent" VDO volume does not exist, it will not be - removed. - type: str - choices: [ absent, present ] - default: present - activated: - description: - - The "activate" status for a VDO volume. If this is set - to V(false), the VDO volume cannot be started, and it will - not start on system startup. However, on initial - creation, a VDO volume with "activated" set to "off" - will be running, until stopped. This is the default - behavior of the "vdo create" command; it provides the - user an opportunity to write a base amount of metadata - (filesystem, LVM headers, etc.) to the VDO volume prior - to stopping the volume, and leaving it deactivated - until ready to use. - type: bool - running: - description: - - Whether this VDO volume is running. - - A VDO volume must be activated in order to be started. - type: bool - device: - description: - - The full path of the device to use for VDO storage. - - This is required if "state" is "present". - type: str - logicalsize: - description: - - The logical size of the VDO volume (in megabytes, or - LVM suffix format). If not specified for a new volume, - this defaults to the same size as the underlying storage - device, which is specified in the 'device' parameter. - Existing volumes will maintain their size if the - logicalsize parameter is not specified, or is smaller - than or identical to the current size. If the specified - size is larger than the current size, a growlogical - operation will be performed. - type: str - deduplication: - description: - - Configures whether deduplication is enabled. The - default for a created volume is 'enabled'. 
Existing - volumes will maintain their previously configured - setting unless a different value is specified in the - playbook. - type: str - choices: [ disabled, enabled ] - compression: - description: - - Configures whether compression is enabled. The default - for a created volume is 'enabled'. Existing volumes - will maintain their previously configured setting unless - a different value is specified in the playbook. - type: str - choices: [ disabled, enabled ] - blockmapcachesize: - description: - - The amount of memory allocated for caching block map - pages, in megabytes (or may be issued with an LVM-style - suffix of K, M, G, or T). The default (and minimum) - value is 128M. The value specifies the size of the - cache; there is a 15% memory usage overhead. Each 1.25G - of block map covers 1T of logical blocks, therefore a - small amount of block map cache memory can cache a - significantly large amount of block map data. Existing - volumes will maintain their previously configured - setting unless a different value is specified in the - playbook. - type: str - readcache: - description: - - Enables or disables the read cache. The default is - 'disabled'. Choosing 'enabled' enables a read cache - which may improve performance for workloads of high - deduplication, read workloads with a high level of - compression, or on hard disk storage. Existing - volumes will maintain their previously configured - setting unless a different value is specified in the - playbook. - - The read cache feature is available in VDO 6.1 and older. - type: str - choices: [ disabled, enabled ] - readcachesize: - description: - - Specifies the extra VDO device read cache size in - megabytes. This is in addition to a system-defined - minimum. Using a value with a suffix of K, M, G, or T - is optional. The default value is 0. 1.125 MB of - memory per bio thread will be used per 1 MB of read - cache specified (for example, a VDO volume configured - with 4 bio threads will have a read cache memory usage - overhead of 4.5 MB per 1 MB of read cache specified). - Existing volumes will maintain their previously - configured setting unless a different value is specified - in the playbook. - - The read cache feature is available in VDO 6.1 and older. - type: str - emulate512: - description: - - Enables 512-byte emulation mode, allowing drivers or - filesystems to access the VDO volume at 512-byte - granularity, instead of the default 4096-byte granularity. - Default is 'disabled'; only recommended when a driver - or filesystem requires 512-byte sector level access to - a device. This option is only available when creating - a new volume, and cannot be changed for an existing - volume. - type: bool - default: false - growphysical: - description: - - Specifies whether to attempt to execute a growphysical - operation, if there is enough unused space on the - device. A growphysical operation will be executed if - there is at least 64 GB of free space, relative to the - previous physical size of the affected VDO volume. - type: bool - default: false - slabsize: - description: - - The size of the increment by which the physical size of - a VDO volume is grown, in megabytes (or may be issued - with an LVM-style suffix of K, M, G, or T). Must be a - power of two between 128M and 32G. The default is 2G, - which supports volumes having a physical size up to 16T. - The maximum, 32G, supports a physical size of up to 256T. - This option is only available when creating a new - volume, and cannot be changed for an existing volume. 
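# A minimal sketch for community.general.vdo based on the options documented above, assuming
# /dev/sdb is unused backing storage; the device path and the logical size are placeholders.
- name: Create and start a VDO volume with deduplication and compression enabled
  community.general.vdo:
    name: vdo1
    state: present
    device: /dev/sdb
    logicalsize: 10T
    deduplication: enabled
    compression: enabled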
- type: str - writepolicy: - description: - - Specifies the write policy of the VDO volume. The - 'sync' mode acknowledges writes only after data is on - stable storage. The 'async' mode acknowledges writes - when data has been cached for writing to stable - storage. The default (and highly recommended) 'auto' - mode checks the storage device to determine whether it - supports flushes. Devices that support flushes will - result in a VDO volume in 'async' mode, while devices - that do not support flushes will run in sync mode. - Existing volumes will maintain their previously - configured setting unless a different value is - specified in the playbook. - type: str - choices: [ async, auto, sync ] - indexmem: - description: - - Specifies the amount of index memory in gigabytes. The - default is 0.25. The special decimal values 0.25, 0.5, - and 0.75 can be used, as can any positive integer. - This option is only available when creating a new - volume, and cannot be changed for an existing volume. - type: str - indexmode: - description: - - Specifies the index mode of the Albireo index. The - default is 'dense', which has a deduplication window of - 1 GB of index memory per 1 TB of incoming data, - requiring 10 GB of index data on persistent storage. - The 'sparse' mode has a deduplication window of 1 GB of - index memory per 10 TB of incoming data, but requires - 100 GB of index data on persistent storage. This option - is only available when creating a new volume, and cannot - be changed for an existing volume. - type: str - choices: [ dense, sparse ] - ackthreads: - description: - - Specifies the number of threads to use for - acknowledging completion of requested VDO I/O operations. - Valid values are integer values from 1 to 100 (lower - numbers are preferable due to overhead). The default is - 1. Existing volumes will maintain their previously - configured setting unless a different value is specified - in the playbook. - type: str - biothreads: - description: - - Specifies the number of threads to use for submitting I/O - operations to the storage device. Valid values are - integer values from 1 to 100 (lower numbers are - preferable due to overhead). The default is 4. - Existing volumes will maintain their previously - configured setting unless a different value is specified - in the playbook. - type: str - cputhreads: - description: - - Specifies the number of threads to use for CPU-intensive - work such as hashing or compression. Valid values are - integer values from 1 to 100 (lower numbers are - preferable due to overhead). The default is 2. - Existing volumes will maintain their previously - configured setting unless a different value is specified - in the playbook. - type: str - logicalthreads: - description: - - Specifies the number of threads across which to - subdivide parts of the VDO processing based on logical - block addresses. Valid values are integer values from - 1 to 100 (lower numbers are preferable due to overhead). - The default is 1. Existing volumes will maintain their - previously configured setting unless a different value - is specified in the playbook. - type: str - physicalthreads: - description: - - Specifies the number of threads across which to - subdivide parts of the VDO processing based on physical - block addresses. Valid values are integer values from - 1 to 16 (lower numbers are preferable due to overhead). - The physical space used by the VDO volume must be - larger than (slabsize * physicalthreads). The default - is 1. 
Existing volumes will maintain their previously - configured setting unless a different value is specified - in the playbook. - type: str - force: - description: - - When creating a volume, ignores any existing file system - or VDO signature already present in the storage device. - When stopping or removing a VDO volume, first unmounts - the file system stored on the device if mounted. - - "B(Warning:) Since this parameter removes all safety - checks it is important to make sure that all parameters - provided are accurate and intentional." - type: bool - default: false - version_added: 2.4.0 + name: + description: + - The name of the VDO volume. + type: str + required: true + state: + description: + - Whether this VDO volume should be V(present) or V(absent). If a V(present) VDO volume does not exist, it will be created. If a V(present) + VDO volume already exists, it will be modified, by updating the configuration, which will take effect when the VDO volume is restarted. + Not all parameters of an existing VDO volume can be modified; the C(statusparamkeys) list in the code contains the parameters that can + be modified after creation. If an V(absent) VDO volume does not exist, it will not be removed. + type: str + choices: [absent, present] + default: present + activated: + description: + - The C(activate) status for a VDO volume. If this is set to V(false), the VDO volume cannot be started, and it will not start on system + startup. However, on initial creation, a VDO volume with "activated" set to "off" will be running, until stopped. This is the default + behavior of the C(vdo create) command; it provides the user an opportunity to write a base amount of metadata (filesystem, LVM headers, + etc.) to the VDO volume prior to stopping the volume, and leaving it deactivated until ready to use. + type: bool + running: + description: + - Whether this VDO volume is running. + - A VDO volume must be activated in order to be started. + type: bool + device: + description: + - The full path of the device to use for VDO storage. + - This is required if O(state=present). + type: str + logicalsize: + description: + - The logical size of the VDO volume (in megabytes, or LVM suffix format). If not specified for a new volume, this defaults to the same + size as the underlying storage device, which is specified in the O(device) parameter. Existing volumes will maintain their size if the + logicalsize parameter is not specified, or is smaller than or identical to the current size. If the specified size is larger than the + current size, a C(growlogical) operation will be performed. + type: str + deduplication: + description: + - Configures whether deduplication is enabled. The default for a created volume is V(enabled). Existing volumes will maintain their previously + configured setting unless a different value is specified in the playbook. + type: str + choices: [disabled, enabled] + compression: + description: + - Configures whether compression is enabled. The default for a created volume is V(enabled). Existing volumes will maintain their previously + configured setting unless a different value is specified in the playbook. + type: str + choices: [disabled, enabled] + blockmapcachesize: + description: + - The amount of memory allocated for caching block map pages, in megabytes (or may be issued with an LVM-style suffix of K, M, G, or T). + The default (and minimum) value is V(128M). The value specifies the size of the cache; there is a 15% memory usage overhead. 
Each 1.25G + of block map covers 1T of logical blocks, therefore a small amount of block map cache memory can cache a significantly large amount of + block map data. + - Existing volumes will maintain their previously configured setting unless a different value is specified in the playbook. + type: str + readcache: + description: + - Enables or disables the read cache. The default is V(disabled). Choosing V(enabled) enables a read cache which may improve performance + for workloads of high deduplication, read workloads with a high level of compression, or on hard disk storage. Existing volumes will maintain + their previously configured setting unless a different value is specified in the playbook. + - The read cache feature is available in VDO 6.1 and older. + type: str + choices: [disabled, enabled] + readcachesize: + description: + - Specifies the extra VDO device read cache size in megabytes. This is in addition to a system-defined minimum. Using a value with a suffix + of K, M, G, or T is optional. The default value is V(0). 1.125 MB of memory per bio thread will be used per 1 MB of read cache specified + (for example, a VDO volume configured with 4 bio threads will have a read cache memory usage overhead of 4.5 MB per 1 MB of read cache + specified). Existing volumes will maintain their previously configured setting unless a different value is specified in the playbook. + - The read cache feature is available in VDO 6.1 and older. + type: str + emulate512: + description: + - Enables 512-byte emulation mode, allowing drivers or filesystems to access the VDO volume at 512-byte granularity, instead of the default + 4096-byte granularity. + - Only recommended when a driver or filesystem requires 512-byte sector level access to a device. + - This option is only available when creating a new volume, and cannot be changed for an existing volume. + type: bool + default: false + growphysical: + description: + - Specifies whether to attempt to execute a C(growphysical) operation, if there is enough unused space on the device. A C(growphysical) + operation will be executed if there is at least 64 GB of free space, relative to the previous physical size of the affected VDO volume. + type: bool + default: false + slabsize: + description: + - The size of the increment by which the physical size of a VDO volume is grown, in megabytes (or may be issued with an LVM-style suffix + of K, M, G, or T). Must be a power of two between 128M and 32G. The default is V(2G), which supports volumes having a physical size up + to 16T. The maximum, V(32G), supports a physical size of up to 256T. This option is only available when creating a new volume, and cannot + be changed for an existing volume. + type: str + writepolicy: + description: + - Specifies the write policy of the VDO volume. + - The V(sync) mode acknowledges writes only after data is on stable storage. + - The V(async) mode acknowledges writes when data has been cached for writing to stable storage. + - The default (and highly recommended) V(auto) mode checks the storage device to determine whether it supports flushes. Devices that support + flushes will result in a VDO volume in V(async) mode, while devices that do not support flushes will run in V(sync) mode. + - Existing volumes will maintain their previously configured setting unless a different value is specified in the playbook. + type: str + choices: [async, auto, sync] + indexmem: + description: + - Specifies the amount of index memory in gigabytes. The default is V(0.25). 
The special decimal values V(0.25), V(0.5), and V(0.75) can + be used, as can any positive integer. This option is only available when creating a new volume, and cannot be changed for an existing + volume. + type: str + indexmode: + description: + - Specifies the index mode of the Albireo index. + - The default is V(dense), which has a deduplication window of 1 GB of index memory per 1 TB of incoming data, requiring 10 GB of index + data on persistent storage. + - The V(sparse) mode has a deduplication window of 1 GB of index memory per 10 TB of incoming data, but requires 100 GB of index data on + persistent storage. + - This option is only available when creating a new volume, and cannot be changed for an existing volume. + type: str + choices: [dense, sparse] + ackthreads: + description: + - Specifies the number of threads to use for acknowledging completion of requested VDO I/O operations. Valid values are integer values from + V(1) to V(100) (lower numbers are preferable due to overhead). The default is V(1). Existing volumes will maintain their previously configured + setting unless a different value is specified in the playbook. + type: str + biothreads: + description: + - Specifies the number of threads to use for submitting I/O operations to the storage device. Valid values are integer values from V(1) + to V(100) (lower numbers are preferable due to overhead). The default is V(4). Existing volumes will maintain their previously configured + setting unless a different value is specified in the playbook. + type: str + cputhreads: + description: + - Specifies the number of threads to use for CPU-intensive work such as hashing or compression. Valid values are integer values from V(1) + to V(100) (lower numbers are preferable due to overhead). The default is V(2). Existing volumes will maintain their previously configured + setting unless a different value is specified in the playbook. + type: str + logicalthreads: + description: + - Specifies the number of threads across which to subdivide parts of the VDO processing based on logical block addresses. Valid values are + integer values from V(1) to V(100) (lower numbers are preferable due to overhead). The default is V(1). Existing volumes will maintain + their previously configured setting unless a different value is specified in the playbook. + type: str + physicalthreads: + description: + - Specifies the number of threads across which to subdivide parts of the VDO processing based on physical block addresses. Valid values + are integer values from V(1) to V(16) (lower numbers are preferable due to overhead). The physical space used by the VDO volume must be + larger than (O(slabsize) * O(physicalthreads)). The default is V(1). Existing volumes will maintain their previously configured setting + unless a different value is specified in the playbook. + type: str + force: + description: + - When creating a volume, ignores any existing file system or VDO signature already present in the storage device. When stopping or removing + a VDO volume, first unmounts the file system stored on the device if mounted. + - B(Warning:) Since this parameter removes all safety checks it is important to make sure that all parameters provided are accurate and + intentional. + type: bool + default: false + version_added: 2.4.0 notes: - In general, the default thread configuration should be used. 
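As a quick illustration of the vdo options documented above, a minimal sketch (the device path and sizes are placeholders, not part of this change): creating a deduplicated, compressed volume and later growing it, which performs the growlogical operation described under logicalsize.

    - name: Ensure VDO volume vdo1 exists with deduplication and compression enabled
      community.general.vdo:
        name: vdo1
        state: present
        device: /dev/md0
        logicalsize: 4T
        deduplication: enabled
        compression: enabled
        writepolicy: auto

    - name: Grow the same volume later; a larger logicalsize triggers a growlogical operation
      community.general.vdo:
        name: vdo1
        state: present
        device: /dev/md0
        logicalsize: 8T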
requirements: - PyYAML - kmod-kvdo - vdo -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create 2 TB VDO volume vdo1 on device /dev/md0 community.general.vdo: name: vdo1 @@ -301,9 +212,9 @@ EXAMPLES = r''' community.general.vdo: name: vdo1 state: absent -''' +""" -RETURN = r'''# ''' +RETURN = r"""# """ from ansible.module_utils.basic import AnsibleModule, missing_required_lib import re diff --git a/plugins/modules/vertica_configuration.py b/plugins/modules/vertica_configuration.py index 09b80df3d7..9ce2e42d15 100644 --- a/plugins/modules/vertica_configuration.py +++ b/plugins/modules/vertica_configuration.py @@ -8,14 +8,13 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: vertica_configuration short_description: Updates Vertica configuration parameters description: - - Updates Vertica configuration parameters. + - Updates Vertica configuration parameters. extends_documentation_fragment: - - community.general.attributes + - community.general.attributes attributes: check_mode: support: full @@ -24,51 +23,49 @@ attributes: options: parameter: description: - - Name of the parameter to update. + - Name of the parameter to update. required: true aliases: [name] type: str value: description: - - Value of the parameter to be set. + - Value of the parameter to be set. type: str db: description: - - Name of the Vertica database. + - Name of the Vertica database. type: str cluster: description: - - Name of the Vertica cluster. + - Name of the Vertica cluster. default: localhost type: str port: description: - - Vertica cluster port to connect to. + - Vertica cluster port to connect to. default: '5433' type: str login_user: description: - - The username used to authenticate with. + - The username used to authenticate with. default: dbadmin type: str login_password: description: - - The password used to authenticate with. + - The password used to authenticate with. type: str notes: - - The default authentication assumes that you are either logging in as or sudo'ing - to the C(dbadmin) account on the host. - - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure - that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. - - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) - to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) - and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) - to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). -requirements: [ 'unixODBC', 'pyodbc' ] + - The default authentication assumes that you are either logging in as or sudo'ing to the C(dbadmin) account on the host. + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure that C(unixODBC) and C(pyodbc) is installed on the host and properly + configured. + - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) to be added to the C(Vertica) section of either + C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) to + be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). 
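One way to lay down the unixODBC configuration these notes describe, sketched with community.general.ini_file against the system-wide files (the per-user $HOME variants work the same way):

    - name: Register the Vertica ODBC driver
      community.general.ini_file:
        path: /etc/odbcinst.ini
        section: Vertica
        option: Driver
        value: /opt/vertica/lib64/libverticaodbc.so

    - name: Configure the driver manager settings
      community.general.ini_file:
        path: /etc/vertica.ini
        section: Driver
        option: "{{ item.option }}"
        value: "{{ item.value }}"
      loop:
        - { option: ErrorMessagesPath, value: /opt/vertica/lib64 }
        - { option: DriverManagerEncoding, value: UTF-16 }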
+requirements: ['unixODBC', 'pyodbc'] author: "Dariusz Owczarek (@dareko)" -''' +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Updating load_balance_policy community.general.vertica_configuration: name=failovertostandbyafter value='8 hours' """ diff --git a/plugins/modules/vertica_info.py b/plugins/modules/vertica_info.py index 93ccc68445..bfb99552a0 100644 --- a/plugins/modules/vertica_info.py +++ b/plugins/modules/vertica_info.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: vertica_info short_description: Gathers Vertica database facts description: @@ -25,8 +24,7 @@ options: default: localhost type: str port: - description: - Database port to connect to. + description: Database port to connect to. default: '5433' type: str db: @@ -43,19 +41,17 @@ options: - The password used to authenticate with. type: str notes: - - The default authentication assumes that you are either logging in as or sudo'ing - to the C(dbadmin) account on the host. - - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure - that C(unixODBC) and C(pyodbc) are installed on the host and properly configured. - - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) - to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) - and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) - to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). -requirements: [ 'unixODBC', 'pyodbc' ] + - The default authentication assumes that you are either logging in as or sudo'ing to the C(dbadmin) account on the host. + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure that C(unixODBC) and C(pyodbc) are installed on the host and properly + configured. + - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) to be added to the C(Vertica) section of either + C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) to + be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). +requirements: ['unixODBC', 'pyodbc'] author: "Dariusz Owczarek (@dareko)" -''' +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Gathering vertica facts community.general.vertica_info: db=db_name register: result diff --git a/plugins/modules/vertica_role.py b/plugins/modules/vertica_role.py index a1ef40c7a5..c3e15b4b95 100644 --- a/plugins/modules/vertica_role.py +++ b/plugins/modules/vertica_role.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: vertica_role short_description: Adds or removes Vertica database roles and assigns roles to them description: @@ -64,19 +63,17 @@ options: - The password used to authenticate with. type: str notes: - - The default authentication assumes that you are either logging in as or sudo'ing - to the C(dbadmin) account on the host. - - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure - that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. 
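For comparison with the key=value style used in the vertica_configuration example shown earlier, the same call in full YAML dictionary form (db_name is a placeholder; parameter is the canonical name behind the name alias):

    - name: Update a configuration parameter using YAML dictionary syntax
      community.general.vertica_configuration:
        parameter: failovertostandbyafter
        value: '8 hours'
        db: db_name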
- - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) - to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) - and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) - to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). -requirements: [ 'unixODBC', 'pyodbc' ] + - The default authentication assumes that you are either logging in as or sudo'ing to the C(dbadmin) account on the host. + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure that C(unixODBC) and C(pyodbc) is installed on the host and properly + configured. + - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) to be added to the C(Vertica) section of either + C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) to + be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). +requirements: ['unixODBC', 'pyodbc'] author: "Dariusz Owczarek (@dareko)" -''' +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Creating a new vertica role community.general.vertica_role: name=role_name db=db_name state=present diff --git a/plugins/modules/vertica_schema.py b/plugins/modules/vertica_schema.py index 95e434ef3a..b9e243ec7b 100644 --- a/plugins/modules/vertica_schema.py +++ b/plugins/modules/vertica_schema.py @@ -9,17 +9,13 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: vertica_schema short_description: Adds or removes Vertica database schema and roles description: - - Adds or removes Vertica database schema and, optionally, roles - with schema access privileges. + - Adds or removes Vertica database schema and, optionally, roles with schema access privileges. - A schema will not be removed until all the objects have been dropped. - - In such a situation, if the module tries to remove the schema it - will fail and only remove roles created for the schema if they have - no dependencies. + - In such a situation, if the module tries to remove the schema it will fail and only remove roles created for the schema if they have no dependencies. extends_documentation_fragment: - community.general.attributes attributes: @@ -78,19 +74,17 @@ options: - The password used to authenticate with. type: str notes: - - The default authentication assumes that you are either logging in as or sudo'ing - to the C(dbadmin) account on the host. - - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure - that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. - - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) - to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) - and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) - to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). -requirements: [ 'unixODBC', 'pyodbc' ] + - The default authentication assumes that you are either logging in as or sudo'ing to the C(dbadmin) account on the host. + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure that C(unixODBC) and C(pyodbc) is installed on the host and properly + configured. 
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) to be added to the C(Vertica) section of either + C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) to + be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). +requirements: ['unixODBC', 'pyodbc'] author: "Dariusz Owczarek (@dareko)" -''' +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Creating a new vertica schema community.general.vertica_schema: name=schema_name db=db_name state=present @@ -98,12 +92,7 @@ EXAMPLES = """ community.general.vertica_schema: name=schema_name owner=dbowner db=db_name state=present - name: Creating a new schema with roles - community.general.vertica_schema: - name=schema_name - create_roles=schema_name_all - usage_roles=schema_name_ro,schema_name_rw - db=db_name - state=present + community.general.vertica_schema: name=schema_name create_roles=schema_name_all usage_roles=schema_name_ro,schema_name_rw db=db_name state=present """ import traceback diff --git a/plugins/modules/vertica_user.py b/plugins/modules/vertica_user.py index 7a62bec44c..c73e0d54fd 100644 --- a/plugins/modules/vertica_user.py +++ b/plugins/modules/vertica_user.py @@ -8,15 +8,13 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: vertica_user short_description: Adds or removes Vertica database users and assigns roles description: - Adds or removes Vertica database user and, optionally, assigns roles. - A user will not be removed until all the dependencies have been dropped. - - In such a situation, if the module tries to remove the user it - will fail and only remove roles granted to the user. + - In such a situation, if the module tries to remove the user it will fail and only remove roles granted to the user. extends_documentation_fragment: - community.general.attributes attributes: @@ -42,9 +40,8 @@ options: password: description: - The user's password encrypted by the MD5 algorithm. - - The password must be generated with the format C("md5" + md5[password + username]), - resulting in a total of 35 characters. An easy way to do this is by querying - the Vertica database with select V('md5'||md5(''\)). + - The password must be generated with the format C("md5" + md5[password + username]), resulting in a total of 35 characters. An easy way + to do this is by querying the Vertica database with select V('md5'||md5(''\)). type: str expired: description: @@ -90,29 +87,22 @@ options: - The password used to authenticate with. type: str notes: - - The default authentication assumes that you are either logging in as or sudo'ing - to the C(dbadmin) account on the host. - - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure - that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. - - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) - to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) - and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) - to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). -requirements: [ 'unixODBC', 'pyodbc' ] + - The default authentication assumes that you are either logging in as or sudo'ing to the C(dbadmin) account on the host. 
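The md5-format password described under the vertica_user password option can also be computed on the controller with Jinja's hash filter instead of querying the database; a sketch, where user_name and user_password are assumed playbook variables:

    - name: Create a Vertica user with a locally computed md5-format password
      community.general.vertica_user:
        name: "{{ user_name }}"
        password: "md5{{ (user_password ~ user_name) | hash('md5') }}"
        db: db_name
        state: present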
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure that C(unixODBC) and C(pyodbc) is installed on the host and properly + configured. + - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) to be added to the C(Vertica) section of either + C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) to + be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). +requirements: ['unixODBC', 'pyodbc'] author: "Dariusz Owczarek (@dareko)" -''' +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Creating a new vertica user with password community.general.vertica_user: name=user_name password=md5 db=db_name state=present - name: Creating a new vertica user authenticated via ldap with roles assigned - community.general.vertica_user: - name=user_name - ldap=true - db=db_name - roles=schema_name_ro - state=present + community.general.vertica_user: name=user_name ldap=true db=db_name roles=schema_name_ro state=present """ import traceback diff --git a/plugins/modules/vexata_eg.py b/plugins/modules/vexata_eg.py index 457d1fa9ed..f7184d68b0 100644 --- a/plugins/modules/vexata_eg.py +++ b/plugins/modules/vexata_eg.py @@ -9,15 +9,13 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: vexata_eg short_description: Manage export groups on Vexata VX100 storage arrays description: - - Create or delete export groups on a Vexata VX100 array. - - An export group is a tuple of a volume group, initiator group and port - group that allows a set of volumes to be exposed to one or more hosts - through specific array ports. + - Create or delete export groups on a Vexata VX100 array. + - An export group is a tuple of a volume group, initiator group and port group that allows a set of volumes to be exposed to one or more hosts + through specific array ports. author: - Sandeep Kasargod (@vexata) attributes: @@ -33,29 +31,28 @@ options: type: str state: description: - - Creates export group when present or delete when absent. + - Creates export group when present or delete when absent. default: present - choices: [ present, absent ] + choices: [present, absent] type: str vg: description: - - Volume group name. + - Volume group name. type: str ig: description: - - Initiator group name. + - Initiator group name. type: str pg: description: - - Port group name. + - Port group name. type: str extends_documentation_fragment: -- community.general.vexata.vx100 -- community.general.attributes + - community.general.vexata.vx100 + - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Create export group named db_export. 
community.general.vexata_eg: name: db_export @@ -74,10 +71,10 @@ EXAMPLES = r''' array: vx100_ultra.test.com user: admin password: secret -''' +""" -RETURN = r''' -''' +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.vexata import ( diff --git a/plugins/modules/vexata_volume.py b/plugins/modules/vexata_volume.py index 7fdfc7e5fa..29136eb31e 100644 --- a/plugins/modules/vexata_volume.py +++ b/plugins/modules/vexata_volume.py @@ -9,14 +9,13 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: vexata_volume short_description: Manage volumes on Vexata VX100 storage arrays description: - - Create, deletes or extend volumes on a Vexata VX100 array. + - Create, deletes or extend volumes on a Vexata VX100 array. author: -- Sandeep Kasargod (@vexata) + - Sandeep Kasargod (@vexata) attributes: check_mode: support: full @@ -32,19 +31,18 @@ options: description: - Creates/Modifies volume when present or removes when absent. default: present - choices: [ present, absent ] + choices: [present, absent] type: str size: description: - Volume size in M, G, T units. M=2^20, G=2^30, T=2^40 bytes. type: str extends_documentation_fragment: -- community.general.vexata.vx100 -- community.general.attributes + - community.general.vexata.vx100 + - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Create new 2 TiB volume named foo community.general.vexata_volume: name: foo @@ -70,10 +68,10 @@ EXAMPLES = r''' array: vx100_ultra.test.com user: admin password: secret -''' +""" -RETURN = r''' -''' +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.vexata import ( diff --git a/plugins/modules/vmadm.py b/plugins/modules/vmadm.py index bfe6148375..148ca18b86 100644 --- a/plugins/modules/vmadm.py +++ b/plugins/modules/vmadm.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: vmadm short_description: Manage SmartOS virtual machines and zones description: @@ -27,8 +26,7 @@ options: archive_on_delete: required: false description: - - When enabled, the zone dataset will be mounted on C(/zones/archive) - upon removal. + - When enabled, the zone dataset will be mounted on C(/zones/archive) upon removal. type: bool autoboot: required: false @@ -36,7 +34,7 @@ options: - Whether or not a VM is booted when the system is rebooted. type: bool brand: - choices: [ joyent, joyent-minimal, lx, kvm, bhyve ] + choices: [joyent, joyent-minimal, lx, kvm, bhyve] default: joyent description: - Type of virtual machine. The V(bhyve) option was added in community.general 0.2.0. @@ -49,18 +47,16 @@ options: cpu_cap: required: false description: - - Sets a limit on the amount of CPU time that can be used by a VM. - Use V(0) for no cap. + - Sets a limit on the amount of CPU time that can be used by a VM. Use V(0) for no cap. type: int cpu_shares: required: false description: - - Sets a limit on the number of fair share scheduler (FSS) CPU shares for - a VM. This limit is relative to all other VMs on the system. + - Sets a limit on the number of fair share scheduler (FSS) CPU shares for a VM. This limit is relative to all other VMs on the system. 
type: int cpu_type: required: false - choices: [ qemu64, host ] + choices: [qemu64, host] default: qemu64 description: - Control the type of virtual CPU exposed to KVM VMs. @@ -68,8 +64,7 @@ options: customer_metadata: required: false description: - - Metadata to be set and associated with this VM, this contain customer - modifiable keys. + - Metadata to be set and associated with this VM, this contain customer modifiable keys. type: dict delegate_dataset: required: false @@ -141,14 +136,12 @@ options: internal_metadata: required: false description: - - Metadata to be set and associated with this VM, this contains operator - generated keys. + - Metadata to be set and associated with this VM, this contains operator generated keys. type: dict internal_metadata_namespace: required: false description: - - List of namespaces to be set as C(internal_metadata-only); these namespaces - will come from O(internal_metadata) rather than O(customer_metadata). + - List of namespaces to be set as C(internal_metadata-only); these namespaces will come from O(internal_metadata) rather than O(customer_metadata). type: str kernel_version: required: false @@ -163,8 +156,7 @@ options: maintain_resolvers: required: false description: - - Resolvers in C(/etc/resolv.conf) will be updated when updating - the O(resolvers) property. + - Resolvers in C(/etc/resolv.conf) will be updated when updating the O(resolvers) property. type: bool max_locked_memory: required: false @@ -189,12 +181,11 @@ options: mdata_exec_timeout: required: false description: - - Timeout in seconds (or 0 to disable) for the C(svc:/smartdc/mdata:execute) service - that runs user-scripts in the zone. + - Timeout in seconds (or 0 to disable) for the C(svc:/smartdc/mdata:execute) service that runs user-scripts in the zone. type: int name: required: false - aliases: [ alias ] + aliases: [alias] description: - Name of the VM. vmadm(1M) uses this as an optional name. type: str @@ -212,14 +203,12 @@ options: nowait: required: false description: - - Consider the provisioning complete when the VM first starts, rather than - when the VM has rebooted. + - Consider the provisioning complete when the VM first starts, rather than when the VM has rebooted. type: bool qemu_opts: required: false description: - - Additional qemu arguments for KVM guests. This overwrites the default arguments - provided by vmadm(1M) and should only be used for debugging. + - Additional qemu arguments for KVM guests. This overwrites the default arguments provided by vmadm(1M) and should only be used for debugging. type: str qemu_extra_opts: required: false @@ -245,8 +234,7 @@ options: routes: required: false description: - - Dictionary that maps destinations to gateways, these will be set as static - routes in the VM. + - Dictionary that maps destinations to gateways, these will be set as static routes in the VM. type: dict spice_opts: required: false @@ -256,19 +244,15 @@ options: spice_password: required: false description: - - Password required to connect to SPICE. By default no password is set. - Please note this can be read from the Global Zone. + - Password required to connect to SPICE. By default no password is set. Please note this can be read from the Global Zone. type: str state: - choices: [ present, running, absent, deleted, stopped, created, restarted, rebooted ] + choices: [present, running, absent, deleted, stopped, created, restarted, rebooted] default: running description: - - States for the VM to be in. 
Please note that V(present), V(stopped) and V(restarted) - operate on a VM that is currently provisioned. V(present) means that the VM will be - created if it was absent, and that it will be in a running state. V(absent) will - shutdown the zone before removing it. - V(stopped) means the zone will be created if it does not exist already, before shutting - it down. + - States for the VM to be in. Please note that V(present), V(stopped) and V(restarted) operate on a VM that is currently provisioned. V(present) + means that the VM will be created if it was absent, and that it will be in a running state. V(absent) will shutdown the zone before removing + it. V(stopped) means the zone will be created if it does not exist already, before shutting it down. type: str tmpfs: required: false @@ -303,20 +287,17 @@ options: vnc_password: required: false description: - - Password required to connect to VNC. By default no password is set. - Please note this can be read from the Global Zone. + - Password required to connect to VNC. By default no password is set. Please note this can be read from the Global Zone. type: str vnc_port: required: false description: - - TCP port to listen of the VNC server. Or set V(0) for random, - or V(-1) to disable. + - TCP port to listen of the VNC server. Or set V(0) for random, or V(-1) to disable. type: int zfs_data_compression: required: false description: - - Specifies compression algorithm used for this VMs data dataset. This option - only has effect on delegated datasets. + - Specifies compression algorithm used for this VMs data dataset. This option only has effect on delegated datasets. type: str zfs_data_recsize: required: false @@ -336,8 +317,7 @@ options: zfs_root_compression: required: false description: - - Specifies compression algorithm used for this VMs root dataset. This option - only has effect on the zoneroot dataset. + - Specifies compression algorithm used for this VMs root dataset. This option only has effect on the zoneroot dataset. type: str zfs_root_recsize: required: false @@ -354,9 +334,9 @@ options: description: - ZFS pool the VM's zone dataset will be created in. type: str -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create SmartOS zone community.general.vmadm: brand: joyent @@ -382,9 +362,9 @@ EXAMPLES = ''' community.general.vmadm: uuid: '*' state: stopped -''' +""" -RETURN = ''' +RETURN = r""" uuid: description: UUID of the managed VM. returned: always @@ -400,7 +380,7 @@ state: returned: success type: str sample: 'running' -''' +""" import json import os @@ -558,9 +538,11 @@ def create_payload(module, uuid): # Filter out the few options that are not valid VM properties. 
module_options = ['force', 'state'] - # @TODO make this a simple {} comprehension as soon as py2 is ditched - # @TODO {k: v for k, v in p.items() if k not in module_options} - vmdef = dict([(k, v) for k, v in module.params.items() if k not in module_options and v]) + vmdef = { + k: v + for k, v in module.params.items() + if k not in module_options and v + } try: vmdef_json = json.dumps(vmdef) diff --git a/plugins/modules/wakeonlan.py b/plugins/modules/wakeonlan.py index 6d7e094527..235be741a7 100644 --- a/plugins/modules/wakeonlan.py +++ b/plugins/modules/wakeonlan.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: wakeonlan short_description: Send a magic Wake-on-LAN (WoL) broadcast packet description: @@ -25,17 +24,17 @@ attributes: options: mac: description: - - MAC address to send Wake-on-LAN broadcast packet for. + - MAC address to send Wake-on-LAN broadcast packet for. required: true type: str broadcast: description: - - Network broadcast address to use for broadcasting magic Wake-on-LAN packet. + - Network broadcast address to use for broadcasting magic Wake-on-LAN packet. default: 255.255.255.255 type: str port: description: - - UDP port to use for magic Wake-on-LAN packet. + - UDP port to use for magic Wake-on-LAN packet. default: 7 type: int todo: @@ -43,16 +42,16 @@ todo: - Enable check-mode support (when we have arping support) - Does not have SecureOn password support notes: - - This module sends a magic packet, without knowing whether it worked - - Only works if the target system was properly configured for Wake-on-LAN (in the BIOS and/or the OS) + - This module sends a magic packet, without knowing whether it worked. + - Only works if the target system was properly configured for Wake-on-LAN (in the BIOS and/or the OS). - Some BIOSes have a different (configurable) Wake-on-LAN boot order (i.e. PXE first). seealso: -- module: community.windows.win_wakeonlan + - module: community.windows.win_wakeonlan author: -- Dag Wieers (@dagwieers) -''' + - Dag Wieers (@dagwieers) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Send a magic Wake-on-LAN packet to 00:00:5E:00:53:66 community.general.wakeonlan: mac: '00:00:5E:00:53:66' @@ -63,11 +62,11 @@ EXAMPLES = r''' mac: 00:00:5E:00:53:66 port: 9 delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" # Default return values -''' +""" import socket import struct import traceback diff --git a/plugins/modules/wdc_redfish_command.py b/plugins/modules/wdc_redfish_command.py index 93c4811afe..680bd4b3f9 100644 --- a/plugins/modules/wdc_redfish_command.py +++ b/plugins/modules/wdc_redfish_command.py @@ -8,14 +8,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: wdc_redfish_command short_description: Manages WDC UltraStar Data102 Out-Of-Band controllers using Redfish APIs version_added: 5.4.0 description: - - Builds Redfish URIs locally and sends them to remote OOB controllers to - perform an action. + - Builds Redfish URIs locally and sends them to remote OOB controllers to perform an action. - Manages OOB controller firmware. For example, Firmware Activate, Update and Activate. extends_documentation_fragment: - community.general.attributes @@ -38,11 +36,11 @@ options: elements: str baseuri: description: - - Base URI of OOB controller. Must include this or O(ioms). + - Base URI of OOB controller. Must include this or O(ioms). 
type: str ioms: description: - - List of IOM FQDNs for the enclosure. Must include this or O(baseuri). + - List of IOM FQDNs for the enclosure. Must include this or O(baseuri). type: list elements: str username: @@ -90,14 +88,12 @@ options: - The password for retrieving the update image. type: str notes: - - In the inventory, you can specify baseuri or ioms. See the EXAMPLES section. - - ioms is a list of FQDNs for the enclosure's IOMs. - - + - In the inventory, you can specify baseuri or ioms. See the EXAMPLES section. + - Ioms is a list of FQDNs for the enclosure's IOMs. author: Mike Moerk (@mikemoerk) -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Firmware Activate (required after SimpleUpdate to apply the new firmware) community.general.wdc_redfish_command: category: Update @@ -188,16 +184,15 @@ EXAMPLES = ''' category: Chassis resource_id: Enclosure command: PowerModeNormal +""" -''' - -RETURN = ''' +RETURN = r""" msg: - description: Message with action result or error description - returned: always - type: str - sample: "Action was successful" -''' + description: Message with action result or error description. + returned: always + type: str + sample: "Action was successful" +""" from ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils import WdcRedfishUtils from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/wdc_redfish_info.py b/plugins/modules/wdc_redfish_info.py index 03ae67fcfe..caaa9c7fd9 100644 --- a/plugins/modules/wdc_redfish_info.py +++ b/plugins/modules/wdc_redfish_info.py @@ -8,14 +8,12 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: wdc_redfish_info short_description: Manages WDC UltraStar Data102 Out-Of-Band controllers using Redfish APIs version_added: 5.4.0 description: - - Builds Redfish URIs locally and sends them to remote OOB controllers to - get information back. + - Builds Redfish URIs locally and sends them to remote OOB controllers to get information back. extends_documentation_fragment: - community.general.attributes - community.general.attributes.info_module @@ -33,11 +31,11 @@ options: elements: str baseuri: description: - - Base URI of OOB controller. Must include this or O(ioms). + - Base URI of OOB controller. Must include this or O(ioms). type: str ioms: description: - - List of IOM FQDNs for the enclosure. Must include this or O(baseuri). + - List of IOM FQDNs for the enclosure. Must include this or O(baseuri). type: list elements: str username: @@ -59,13 +57,12 @@ options: type: int notes: - - In the inventory, you can specify baseuri or ioms. See the EXAMPLES section. - - ioms is a list of FQDNs for the enclosure's IOMs. - + - In the inventory, you can specify baseuri or ioms. See the EXAMPLES section. + - Ioms is a list of FQDNs for the enclosure's IOMs. author: Mike Moerk (@mikemoerk) -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Get Simple Update Status with individual IOMs specified community.general.wdc_redfish_info: category: Update @@ -93,30 +90,30 @@ EXAMPLES = ''' - name: Print fetched information ansible.builtin.debug: msg: "{{ result.redfish_facts.simple_update_status.entries | to_nice_json }}" -''' +""" -RETURN = ''' +RETURN = r""" Description: - description: Firmware update status description. - returned: always - type: str - sample: Ready for FW update + description: Firmware update status description. 
+ returned: always + type: str + sample: Ready for FW update ErrorCode: - description: Numeric error code for firmware update status. Non-zero indicates an error condition. - returned: always - type: int - sample: 0 + description: Numeric error code for firmware update status. Non-zero indicates an error condition. + returned: always + type: int + sample: 0 EstimatedRemainingMinutes: - description: Estimated number of minutes remaining in firmware update operation. - returned: always - type: int - sample: 20 + description: Estimated number of minutes remaining in firmware update operation. + returned: always + type: int + sample: 20 StatusCode: - description: Firmware update status code. - returned: always - type: int - sample: 2 -''' + description: Firmware update status code. + returned: always + type: int + sample: 2 +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native diff --git a/plugins/modules/xfconf.py b/plugins/modules/xfconf.py index 15943ae59d..8bb0abc273 100644 --- a/plugins/modules/xfconf.py +++ b/plugins/modules/xfconf.py @@ -8,26 +8,26 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = """ +--- module: xfconf author: - - "Joseph Benden (@jbenden)" - - "Alexei Znamensky (@russoz)" +- "Joseph Benden (@jbenden)" +- "Alexei Znamensky (@russoz)" short_description: Edit XFCE4 Configurations description: - - This module allows for the manipulation of Xfce 4 Configuration with the help of - xfconf-query. Please see the xfconf-query(1) man page for more details. +- This module allows for the manipulation of Xfce 4 Configuration with the help of C(xfconf-query). seealso: - - name: xfconf-query(1) man page - description: Manual page of the C(xfconf-query) tool at the XFCE documentation site. - link: 'https://docs.xfce.org/xfce/xfconf/xfconf-query' +- name: xfconf-query(1) man page + description: Manual page of the C(xfconf-query) tool at the XFCE documentation site. + link: 'https://docs.xfce.org/xfce/xfconf/xfconf-query' - - name: xfconf - Configuration Storage System - description: XFCE documentation for the Xfconf configuration system. - link: 'https://docs.xfce.org/xfce/xfconf/start' +- name: xfconf - Configuration Storage System + description: XFCE documentation for the Xfconf configuration system. + link: 'https://docs.xfce.org/xfce/xfconf/start' extends_documentation_fragment: - - community.general.attributes +- community.general.attributes attributes: check_mode: @@ -38,55 +38,50 @@ attributes: options: channel: description: - - A Xfconf preference channel is a top-level tree key, inside of the - Xfconf repository that corresponds to the location for which all - application properties/keys are stored. See man xfconf-query(1). + - A Xfconf preference channel is a top-level tree key, inside of the Xfconf repository that corresponds to the location for which all application + properties/keys are stored. See man xfconf-query(1). required: true type: str property: description: - - A Xfce preference key is an element in the Xfconf repository - that corresponds to an application preference. See man xfconf-query(1). + - A Xfce preference key is an element in the Xfconf repository that corresponds to an application preference. See man xfconf-query(1). required: true type: str value: description: - - Preference properties typically have simple values such as strings, - integers, or lists of strings and integers. See man xfconf-query(1). 
+ - Preference properties typically have simple values such as strings, integers, or lists of strings and integers. See man xfconf-query(1). type: list elements: raw value_type: description: - - The type of value being set. - - When providing more than one O(value_type), the length of the list must - be equal to the length of O(value). - - If only one O(value_type) is provided, but O(value) contains more than - on element, that O(value_type) will be applied to all elements of O(value). - - If the O(property) being set is an array and it can possibly have only one - element in the array, then O(force_array=true) must be used to ensure - that C(xfconf-query) will interpret the value as an array rather than a - scalar. - - Support for V(uchar), V(char), V(uint64), and V(int64) has been added in community.general 4.8.0. + - The type of value being set. + - When providing more than one O(value_type), the length of the list must be equal to the length of O(value). + - If only one O(value_type) is provided, but O(value) contains more than on element, that O(value_type) will be applied to all elements of + O(value). + - If the O(property) being set is an array and it can possibly have only one element in the array, then O(force_array=true) must be used to + ensure that C(xfconf-query) will interpret the value as an array rather than a scalar. + - Support for V(uchar), V(char), V(uint64), and V(int64) has been added in community.general 4.8.0. type: list elements: str - choices: [ string, int, double, bool, uint, uchar, char, uint64, int64, float ] + choices: [string, int, double, bool, uint, uchar, char, uint64, int64, float] state: type: str description: - - The action to take upon the property/value. - - The state V(get) has been removed in community.general 5.0.0. Please use the module M(community.general.xfconf_info) instead. - choices: [ present, absent ] + - The action to take upon the property/value. + - The state V(get) has been removed in community.general 5.0.0. Please use the module M(community.general.xfconf_info) instead. + choices: [present, absent] default: "present" force_array: description: - - Force array even if only one element. + - Force array even if only one element. type: bool default: false aliases: ['array'] version_added: 1.0.0 -''' +""" EXAMPLES = """ +--- - name: Change the DPI to "192" xfconf: channel: "xsettings" @@ -110,60 +105,58 @@ EXAMPLES = """ force_array: true """ -RETURN = ''' - channel: - description: The channel specified in the module parameters - returned: success - type: str - sample: "xsettings" - property: - description: The property specified in the module parameters - returned: success - type: str - sample: "/Xft/DPI" - value_type: - description: - - The type of the value that was changed (V(none) for O(state=reset)). - Either a single string value or a list of strings for array types. - - This is a string or a list of strings. - returned: success - type: any - sample: '"int" or ["str", "str", "str"]' - value: - description: - - The value of the preference key after executing the module. Either a - single string value or a list of strings for array types. - - This is a string or a list of strings. - returned: success - type: any - sample: '"192" or ["orange", "yellow", "violet"]' - previous_value: - description: - - The value of the preference key before executing the module. - Either a single string value or a list of strings for array types. - - This is a string or a list of strings. 
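A sketch of the value/value_type pairing rule described for xfconf above: when several types are given, each entry types the matching element of value (the channel and property below are hypothetical, chosen only to show a mixed-type array):

    - name: Set a hypothetical array property with per-element value types
      community.general.xfconf:
        channel: example-channel
        property: /example/mixed-array
        value_type: [int, double, string]
        value: [3, 2.5, "label"]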
- returned: success - type: any - sample: '"96" or ["red", "blue", "green"]' - cmd: - description: - - A list with the resulting C(xfconf-query) command executed by the module. - returned: success - type: list - elements: str - version_added: 5.4.0 - sample: - - /usr/bin/xfconf-query - - --channel - - xfce4-panel - - --property - - /plugins/plugin-19/timezone - - --create - - --type - - string - - --set - - Pacific/Auckland -''' +RETURN = """ +--- +channel: + description: The channel specified in the module parameters + returned: success + type: str + sample: "xsettings" +property: + description: The property specified in the module parameters + returned: success + type: str + sample: "/Xft/DPI" +value_type: + description: + - The type of the value that was changed (V(none) for O(state=reset)). Either a single string value or a list of strings for array types. + - This is a string or a list of strings. + returned: success + type: any + sample: '"int" or ["str", "str", "str"]' +value: + description: + - The value of the preference key after executing the module. Either a single string value or a list of strings for array types. + - This is a string or a list of strings. + returned: success + type: any + sample: '"192" or ["orange", "yellow", "violet"]' +previous_value: + description: + - The value of the preference key before executing the module. Either a single string value or a list of strings for array types. + - This is a string or a list of strings. + returned: success + type: any + sample: '"96" or ["red", "blue", "green"]' +cmd: + description: + - A list with the resulting C(xfconf-query) command executed by the module. + returned: success + type: list + elements: str + version_added: 5.4.0 + sample: + - /usr/bin/xfconf-query + - --channel + - xfce4-panel + - --property + - /plugins/plugin-19/timezone + - --create + - --type + - string + - --set + - Pacific/Auckland +""" from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper from ansible_collections.community.general.plugins.module_utils.xfconf import xfconf_runner diff --git a/plugins/modules/xfconf_info.py b/plugins/modules/xfconf_info.py index 3d56a70cb9..aba0d912ff 100644 --- a/plugins/modules/xfconf_info.py +++ b/plugins/modules/xfconf_info.py @@ -7,17 +7,18 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = """ +--- module: xfconf_info author: - - "Alexei Znamensky (@russoz)" +- "Alexei Znamensky (@russoz)" short_description: Retrieve XFCE4 configurations version_added: 3.5.0 description: - - This module allows retrieving Xfce 4 configurations with the help of C(xfconf-query). +- This module allows retrieving Xfce 4 configurations with the help of C(xfconf-query). extends_documentation_fragment: - - community.general.attributes - - community.general.attributes.info_module +- community.general.attributes +- community.general.attributes.info_module attributes: check_mode: version_added: 3.3.0 @@ -40,10 +41,11 @@ options: - If not provided and a O(channel) is provided, then the module will list all available properties in that O(channel). type: str notes: - - See man xfconf-query(1) for more details. -''' +- See man xfconf-query(1) for more details. +""" EXAMPLES = """ +--- - name: Get list of all available channels community.general.xfconf_info: {} register: result @@ -66,63 +68,64 @@ EXAMPLES = """ register: result """ -RETURN = ''' - channels: - description: - - List of available channels. 
- - Returned when the module receives no parameter at all. - returned: success - type: list - elements: str - sample: - - xfce4-desktop - - displays - - xsettings - - xfwm4 - properties: - description: - - List of available properties for a specific channel. - - Returned by passing only the O(channel) parameter to the module. - returned: success - type: list - elements: str - sample: - - /Gdk/WindowScalingFactor - - /Gtk/ButtonImages - - /Gtk/CursorThemeSize - - /Gtk/DecorationLayout - - /Gtk/FontName - - /Gtk/MenuImages - - /Gtk/MonospaceFontName - - /Net/DoubleClickTime - - /Net/IconThemeName - - /Net/ThemeName - - /Xft/Antialias - - /Xft/Hinting - - /Xft/HintStyle - - /Xft/RGBA - is_array: - description: - - Flag indicating whether the property is an array or not. - returned: success - type: bool - value: - description: - - The value of the property. Empty if the property is of array type. - returned: success - type: str - sample: Monospace 10 - value_array: - description: - - The array value of the property. Empty if the property is not of array type. - returned: success - type: list - elements: str - sample: - - Main - - Work - - Tmp -''' +RETURN = """ +--- +channels: + description: + - List of available channels. + - Returned when the module receives no parameter at all. + returned: success + type: list + elements: str + sample: + - xfce4-desktop + - displays + - xsettings + - xfwm4 +properties: + description: + - List of available properties for a specific channel. + - Returned by passing only the O(channel) parameter to the module. + returned: success + type: list + elements: str + sample: + - /Gdk/WindowScalingFactor + - /Gtk/ButtonImages + - /Gtk/CursorThemeSize + - /Gtk/DecorationLayout + - /Gtk/FontName + - /Gtk/MenuImages + - /Gtk/MonospaceFontName + - /Net/DoubleClickTime + - /Net/IconThemeName + - /Net/ThemeName + - /Xft/Antialias + - /Xft/Hinting + - /Xft/HintStyle + - /Xft/RGBA +is_array: + description: + - Flag indicating whether the property is an array or not. + returned: success + type: bool +value: + description: + - The value of the property. Empty if the property is of array type. + returned: success + type: str + sample: Monospace 10 +value_array: + description: + - The array value of the property. Empty if the property is not of array type. + returned: success + type: list + elements: str + sample: + - Main + - Work + - Tmp +""" from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper from ansible_collections.community.general.plugins.module_utils.xfconf import xfconf_runner diff --git a/plugins/modules/yum_versionlock.py b/plugins/modules/yum_versionlock.py index 0cbf9be393..4a618a9d17 100644 --- a/plugins/modules/yum_versionlock.py +++ b/plugins/modules/yum_versionlock.py @@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: yum_versionlock version_added: 2.0.0 short_description: Locks / unlocks a installed package(s) from being updated by yum package manager @@ -32,62 +31,62 @@ options: elements: str state: description: - - If state is V(present), package(s) will be added to yum versionlock list. - - If state is V(absent), package(s) will be removed from yum versionlock list. - choices: [ 'absent', 'present' ] + - If state is V(present), package(s) will be added to yum versionlock list. + - If state is V(absent), package(s) will be removed from yum versionlock list. 
+ choices: ['absent', 'present'] type: str default: present notes: - - Requires yum-plugin-versionlock package on the remote node. + - Requires yum-plugin-versionlock package on the remote node. requirements: -- yum -- yum-versionlock + - yum + - yum-versionlock author: - - Florian Paul Azim Hoberg (@gyptazy) - - Amin Vakil (@aminvakil) -''' + - Florian Paul Azim Hoberg (@gyptazy) + - Amin Vakil (@aminvakil) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Prevent Apache / httpd from being updated community.general.yum_versionlock: state: present name: - - httpd + - httpd - name: Prevent Apache / httpd version 2.4.57-2 from being updated community.general.yum_versionlock: state: present name: - - httpd-0:2.4.57-2.el9 + - httpd-0:2.4.57-2.el9 - name: Prevent multiple packages from being updated community.general.yum_versionlock: state: present name: - - httpd - - nginx - - haproxy - - curl + - httpd + - nginx + - haproxy + - curl - name: Remove lock from Apache / httpd to be updated again community.general.yum_versionlock: state: absent name: httpd -''' +""" -RETURN = r''' +RETURN = r""" packages: - description: A list of package(s) in versionlock list. - returned: success - type: list - elements: str - sample: [ 'httpd' ] + description: A list of package(s) in versionlock list. + returned: success + type: list + elements: str + sample: ['httpd'] state: - description: State of package(s). - returned: success - type: str - sample: present -''' + description: State of package(s). + returned: success + type: str + sample: present +""" import re from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/zfs.py b/plugins/modules/zfs.py index f23cc4580d..1b00010d8a 100644 --- a/plugins/modules/zfs.py +++ b/plugins/modules/zfs.py @@ -9,23 +9,20 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: zfs -short_description: Manage zfs +short_description: Manage ZFS description: - - Manages ZFS file systems, volumes, clones and snapshots + - Manages ZFS file systems, volumes, clones and snapshots. extends_documentation_fragment: - community.general.attributes attributes: check_mode: support: partial details: - - In certain situations it may report a task as changed that will not be reported - as changed when C(check_mode) is disabled. - - For example, this might occur when the zpool C(altroot) option is set or when - a size is written using human-readable notation, such as V(1M) or V(1024K), - instead of as an unqualified byte count, such as V(1048576). + - In certain situations it may report a task as changed that will not be reported as changed when C(check_mode) is disabled. + - For example, this might occur when the zpool C(altroot) option is set or when a size is written using human-readable notation, such as + V(1M) or V(1024K), instead of as an unqualified byte count, such as V(1048576). diff_mode: support: full options: @@ -36,10 +33,9 @@ options: type: str state: description: - - Whether to create (V(present)), or remove (V(absent)) a - file system, snapshot or volume. All parents/children - will be created/destroyed as needed to reach the desired state. - choices: [ absent, present ] + - Whether to create (V(present)), or remove (V(absent)) a file system, snapshot or volume. All parents/children will be created/destroyed + as needed to reach the desired state. 
+ choices: [absent, present] required: true type: str origin: @@ -53,10 +49,10 @@ options: type: dict default: {} author: -- Johan Wiren (@johanwiren) -''' + - Johan Wiren (@johanwiren) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a new file system called myfs in pool rpool with the setuid property turned off community.general.zfs: name: rpool/myfs @@ -93,7 +89,7 @@ EXAMPLES = ''' community.general.zfs: name: rpool/myfs state: absent -''' +""" import os diff --git a/plugins/modules/zfs_delegate_admin.py b/plugins/modules/zfs_delegate_admin.py index 24f7422206..796cbd4595 100644 --- a/plugins/modules/zfs_delegate_admin.py +++ b/plugins/modules/zfs_delegate_admin.py @@ -8,18 +8,17 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: zfs_delegate_admin short_description: Manage ZFS delegated administration (user admin privileges) description: - - Manages ZFS file system delegated administration permissions, which allow unprivileged users to perform ZFS - operations normally restricted to the superuser. + - Manages ZFS file system delegated administration permissions, which allow unprivileged users to perform ZFS operations normally restricted + to the superuser. - See the C(zfs allow) section of V(zfs(1M\)) for detailed explanations of options. - This module attempts to adhere to the behavior of the command line tool as much as possible. requirements: - - "A ZFS/OpenZFS implementation that supports delegation with C(zfs allow), including: Solaris >= 10, illumos (all - versions), FreeBSD >= 8.0R, ZFS on Linux >= 0.7.0." + - "A ZFS/OpenZFS implementation that supports delegation with C(zfs allow), including: Solaris >= 10, illumos (all versions), FreeBSD >= 8.0R, + ZFS on Linux >= 0.7.0." extends_documentation_fragment: - community.general.attributes attributes: @@ -38,7 +37,7 @@ options: - Whether to allow (V(present)), or unallow (V(absent)) a permission. - When set to V(present), at least one "entity" param of O(users), O(groups), or O(everyone) are required. - When set to V(absent), removes permissions from the specified entities, or removes all permissions if no entity params are specified. - choices: [ absent, present ] + choices: [absent, present] default: present type: str users: @@ -59,8 +58,8 @@ options: permissions: description: - The list of permission(s) to delegate (required if O(state=present)). - - Supported permissions depend on the ZFS version in use. See for example - U(https://openzfs.github.io/openzfs-docs/man/8/zfs-allow.8.html) for OpenZFS. + - Supported permissions depend on the ZFS version in use. See for example U(https://openzfs.github.io/openzfs-docs/man/8/zfs-allow.8.html) + for OpenZFS. 
type: list elements: str local: @@ -77,10 +76,10 @@ options: type: bool default: false author: -- Nate Coraor (@natefoo) -''' + - Nate Coraor (@natefoo) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Grant `zfs allow` and `unallow` permission to the `adm` user with the default local+descendents scope community.general.zfs_delegate_admin: name: rpool/myfs @@ -106,12 +105,12 @@ EXAMPLES = r''' name: rpool/myfs everyone: true state: absent -''' +""" # This module does not return anything other than the standard # changed/state/msg/stdout -RETURN = ''' -''' +RETURN = r""" +""" from itertools import product diff --git a/plugins/modules/zfs_facts.py b/plugins/modules/zfs_facts.py index bb4530c473..115e8e3e7a 100644 --- a/plugins/modules/zfs_facts.py +++ b/plugins/modules/zfs_facts.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: zfs_facts short_description: Gather facts about ZFS datasets description: @@ -21,45 +20,42 @@ extends_documentation_fragment: - community.general.attributes.facts - community.general.attributes.facts_module options: - name: - description: - - ZFS dataset name. - required: true - aliases: [ "ds", "dataset" ] - type: str - recurse: - description: - - Specifies if properties for any children should be recursively - displayed. - type: bool - default: false - parsable: - description: - - Specifies if property values should be displayed in machine - friendly format. - type: bool - default: false - properties: - description: - - Specifies which dataset properties should be queried in comma-separated format. - For more information about dataset properties, check zfs(1M) man page. - default: all - type: str - type: - description: - - Specifies which datasets types to display. Multiple values have to be - provided in comma-separated form. - choices: [ 'all', 'filesystem', 'volume', 'snapshot', 'bookmark' ] - default: all - type: str - depth: - description: - - Specifies recursion depth. - type: int - default: 0 -''' + name: + description: + - ZFS dataset name. + required: true + aliases: ["ds", "dataset"] + type: str + recurse: + description: + - Specifies if properties for any children should be recursively displayed. + type: bool + default: false + parsable: + description: + - Specifies if property values should be displayed in machine friendly format. + type: bool + default: false + properties: + description: + - Specifies which dataset properties should be queried in comma-separated format. For more information about dataset properties, check zfs(1M) + man page. + default: all + type: str + type: + description: + - Specifies which datasets types to display. Multiple values have to be provided in comma-separated form. + choices: ['all', 'filesystem', 'volume', 'snapshot', 'bookmark'] + default: all + type: str + depth: + description: + - Specifies recursion depth. + type: int + default: 0 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Gather facts about ZFS dataset rpool/export/home community.general.zfs_facts: dataset: rpool/export/home @@ -73,88 +69,38 @@ EXAMPLES = ''' - ansible.builtin.debug: msg: 'ZFS dataset {{ item.name }} consumes {{ item.used }} of disk space.' with_items: '{{ ansible_zfs_datasets }}' -''' +""" -RETURN = ''' +RETURN = r""" name: - description: ZFS dataset name - returned: always - type: str - sample: rpool/var/spool + description: ZFS dataset name. 
+ returned: always + type: str + sample: rpool/var/spool parsable: - description: if parsable output should be provided in machine friendly format. - returned: if 'parsable' is set to True - type: bool - sample: true + description: If parsable output should be provided in machine friendly format. + returned: if O(parsable=True) + type: bool + sample: true recurse: - description: if we should recurse over ZFS dataset - returned: if 'recurse' is set to True - type: bool - sample: true + description: If we should recurse over ZFS dataset. + returned: if O(recurse=True) + type: bool + sample: true zfs_datasets: - description: ZFS dataset facts - returned: always - type: str - sample: - { - "aclinherit": "restricted", - "aclmode": "discard", - "atime": "on", - "available": "43.8G", - "canmount": "on", - "casesensitivity": "sensitive", - "checksum": "on", - "compression": "off", - "compressratio": "1.00x", - "copies": "1", - "creation": "Thu Jun 16 11:37 2016", - "dedup": "off", - "devices": "on", - "exec": "on", - "filesystem_count": "none", - "filesystem_limit": "none", - "logbias": "latency", - "logicalreferenced": "18.5K", - "logicalused": "3.45G", - "mlslabel": "none", - "mounted": "yes", - "mountpoint": "/rpool", - "name": "rpool", - "nbmand": "off", - "normalization": "none", - "org.openindiana.caiman:install": "ready", - "primarycache": "all", - "quota": "none", - "readonly": "off", - "recordsize": "128K", - "redundant_metadata": "all", - "refcompressratio": "1.00x", - "referenced": "29.5K", - "refquota": "none", - "refreservation": "none", - "reservation": "none", - "secondarycache": "all", - "setuid": "on", - "sharenfs": "off", - "sharesmb": "off", - "snapdir": "hidden", - "snapshot_count": "none", - "snapshot_limit": "none", - "sync": "standard", - "type": "filesystem", - "used": "4.41G", - "usedbychildren": "4.41G", - "usedbydataset": "29.5K", - "usedbyrefreservation": "0", - "usedbysnapshots": "0", - "utf8only": "off", - "version": "5", - "vscan": "off", - "written": "29.5K", - "xattr": "on", - "zoned": "off" - } -''' + description: ZFS dataset facts. 
+ returned: always + type: str + sample: {"aclinherit": "restricted", "aclmode": "discard", "atime": "on", "available": "43.8G", "canmount": "on", "casesensitivity": "sensitive", + "checksum": "on", "compression": "off", "compressratio": "1.00x", "copies": "1", "creation": "Thu Jun 16 11:37 2016", "dedup": "off", "devices": "on", + "exec": "on", "filesystem_count": "none", "filesystem_limit": "none", "logbias": "latency", "logicalreferenced": "18.5K", "logicalused": "3.45G", + "mlslabel": "none", "mounted": "yes", "mountpoint": "/rpool", "name": "rpool", "nbmand": "off", "normalization": "none", "org.openindiana.caiman:install": + "ready", "primarycache": "all", "quota": "none", "readonly": "off", "recordsize": "128K", "redundant_metadata": "all", "refcompressratio": "1.00x", + "referenced": "29.5K", "refquota": "none", "refreservation": "none", "reservation": "none", "secondarycache": "all", "setuid": "on", "sharenfs": "off", + "sharesmb": "off", "snapdir": "hidden", "snapshot_count": "none", "snapshot_limit": "none", "sync": "standard", "type": "filesystem", "used": "4.41G", + "usedbychildren": "4.41G", "usedbydataset": "29.5K", "usedbyrefreservation": "0", "usedbysnapshots": "0", "utf8only": "off", "version": "5", + "vscan": "off", "written": "29.5K", "xattr": "on", "zoned": "off"} +""" from collections import defaultdict diff --git a/plugins/modules/znode.py b/plugins/modules/znode.py index e8f7f1dc76..ca59704d12 100644 --- a/plugins/modules/znode.py +++ b/plugins/modules/znode.py @@ -8,83 +8,81 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: znode short_description: Create, delete, retrieve, and update znodes using ZooKeeper description: - - Create, delete, retrieve, and update znodes using ZooKeeper. + - Create, delete, retrieve, and update znodes using ZooKeeper. attributes: - check_mode: - support: none - diff_mode: - support: none + check_mode: + support: none + diff_mode: + support: none extends_documentation_fragment: - - community.general.attributes + - community.general.attributes options: - hosts: - description: - - A list of ZooKeeper servers (format '[server]:[port]'). - required: true - type: str - name: - description: - - The path of the znode. - required: true - type: str - value: - description: - - The value assigned to the znode. - type: str - op: - description: - - An operation to perform. Mutually exclusive with state. - choices: [ get, wait, list ] - type: str - state: - description: - - The state to enforce. Mutually exclusive with op. - choices: [ present, absent ] - type: str - timeout: - description: - - The amount of time to wait for a node to appear. - default: 300 - type: int - recursive: - description: - - Recursively delete node and all its children. - type: bool - default: false - auth_scheme: - description: - - 'Authentication scheme.' - choices: [ digest, sasl ] - type: str - default: "digest" - required: false - version_added: 5.8.0 - auth_credential: - description: - - The authentication credential value. Depends on O(auth_scheme). - - The format for O(auth_scheme=digest) is C(user:password), - and the format for O(auth_scheme=sasl) is C(user:password). - type: str - required: false - version_added: 5.8.0 - use_tls: - description: - - Using TLS/SSL or not. - type: bool - default: false - required: false - version_added: '6.5.0' + hosts: + description: + - A list of ZooKeeper servers (format V([server]:[port])). 
+ required: true + type: str + name: + description: + - The path of the znode. + required: true + type: str + value: + description: + - The value assigned to the znode. + type: str + op: + description: + - An operation to perform. Mutually exclusive with state. + choices: [get, wait, list] + type: str + state: + description: + - The state to enforce. Mutually exclusive with op. + choices: [present, absent] + type: str + timeout: + description: + - The amount of time to wait for a node to appear. + default: 300 + type: int + recursive: + description: + - Recursively delete node and all its children. + type: bool + default: false + auth_scheme: + description: + - 'Authentication scheme.' + choices: [digest, sasl] + type: str + default: "digest" + required: false + version_added: 5.8.0 + auth_credential: + description: + - The authentication credential value. Depends on O(auth_scheme). + - The format for O(auth_scheme=digest) is C(user:password), and the format for O(auth_scheme=sasl) is C(user:password). + type: str + required: false + version_added: 5.8.0 + use_tls: + description: + - Using TLS/SSL or not. + type: bool + default: false + required: false + version_added: '6.5.0' requirements: - - kazoo >= 2.1 + - kazoo >= 2.1 author: "Trey Perry (@treyperry)" -''' +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Creating or updating a znode with a given value community.general.znode: hosts: 'localhost:2181' diff --git a/plugins/modules/zpool_facts.py b/plugins/modules/zpool_facts.py index 2477a920b0..e0b87b570c 100644 --- a/plugins/modules/zpool_facts.py +++ b/plugins/modules/zpool_facts.py @@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: zpool_facts short_description: Gather facts about ZFS pools description: @@ -21,29 +20,28 @@ extends_documentation_fragment: - community.general.attributes.facts - community.general.attributes.facts_module options: - name: - description: - - ZFS pool name. - type: str - aliases: [ "pool", "zpool" ] - required: false - parsable: - description: - - Specifies if property values should be displayed in machine - friendly format. - type: bool - default: false - required: false - properties: - description: - - Specifies which dataset properties should be queried in comma-separated format. - For more information about dataset properties, check zpool(1M) man page. - type: str - default: all - required: false -''' + name: + description: + - ZFS pool name. + type: str + aliases: ["pool", "zpool"] + required: false + parsable: + description: + - Specifies if property values should be displayed in machine friendly format. + type: bool + default: false + required: false + properties: + description: + - Specifies which dataset properties should be queried in comma-separated format. For more information about dataset properties, check zpool(1M) + man page. + type: str + default: all + required: false +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Gather facts about ZFS pool rpool community.general.zpool_facts: pool=rpool @@ -54,71 +52,37 @@ EXAMPLES = ''' ansible.builtin.debug: msg: 'ZFS pool {{ item.name }} has {{ item.free }} free space out of {{ item.size }}.' 
with_items: '{{ ansible_zfs_pools }}' -''' +""" -RETURN = ''' +RETURN = r""" ansible_facts: - description: Dictionary containing all the detailed information about the ZFS pool facts - returned: always - type: complex - contains: - ansible_zfs_pools: - description: ZFS pool facts - returned: always - type: str - sample: - { - "allocated": "3.46G", - "altroot": "-", - "autoexpand": "off", - "autoreplace": "off", - "bootfs": "rpool/ROOT/openindiana", - "cachefile": "-", - "capacity": "6%", - "comment": "-", - "dedupditto": "0", - "dedupratio": "1.00x", - "delegation": "on", - "expandsize": "-", - "failmode": "wait", - "feature@async_destroy": "enabled", - "feature@bookmarks": "enabled", - "feature@edonr": "enabled", - "feature@embedded_data": "active", - "feature@empty_bpobj": "active", - "feature@enabled_txg": "active", - "feature@extensible_dataset": "enabled", - "feature@filesystem_limits": "enabled", - "feature@hole_birth": "active", - "feature@large_blocks": "enabled", - "feature@lz4_compress": "active", - "feature@multi_vdev_crash_dump": "enabled", - "feature@sha512": "enabled", - "feature@skein": "enabled", - "feature@spacemap_histogram": "active", - "fragmentation": "3%", - "free": "46.3G", - "freeing": "0", - "guid": "15729052870819522408", - "health": "ONLINE", - "leaked": "0", - "listsnapshots": "off", - "name": "rpool", - "readonly": "off", - "size": "49.8G", - "version": "-" - } + description: Dictionary containing all the detailed information about the ZFS pool facts. + returned: always + type: complex + contains: + ansible_zfs_pools: + description: ZFS pool facts. + returned: always + type: str + sample: {"allocated": "3.46G", "altroot": "-", "autoexpand": "off", "autoreplace": "off", "bootfs": "rpool/ROOT/openindiana", "cachefile": "-", + "capacity": "6%", "comment": "-", "dedupditto": "0", "dedupratio": "1.00x", "delegation": "on", "expandsize": "-", "failmode": "wait", + "feature@async_destroy": "enabled", "feature@bookmarks": "enabled", "feature@edonr": "enabled", "feature@embedded_data": "active", + "feature@empty_bpobj": "active", "feature@enabled_txg": "active", "feature@extensible_dataset": "enabled", "feature@filesystem_limits": "enabled", + "feature@hole_birth": "active", "feature@large_blocks": "enabled", "feature@lz4_compress": "active", "feature@multi_vdev_crash_dump": "enabled", + "feature@sha512": "enabled", "feature@skein": "enabled", "feature@spacemap_histogram": "active", "fragmentation": "3%", "free": "46.3G", + "freeing": "0", "guid": "15729052870819522408", "health": "ONLINE", "leaked": "0", "listsnapshots": "off", "name": "rpool", "readonly": "off", + "size": "49.8G", "version": "-"} name: - description: ZFS pool name - returned: always - type: str - sample: rpool + description: ZFS pool name. + returned: always + type: str + sample: rpool parsable: - description: if parsable output should be provided in machine friendly format. - returned: if 'parsable' is set to True - type: bool - sample: true -''' + description: If parsable output should be provided in machine friendly format. 
+ returned: if O(parsable=true) + type: bool + sample: true +""" from collections import defaultdict diff --git a/plugins/plugin_utils/ansible_type.py b/plugins/plugin_utils/ansible_type.py new file mode 100644 index 0000000000..ab78b78927 --- /dev/null +++ b/plugins/plugin_utils/ansible_type.py @@ -0,0 +1,47 @@ +# Copyright (c) 2024 Vladimir Botka +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.errors import AnsibleFilterError +from ansible.module_utils.common._collections_compat import Mapping + + +def _atype(data, alias): + """ + Returns the name of the type class. + """ + + data_type = type(data).__name__ + return alias.get(data_type, data_type) + + +def _ansible_type(data, alias): + """ + Returns the Ansible data type. + """ + + if alias is None: + alias = {} + + if not isinstance(alias, Mapping): + msg = "The argument alias must be a dictionary. %s is %s" + raise AnsibleFilterError(msg % (alias, type(alias))) + + data_type = _atype(data, alias) + + if data_type == 'list' and len(data) > 0: + items = [_atype(i, alias) for i in data] + items_type = '|'.join(sorted(set(items))) + return ''.join((data_type, '[', items_type, ']')) + + if data_type == 'dict' and len(data) > 0: + keys = [_atype(i, alias) for i in data.keys()] + vals = [_atype(i, alias) for i in data.values()] + keys_type = '|'.join(sorted(set(keys))) + vals_type = '|'.join(sorted(set(vals))) + return ''.join((data_type, '[', keys_type, ', ', vals_type, ']')) + + return data_type diff --git a/plugins/plugin_utils/keys_filter.py b/plugins/plugin_utils/keys_filter.py new file mode 100644 index 0000000000..94234a15db --- /dev/null +++ b/plugins/plugin_utils/keys_filter.py @@ -0,0 +1,141 @@ +# Copyright (c) 2024 Vladimir Botka +# Copyright (c) 2024 Felix Fontein +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re + +from ansible.errors import AnsibleFilterError +from ansible.module_utils.six import string_types +from ansible.module_utils.common._collections_compat import Mapping, Sequence + + +def _keys_filter_params(data, matching_parameter): + """test parameters: + * data must be a list of dictionaries. All keys must be strings. + * matching_parameter is member of a list. + """ + + mp = matching_parameter + ml = ['equal', 'starts_with', 'ends_with', 'regex'] + + if not isinstance(data, Sequence): + msg = "First argument must be a list. %s is %s" + raise AnsibleFilterError(msg % (data, type(data))) + + for elem in data: + if not isinstance(elem, Mapping): + msg = "The data items must be dictionaries. %s is %s" + raise AnsibleFilterError(msg % (elem, type(elem))) + + for elem in data: + if not all(isinstance(item, string_types) for item in elem.keys()): + msg = "Top level keys must be strings. keys: %s" + raise AnsibleFilterError(msg % elem.keys()) + + if mp not in ml: + msg = "The matching_parameter must be one of %s. matching_parameter=%s" + raise AnsibleFilterError(msg % (ml, mp)) + + return + + +def _keys_filter_target_str(target, matching_parameter): + """ + Test: + * target is a non-empty string or list. + * If target is list all items are strings. 
+    * target is a string or list with single string if matching_parameter=regex.
+    Convert target and return:
+    * tuple of unique target items, or
+    * tuple with single item, or
+    * compiled regex if matching_parameter=regex.
+    """
+
+    if not isinstance(target, Sequence):
+        msg = "The target must be a string or a list. target is %s."
+        raise AnsibleFilterError(msg % type(target))
+
+    if len(target) == 0:
+        msg = "The target can't be empty."
+        raise AnsibleFilterError(msg)
+
+    if isinstance(target, list):
+        for elem in target:
+            if not isinstance(elem, string_types):
+                msg = "The target items must be strings. %s is %s"
+                raise AnsibleFilterError(msg % (elem, type(elem)))
+
+    if matching_parameter == 'regex':
+        if isinstance(target, string_types):
+            r = target
+        else:
+            if len(target) > 1:
+                msg = "Single item is required in the target list if matching_parameter=regex."
+                raise AnsibleFilterError(msg)
+            else:
+                r = target[0]
+        try:
+            tt = re.compile(r)
+        except re.error:
+            msg = "The target must be a valid regex if matching_parameter=regex. target is %s"
+            raise AnsibleFilterError(msg % r)
+    elif isinstance(target, string_types):
+        tt = (target, )
+    else:
+        tt = tuple(set(target))
+
+    return tt
+
+
+def _keys_filter_target_dict(target, matching_parameter):
+    """
+    Test:
+    * target is a list of dictionaries with attributes 'after' and 'before'.
+    * Attributes 'before' must be valid regex if matching_parameter=regex.
+    * Otherwise, the attributes 'before' must be strings.
+    Convert target and return:
+    * iterator that aggregates attributes 'before' and 'after', or
+    * iterator that aggregates compiled regex of attributes 'before' and 'after' if matching_parameter=regex.
+    """
+
+    if not isinstance(target, list):
+        msg = "The target must be a list. target is %s."
+        raise AnsibleFilterError(msg % type(target))
+
+    if len(target) == 0:
+        msg = "The target can't be empty."
+        raise AnsibleFilterError(msg)
+
+    for elem in target:
+        if not isinstance(elem, Mapping):
+            msg = "The target items must be dictionaries. %s is %s"
+            raise AnsibleFilterError(msg % (elem, type(elem)))
+        if not all(k in elem for k in ('before', 'after')):
+            msg = "All dictionaries in target must include attributes: after, before."
+            raise AnsibleFilterError(msg)
+        if not isinstance(elem['before'], string_types):
+            msg = "The attributes before must be strings. %s is %s"
+            raise AnsibleFilterError(msg % (elem['before'], type(elem['before'])))
+        if not isinstance(elem['after'], string_types):
+            msg = "The attributes after must be strings. %s is %s"
+            raise AnsibleFilterError(msg % (elem['after'], type(elem['after'])))
+
+    before = [d['before'] for d in target]
+    after = [d['after'] for d in target]
+
+    if matching_parameter == 'regex':
+        try:
+            tr = map(re.compile, before)
+            tz = list(zip(tr, after))
+        except re.error:
+            msg = ("The attributes before must be valid regex if matching_parameter=regex."
+ " Not all items are valid regex in: %s") + raise AnsibleFilterError(msg % before) + else: + tz = list(zip(before, after)) + + return tz diff --git a/plugins/plugin_utils/unsafe.py b/plugins/plugin_utils/unsafe.py index 1eb61bea0f..4fdb8b3d51 100644 --- a/plugins/plugin_utils/unsafe.py +++ b/plugins/plugin_utils/unsafe.py @@ -24,7 +24,7 @@ def make_unsafe(value): return value if isinstance(value, Mapping): - return dict((make_unsafe(key), make_unsafe(val)) for key, val in value.items()) + return {make_unsafe(key): make_unsafe(val) for key, val in value.items()} elif isinstance(value, Set): return set(make_unsafe(elt) for elt in value) elif is_sequence(value): diff --git a/plugins/test/ansible_type.py b/plugins/test/ansible_type.py new file mode 100644 index 0000000000..9ac5e138eb --- /dev/null +++ b/plugins/test/ansible_type.py @@ -0,0 +1,203 @@ +# Copyright (c) 2024 Vladimir Botka +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + name: ansible_type + short_description: Validate input type + version_added: "9.2.0" + author: Vladimir Botka (@vbotka) + description: This test validates input type. + options: + _input: + description: Input data. + type: raw + required: true + dtype: + description: A single data type, or a data types list to be validated. + type: raw + required: true + alias: + description: Data type aliases. + default: {} + type: dictionary +''' + +EXAMPLES = ''' + +# Substitution converts str to AnsibleUnicode +# ------------------------------------------- + +# String. AnsibleUnicode. +dtype: AnsibleUnicode +data: "abc" +result: '{{ data is community.general.ansible_type(dtype) }}' +# result => true + +# String. AnsibleUnicode alias str. +alias: {"AnsibleUnicode": "str"} +dtype: str +data: "abc" +result: '{{ data is community.general.ansible_type(dtype, alias) }}' +# result => true + +# List. All items are AnsibleUnicode. +dtype: list[AnsibleUnicode] +data: ["a", "b", "c"] +result: '{{ data is community.general.ansible_type(dtype) }}' +# result => true + +# Dictionary. All keys are AnsibleUnicode. All values are AnsibleUnicode. +dtype: dict[AnsibleUnicode, AnsibleUnicode] +data: {"a": "foo", "b": "bar", "c": "baz"} +result: '{{ data is community.general.ansible_type(dtype) }}' +# result => true + +# No substitution and no alias. Type of strings is str +# ---------------------------------------------------- + +# String +dtype: str +result: '{{ "abc" is community.general.ansible_type(dtype) }}' +# result => true + +# Integer +dtype: int +result: '{{ 123 is community.general.ansible_type(dtype) }}' +# result => true + +# Float +dtype: float +result: '{{ 123.45 is community.general.ansible_type(dtype) }}' +# result => true + +# Boolean +dtype: bool +result: '{{ true is community.general.ansible_type(dtype) }}' +# result => true + +# List. All items are strings. +dtype: list[str] +result: '{{ ["a", "b", "c"] is community.general.ansible_type(dtype) }}' +# result => true + +# List of dictionaries. +dtype: list[dict] +result: '{{ [{"a": 1}, {"b": 2}] is community.general.ansible_type(dtype) }}' +# result => true + +# Dictionary. All keys are strings. All values are integers. +dtype: dict[str, int] +result: '{{ {"a": 1} is community.general.ansible_type(dtype) }}' +# result => true + +# Dictionary. All keys are strings. All values are integers. 
+dtype: dict[str, int]
+result: '{{ {"a": 1, "b": 2} is community.general.ansible_type(dtype) }}'
+# result => true
+
+# Type of strings is AnsibleUnicode or str
+# ----------------------------------------
+
+# Dictionary. The keys are integers or strings. All values are strings.
+alias: {"AnsibleUnicode": "str"}
+dtype: dict[int|str, str]
+data: {1: 'a', 'b': 'b'}
+result: '{{ data is community.general.ansible_type(dtype, alias) }}'
+# result => true
+
+# Dictionary. All keys are integers. All values are keys.
+alias: {"AnsibleUnicode": "str"}
+dtype: dict[int, str]
+data: {1: 'a', 2: 'b'}
+result: '{{ data is community.general.ansible_type(dtype, alias) }}'
+# result => true
+
+# Dictionary. All keys are strings. Multiple types values.
+alias: {"AnsibleUnicode": "str"}
+dtype: dict[str, bool|dict|float|int|list|str]
+data: {'a': 1, 'b': 1.1, 'c': 'abc', 'd': True, 'e': ['x', 'y', 'z'], 'f': {'x': 1, 'y': 2}}
+result: '{{ data is community.general.ansible_type(dtype, alias) }}'
+# result => true
+
+# List. Multiple types items.
+alias: {"AnsibleUnicode": "str"}
+dtype: list[bool|dict|float|int|list|str]
+data: [1, 2, 1.1, 'abc', True, ['x', 'y', 'z'], {'x': 1, 'y': 2}]
+result: '{{ data is community.general.ansible_type(dtype, alias) }}'
+# result => true
+
+# Option dtype is list
+# --------------------
+
+# AnsibleUnicode or str
+dtype: ['AnsibleUnicode', 'str']
+data: abc
+result: '{{ data is community.general.ansible_type(dtype) }}'
+# result => true
+
+# float or int
+dtype: ['float', 'int']
+data: 123
+result: '{{ data is community.general.ansible_type(dtype) }}'
+# result => true
+
+# float or int
+dtype: ['float', 'int']
+data: 123.45
+result: '{{ data is community.general.ansible_type(dtype) }}'
+# result => true
+
+# Multiple alias
+# --------------
+
+# int alias number
+alias: {"int": "number", "float": "number"}
+dtype: number
+data: 123
+result: '{{ data is community.general.ansible_type(dtype, alias) }}'
+# result => true
+
+# float alias number
+alias: {"int": "number", "float": "number"}
+dtype: number
+data: 123.45
+result: '{{ data is community.general.ansible_type(dtype, alias) }}'
+# result => true
+'''
+
+RETURN = '''
+  _value:
+    description: Whether the data type is valid.
+    type: bool
+'''
+
+from ansible.errors import AnsibleFilterError
+from ansible.module_utils.common._collections_compat import Sequence
+from ansible_collections.community.general.plugins.plugin_utils.ansible_type import _ansible_type
+
+
+def ansible_type(data, dtype, alias=None):
+    """Validates data type"""
+
+    if not isinstance(dtype, Sequence):
+        msg = "The argument dtype must be a string or a list. %s is %s"
+ raise AnsibleFilterError(msg % (dtype, type(dtype))) + + if isinstance(dtype, str): + data_types = [dtype] + else: + data_types = dtype + + return _ansible_type(data, alias) in data_types + + +class TestModule(object): + + def tests(self): + return { + 'ansible_type': ansible_type + } diff --git a/tests/integration/targets/ansible_galaxy_install/tasks/main.yml b/tests/integration/targets/ansible_galaxy_install/tasks/main.yml index 1ecd9980d4..5c4af6d167 100644 --- a/tests/integration/targets/ansible_galaxy_install/tasks/main.yml +++ b/tests/integration/targets/ansible_galaxy_install/tasks/main.yml @@ -4,10 +4,16 @@ # SPDX-License-Identifier: GPL-3.0-or-later ################################################### +- name: Make directory install_c + ansible.builtin.file: + path: "{{ remote_tmp_dir }}/install_c" + state: directory + - name: Install collection netbox.netbox community.general.ansible_galaxy_install: type: collection name: netbox.netbox + dest: "{{ remote_tmp_dir }}/install_c" register: install_c0 - name: Assert collection netbox.netbox was installed @@ -20,6 +26,7 @@ community.general.ansible_galaxy_install: type: collection name: netbox.netbox + dest: "{{ remote_tmp_dir }}/install_c" register: install_c1 - name: Assert collection was not installed @@ -28,10 +35,16 @@ - install_c1 is not changed ################################################### +- name: Make directory install_r + ansible.builtin.file: + path: "{{ remote_tmp_dir }}/install_r" + state: directory + - name: Install role ansistrano.deploy community.general.ansible_galaxy_install: type: role name: ansistrano.deploy + dest: "{{ remote_tmp_dir }}/install_r" register: install_r0 - name: Assert collection ansistrano.deploy was installed @@ -44,6 +57,7 @@ community.general.ansible_galaxy_install: type: role name: ansistrano.deploy + dest: "{{ remote_tmp_dir }}/install_r" register: install_r1 - name: Assert role was not installed @@ -86,3 +100,44 @@ assert: that: - install_rq1 is not changed + +################################################### +- name: Make directory upgrade_c + ansible.builtin.file: + path: "{{ remote_tmp_dir }}/upgrade_c" + state: directory + +- name: Install collection netbox.netbox 3.17.0 + community.general.ansible_galaxy_install: + type: collection + name: netbox.netbox:3.17.0 + dest: "{{ remote_tmp_dir }}/upgrade_c" + register: upgrade_c0 + +- name: Assert collection netbox.netbox was installed + assert: + that: + - upgrade_c0 is changed + - '"netbox.netbox" in upgrade_c0.new_collections' + +- name: Upgrade collection netbox.netbox + community.general.ansible_galaxy_install: + state: latest + type: collection + name: netbox.netbox + dest: "{{ remote_tmp_dir }}/upgrade_c" + register: upgrade_c1 + +- name: Upgrade collection netbox.netbox (again) + community.general.ansible_galaxy_install: + state: latest + type: collection + name: netbox.netbox + dest: "{{ remote_tmp_dir }}/upgrade_c" + register: upgrade_c2 + +- name: Assert collection was not installed + assert: + that: + - upgrade_c1 is changed + - upgrade_c2 is not changed diff --git a/tests/integration/targets/cargo/tasks/main.yml b/tests/integration/targets/cargo/tasks/main.yml index 29f27c3fda..89f13960a6 100644 --- a/tests/integration/targets/cargo/tasks/main.yml +++ b/tests/integration/targets/cargo/tasks/main.yml @@ -16,6 +16,7 @@ - block: - import_tasks: test_general.yml - import_tasks: test_version.yml + - import_tasks: test_directory.yml environment: "{{ cargo_environment }}" when: has_cargo | default(false) - import_tasks: 
test_rustup_cargo.yml diff --git a/tests/integration/targets/cargo/tasks/test_directory.yml b/tests/integration/targets/cargo/tasks/test_directory.yml new file mode 100644 index 0000000000..f4275ede68 --- /dev/null +++ b/tests/integration/targets/cargo/tasks/test_directory.yml @@ -0,0 +1,122 @@ +--- +# Copyright (c) 2024 Colin Nolan +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Create temp directory + tempfile: + state: directory + register: temp_directory + +- name: Test block + vars: + manifest_path: "{{ temp_directory.path }}/Cargo.toml" + package_name: hello-world-directory-test + block: + - name: Initialize package + ansible.builtin.command: + cmd: "cargo init --name {{ package_name }}" + args: + chdir: "{{ temp_directory.path }}" + + - name: Set package version (1.0.0) + ansible.builtin.lineinfile: + path: "{{ manifest_path }}" + regexp: '^version = ".*"$' + line: 'version = "1.0.0"' + + - name: Ensure package is uninstalled + community.general.cargo: + name: "{{ package_name }}" + state: absent + directory: "{{ temp_directory.path }}" + register: uninstall_absent + + - name: Install package + community.general.cargo: + name: "{{ package_name }}" + directory: "{{ temp_directory.path }}" + register: install_absent + + - name: Change package version (1.0.1) + ansible.builtin.lineinfile: + path: "{{ manifest_path }}" + regexp: '^version = ".*"$' + line: 'version = "1.0.1"' + + - name: Install package again (present) + community.general.cargo: + name: "{{ package_name }}" + state: present + directory: "{{ temp_directory.path }}" + register: install_present_state + + - name: Install package again (latest) + community.general.cargo: + name: "{{ package_name }}" + state: latest + directory: "{{ temp_directory.path }}" + register: install_latest_state + + - name: Change package version (2.0.0) + ansible.builtin.lineinfile: + path: "{{ manifest_path }}" + regexp: '^version = ".*"$' + line: 'version = "2.0.0"' + + - name: Install package with given version (matched) + community.general.cargo: + name: "{{ package_name }}" + version: "2.0.0" + directory: "{{ temp_directory.path }}" + register: install_given_version_matched + + - name: Install package with given version (unmatched) + community.general.cargo: + name: "{{ package_name }}" + version: "2.0.1" + directory: "{{ temp_directory.path }}" + register: install_given_version_unmatched + ignore_errors: true + + - name: Uninstall package + community.general.cargo: + name: "{{ package_name }}" + state: absent + directory: "{{ temp_directory.path }}" + register: uninstall_present + + - name: Install non-existant package + community.general.cargo: + name: "{{ package_name }}-non-existant" + state: present + directory: "{{ temp_directory.path }}" + register: install_non_existant + ignore_errors: true + + - name: Install non-existant source directory + community.general.cargo: + name: "{{ package_name }}" + state: present + directory: "{{ temp_directory.path }}/non-existant" + register: install_non_existant_source + ignore_errors: true + + always: + - name: Remove temp directory + file: + path: "{{ temp_directory.path }}" + state: absent + +- name: Check assertions + assert: + that: + - uninstall_absent is not changed + - install_absent is changed + - install_present_state is not changed + - install_latest_state is changed + - install_given_version_matched is changed + - install_given_version_unmatched is failed + - 
uninstall_present is changed + - install_non_existant is failed + - install_non_existant_source is failed diff --git a/tests/integration/targets/consul/tasks/consul_agent_check.yml b/tests/integration/targets/consul/tasks/consul_agent_check.yml new file mode 100644 index 0000000000..e1229c794f --- /dev/null +++ b/tests/integration/targets/consul/tasks/consul_agent_check.yml @@ -0,0 +1,114 @@ +--- +# Copyright (c) 2024, Michael Ilg (@Ilgmi) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Create a service + community.general.consul_agent_service: + name: nginx + service_port: 80 + address: localhost + tags: + - http + meta: + nginx_version: 1.25.3 + register: result + +- set_fact: + nginx_service: "{{result.service}}" + +- assert: + that: + - result is changed + - result.service.ID is defined + +- name: Add a check for service + community.general.consul_agent_check: + name: nginx_check + id: nginx_check + interval: 30s + http: http://localhost:80/morestatus + notes: "Nginx Check" + service_id: "{{ nginx_service.ID }}" + register: result + +- assert: + that: + - result is changed + - result.check is defined + - result.check.CheckID == 'nginx_check' + - result.check.ServiceID == 'nginx' + - result.check.Interval == '30s' + - result.check.Type == 'http' + - result.check.Notes == 'Nginx Check' + +- set_fact: + nginx_service_check: "{{ result.check }}" + +- name: Update check for service + community.general.consul_agent_check: + name: "{{ nginx_service_check.Name }}" + id: "{{ nginx_service_check.CheckID }}" + interval: 60s + http: http://localhost:80/morestatus + notes: "New Nginx Check" + service_id: "{{ nginx_service.ID }}" + register: result + +- assert: + that: + - result is changed + - result.check is defined + - result.check.CheckID == 'nginx_check' + - result.check.ServiceID == 'nginx' + - result.check.Interval == '1m0s' + - result.check.Type == 'http' + - result.check.Notes == 'New Nginx Check' + +- name: Remove check + community.general.consul_agent_check: + id: "{{ nginx_service_check.Name }}" + state: absent + service_id: "{{ nginx_service.ID }}" + register: result + +- assert: + that: + - result is changed + - result is not failed + - result.operation == 'remove' + +- name: Add a check + community.general.consul_agent_check: + name: check + id: check + interval: 30s + tcp: localhost:80 + notes: "check" + register: result + +- assert: + that: + - result is changed + - result.check is defined + +- name: Update a check + community.general.consul_agent_check: + name: check + id: check + interval: 60s + tcp: localhost:80 + notes: "check" + register: result + +- assert: + that: + - result is changed + - result.check is defined + - result.check.Interval == '1m0s' + +- name: Remove check + community.general.consul_agent_check: + id: check + state: absent + register: result \ No newline at end of file diff --git a/tests/integration/targets/consul/tasks/consul_agent_service.yml b/tests/integration/targets/consul/tasks/consul_agent_service.yml new file mode 100644 index 0000000000..95270f74b3 --- /dev/null +++ b/tests/integration/targets/consul/tasks/consul_agent_service.yml @@ -0,0 +1,89 @@ +--- +# Copyright (c) 2024, Michael Ilg (@Ilgmi) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Create a service + community.general.consul_agent_service: + 
name: nginx + service_port: 80 + address: localhost + tags: + - http + meta: + nginx_version: 1.25.3 + register: result + +- set_fact: + nginx_service: "{{result.service}}" + +- assert: + that: + - result is changed + - result.service.ID is defined + - result.service.Service == 'nginx' + - result.service.Address == 'localhost' + - result.service.Port == 80 + - result.service.Tags[0] == 'http' + - result.service.Meta.nginx_version is defined + - result.service.Meta.nginx_version == '1.25.3' + - result.service.ContentHash is defined + +- name: Update service + community.general.consul_agent_service: + id: "{{ nginx_service.ID }}" + name: "{{ nginx_service.Service }}" + service_port: 8080 + address: 127.0.0.1 + tags: + - http + - new_tag + meta: + nginx_version: 1.0.0 + nginx: 1.25.3 + register: result +- assert: + that: + - result is changed + - result.service.ID is defined + - result.service.Service == 'nginx' + - result.service.Address == '127.0.0.1' + - result.service.Port == 8080 + - result.service.Tags[0] == 'http' + - result.service.Tags[1] == 'new_tag' + - result.service.Meta.nginx_version is defined + - result.service.Meta.nginx_version == '1.0.0' + - result.service.Meta.nginx is defined + - result.service.Meta.nginx == '1.25.3' + - result.service.ContentHash is defined + +- name: Update service not changed when updating again without changes + community.general.consul_agent_service: + id: "{{ nginx_service.ID }}" + name: "{{ nginx_service.Service }}" + service_port: 8080 + address: 127.0.0.1 + tags: + - http + - new_tag + meta: + nginx_version: 1.0.0 + nginx: 1.25.3 + register: result + +- assert: + that: + - result is not changed + - result.operation is not defined + +- name: Remove service + community.general.consul_agent_service: + id: "{{ nginx_service.ID }}" + state: absent + register: result + +- assert: + that: + - result is changed + - result is not failed + - result.operation == 'remove' \ No newline at end of file diff --git a/tests/integration/targets/consul/tasks/main.yml b/tests/integration/targets/consul/tasks/main.yml index 6fef2b9980..0ac58fc40e 100644 --- a/tests/integration/targets/consul/tasks/main.yml +++ b/tests/integration/targets/consul/tasks/main.yml @@ -97,6 +97,8 @@ - import_tasks: consul_token.yml - import_tasks: consul_auth_method.yml - import_tasks: consul_binding_rule.yml + - import_tasks: consul_agent_service.yml + - import_tasks: consul_agent_check.yml module_defaults: group/community.general.consul: token: "{{ consul_management_token }}" diff --git a/tests/integration/targets/django_command/aliases b/tests/integration/targets/django_command/aliases new file mode 100644 index 0000000000..aa06ccd7b4 --- /dev/null +++ b/tests/integration/targets/django_command/aliases @@ -0,0 +1,22 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +azp/posix/2 +skip/python2 +skip/freebsd +skip/macos +skip/osx +skip/rhel8.2 +skip/rhel8.3 +skip/rhel8.4 +skip/rhel8.5 +skip/rhel8.6 +skip/rhel8.7 +skip/rhel8.8 +skip/rhel9.0 +skip/rhel9.1 +skip/rhel9.2 +skip/rhel9.3 +skip/rhel9.4 +skip/rhel9.5 diff --git a/tests/integration/targets/django_command/files/base_test/1045-single-app-project/single_app_project/core/settings.py b/tests/integration/targets/django_command/files/base_test/1045-single-app-project/single_app_project/core/settings.py new file mode 100644 index 0000000000..881221c066 --- /dev/null +++ 
b/tests/integration/targets/django_command/files/base_test/1045-single-app-project/single_app_project/core/settings.py @@ -0,0 +1,6 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# single_app_project/core/settings.py +SECRET_KEY = 'testtesttesttesttest' diff --git a/tests/integration/targets/django_command/files/base_test/1045-single-app-project/single_app_project/manage.py b/tests/integration/targets/django_command/files/base_test/1045-single-app-project/single_app_project/manage.py new file mode 100755 index 0000000000..4b4eddcb67 --- /dev/null +++ b/tests/integration/targets/django_command/files/base_test/1045-single-app-project/single_app_project/manage.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +# single_app_project/manage.py +import os +import sys + + +def main(): + os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'single_app_project.core.settings') + from django.core.management import execute_from_command_line + execute_from_command_line(sys.argv) + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/django_command/files/base_test/simple_project/p1/manage.py b/tests/integration/targets/django_command/files/base_test/simple_project/p1/manage.py new file mode 100755 index 0000000000..be3140f44d --- /dev/null +++ b/tests/integration/targets/django_command/files/base_test/simple_project/p1/manage.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +"""Django's command-line utility for administrative tasks.""" +import os +import sys + + +def main(): + """Run administrative tasks.""" + os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'p1.settings') + try: + from django.core.management import execute_from_command_line + except ImportError as exc: + raise ImportError( + "Couldn't import Django. Are you sure it's installed and " + "available on your PYTHONPATH environment variable? Did you " + "forget to activate a virtual environment?" + ) from exc + execute_from_command_line(sys.argv) + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/django_command/files/base_test/simple_project/p1/p1/settings.py b/tests/integration/targets/django_command/files/base_test/simple_project/p1/p1/settings.py new file mode 100644 index 0000000000..86b3ae64c6 --- /dev/null +++ b/tests/integration/targets/django_command/files/base_test/simple_project/p1/p1/settings.py @@ -0,0 +1,133 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +""" +Django settings for p1 project. + +Generated by 'django-admin startproj' using Django 3.1.5. 
+ +For more information on this file, see +https://docs.djangoproject.com/en/3.1/topics/settings/ + +For the full list of settings and their values, see +https://docs.djangoproject.com/en/3.1/ref/settings/ +""" + +import os +from pathlib import Path + +# Build paths inside the project like this: BASE_DIR / 'subdir'. +BASE_DIR = Path(__file__).resolve().parent.parent + + +# Quick-start development settings - unsuitable for production +# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/ + +# SECURITY WARNING: keep the secret key used in production secret! +SECRET_KEY = '%g@gyhl*q@@g(_ab@t^76dao^#b9-v8mw^50)x_bv6wpl+mukj' + +# SECURITY WARNING: don't run with debug turned on in production! +DEBUG = True + +ALLOWED_HOSTS = [] + + +# Application definition + +INSTALLED_APPS = [ + 'django.contrib.admin', + 'django.contrib.auth', + 'django.contrib.contenttypes', + 'django.contrib.sessions', + 'django.contrib.messages', + 'django.contrib.staticfiles', +] + +MIDDLEWARE = [ + 'django.middleware.security.SecurityMiddleware', + 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.middleware.common.CommonMiddleware', + 'django.middleware.csrf.CsrfViewMiddleware', + 'django.contrib.auth.middleware.AuthenticationMiddleware', + 'django.contrib.messages.middleware.MessageMiddleware', + 'django.middleware.clickjacking.XFrameOptionsMiddleware', +] + +ROOT_URLCONF = 'p1.urls' + +TEMPLATES = [ + { + 'BACKEND': 'django.template.backends.django.DjangoTemplates', + 'DIRS': [], + 'APP_DIRS': True, + 'OPTIONS': { + 'context_processors': [ + 'django.template.context_processors.debug', + 'django.template.context_processors.request', + 'django.contrib.auth.context_processors.auth', + 'django.contrib.messages.context_processors.messages', + ], + }, + }, +] + +WSGI_APPLICATION = 'p1.wsgi.application' + + +# Database +# https://docs.djangoproject.com/en/3.1/ref/settings/#databases + +DATABASES = { + 'default': { + 'ENGINE': 'django.db.backends.sqlite3', + 'NAME': BASE_DIR / 'db.sqlite3', + } +} + + +# Password validation +# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators + +AUTH_PASSWORD_VALIDATORS = [ + { + 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', + }, + { + 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', + }, + { + 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', + }, + { + 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', + }, +] + + +# Internationalization +# https://docs.djangoproject.com/en/3.1/topics/i18n/ + +LANGUAGE_CODE = 'en-us' + +TIME_ZONE = 'UTC' + +USE_I18N = True + +USE_L10N = True + +USE_TZ = True + + +# Static files (CSS, JavaScript, Images) +# https://docs.djangoproject.com/en/3.1/howto/static-files/ + +STATIC_URL = '/static/' +STATIC_ROOT = '/tmp/django-static' + +if "DJANGO_ANSIBLE_RAISE" in os.environ: + raise ValueError("DJANGO_ANSIBLE_RAISE={0}".format(os.environ["DJANGO_ANSIBLE_RAISE"])) diff --git a/tests/integration/targets/django_command/files/base_test/simple_project/p1/p1/urls.py b/tests/integration/targets/django_command/files/base_test/simple_project/p1/p1/urls.py new file mode 100644 index 0000000000..36cb592756 --- /dev/null +++ b/tests/integration/targets/django_command/files/base_test/simple_project/p1/p1/urls.py @@ -0,0 +1,28 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: 
GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +"""p1 URL Configuration + +The `urlpatterns` list routes URLs to views. For more information please see: + https://docs.djangoproject.com/en/2.2/topics/http/urls/ +Examples: +Function views + 1. Add an import: from my_app import views + 2. Add a URL to urlpatterns: path('', views.home, name='home') +Class-based views + 1. Add an import: from other_app.views import Home + 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') +Including another URLconf + 1. Import the include() function: from django.urls import include, path + 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) +""" +from django.contrib import admin +from django.urls import path + +urlpatterns = [ + path('admin/', admin.site.urls), +] diff --git a/tests/integration/targets/django_command/files/base_test/startproj/.keep b/tests/integration/targets/django_command/files/base_test/startproj/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/integration/targets/django_command/meta/main.yml b/tests/integration/targets/django_command/meta/main.yml new file mode 100644 index 0000000000..4a216308a2 --- /dev/null +++ b/tests/integration/targets/django_command/meta/main.yml @@ -0,0 +1,8 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +dependencies: + - setup_pkg_mgr + - setup_os_pkg_name diff --git a/tests/integration/targets/django_command/tasks/main.yaml b/tests/integration/targets/django_command/tasks/main.yaml new file mode 100644 index 0000000000..9d052dc44f --- /dev/null +++ b/tests/integration/targets/django_command/tasks/main.yaml @@ -0,0 +1,91 @@ +# Test code for django_command module +# +# Copyright (c) 2020, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +- name: Create temporary test directory + tempfile: + state: directory + suffix: .django_command + register: tmp_django_root + +- name: Install OS package virtualenv + package: + name: "{{ os_package_name.virtualenv }}" + state: present + +- name: Ensure virtualenv is created + command: >- + virtualenv {{ tmp_django_root.path }}/venv + +- name: Update python package pip + pip: + name: pip + state: latest + virtualenv: "{{ tmp_django_root.path }}/venv" + +- name: Install python package django + pip: + name: django + state: present + virtualenv: "{{ tmp_django_root.path }}/venv" + +- name: Copy files + copy: + src: base_test/ + dest: "{{ tmp_django_root.path }}" + mode: preserve + +- name: Create project + command: + chdir: "{{ tmp_django_root.path }}/startproj" + cmd: "{{ tmp_django_root.path }}/venv/bin/django-admin startproject test_django_command_1" + +- name: Create app + command: + chdir: "{{ tmp_django_root.path }}/startproj" + cmd: "{{ tmp_django_root.path }}/venv/bin/django-admin startapp app1" + +- name: Check + community.general.django_command: + pythonpath: "{{ tmp_django_root.path }}/startproj/test_django_command_1" + settings: test_django_command_1.settings + command: check + venv: "{{ tmp_django_root.path }}/venv" + +- name: Check simple_project + community.general.django_command: + pythonpath: "{{ tmp_django_root.path }}/simple_project/p1" + settings: p1.settings + command: check + venv: "{{ tmp_django_root.path }}/venv" 
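The two check tasks above exercise community.general.django_command against both the freshly generated project and the copied simple_project fixture. For orientation only, the simple_project check corresponds roughly to the raw invocation sketched below; the exact interpreter call and environment handling are illustrative assumptions, not the module's actual implementation.

# Illustration only: a rough manual equivalent of the "Check simple_project" task,
# not how the module is implemented.
- name: Rough manual equivalent of the simple_project check (illustration only)
  ansible.builtin.command:
    cmd: "{{ tmp_django_root.path }}/venv/bin/python -m django check"
  environment:
    PYTHONPATH: "{{ tmp_django_root.path }}/simple_project/p1"
    DJANGO_SETTINGS_MODULE: p1.settings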
+ +- name: Check custom project + community.general.django_command: + pythonpath: "{{ tmp_django_root.path }}/1045-single-app-project/single_app_project" + settings: core.settings + command: check + venv: "{{ tmp_django_root.path }}/venv" + +- name: Run collectstatic --noinput on simple project + community.general.django_command: + pythonpath: "{{ tmp_django_root.path }}/simple_project/p1" + settings: p1.settings + command: collectstatic --noinput + venv: "{{ tmp_django_root.path }}/venv" + +- name: Trigger exception with environment variable + community.general.django_command: + pythonpath: "{{ tmp_django_root.path }}/simple_project/p1" + settings: p1.settings + command: collectstatic --noinput + venv: "{{ tmp_django_root.path }}/venv" + environment: + DJANGO_ANSIBLE_RAISE: blah + ignore_errors: true + register: env_raise + +- name: Check env variable reached manage.py + ansible.builtin.assert: + that: + - "'ValueError: DJANGO_ANSIBLE_RAISE=blah' in env_raise.msg" diff --git a/tests/integration/targets/django_manage/aliases b/tests/integration/targets/django_manage/aliases index 9790549169..aa06ccd7b4 100644 --- a/tests/integration/targets/django_manage/aliases +++ b/tests/integration/targets/django_manage/aliases @@ -18,3 +18,5 @@ skip/rhel9.0 skip/rhel9.1 skip/rhel9.2 skip/rhel9.3 +skip/rhel9.4 +skip/rhel9.5 diff --git a/tests/integration/targets/django_manage/meta/main.yml b/tests/integration/targets/django_manage/meta/main.yml index 2fcd152f95..4a216308a2 100644 --- a/tests/integration/targets/django_manage/meta/main.yml +++ b/tests/integration/targets/django_manage/meta/main.yml @@ -5,3 +5,4 @@ dependencies: - setup_pkg_mgr + - setup_os_pkg_name diff --git a/tests/integration/targets/django_manage/tasks/main.yaml b/tests/integration/targets/django_manage/tasks/main.yaml index 5307fb6642..9c2d4789e3 100644 --- a/tests/integration/targets/django_manage/tasks/main.yaml +++ b/tests/integration/targets/django_manage/tasks/main.yaml @@ -9,17 +9,10 @@ suffix: .django_manage register: tmp_django_root -- name: Install virtualenv on CentOS 8 +- name: Install virtualenv package: - name: virtualenv + name: "{{ os_package_name.virtualenv }}" state: present - when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '8' - -- name: Install virtualenv on Arch Linux - pip: - name: virtualenv - state: present - when: ansible_os_family == 'Archlinux' - name: Install required library pip: diff --git a/tests/integration/targets/dnf_versionlock/tasks/main.yml b/tests/integration/targets/dnf_versionlock/tasks/main.yml index 51e823ffd7..f37bd8dd48 100644 --- a/tests/integration/targets/dnf_versionlock/tasks/main.yml +++ b/tests/integration/targets/dnf_versionlock/tasks/main.yml @@ -7,6 +7,7 @@ - include_tasks: install.yml - include_tasks: lock_bash.yml - include_tasks: lock_updates.yml - when: (ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('23', '>=')) or + when: (ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('23', '>=') and ansible_distribution_major_version is version('41', '<')) or (ansible_distribution in ['RedHat', 'CentOS'] and ansible_distribution_major_version is version('8', '>=')) +# TODO: Fix on Fedora 41, apparently the output changed! ... 
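Both Django test targets now take the virtualenv package name from the shared setup_os_pkg_name role (via os_package_name.virtualenv) instead of hard-coding per-distribution task variants. That role's contents are not part of this diff; the snippet below is only a hedged sketch of the kind of per-OS mapping it is assumed to provide, and the concrete package names shown are illustrative guesses.

# Hypothetical shape of the variable supplied by setup_os_pkg_name (illustration only;
# the real mapping lives in that role, not in this patch).
os_package_name:
  virtualenv: "{{ 'python3-virtualenv' if ansible_os_family == 'RedHat' else 'virtualenv' }}"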
diff --git a/tests/integration/targets/ejabberd_user/tasks/main.yml b/tests/integration/targets/ejabberd_user/tasks/main.yml index 349b3f952f..d7f1670d06 100644 --- a/tests/integration/targets/ejabberd_user/tasks/main.yml +++ b/tests/integration/targets/ejabberd_user/tasks/main.yml @@ -11,8 +11,10 @@ - name: Bail out if not supported ansible.builtin.meta: end_play # TODO: remove Archlinux from the list - when: ansible_distribution in ('Alpine', 'openSUSE Leap', 'CentOS', 'Fedora', 'Archlinux') - + # TODO: remove Ubuntu 24.04 (noble) from the list + when: > + ansible_distribution in ('Alpine', 'openSUSE Leap', 'CentOS', 'Fedora', 'Archlinux') + or (ansible_distribution == 'Ubuntu' and ansible_distribution_release in ['noble']) - name: Remove ejabberd ansible.builtin.package: diff --git a/tests/integration/targets/filter_jc/aliases b/tests/integration/targets/filter_jc/aliases index 4e11515666..62fbc2daba 100644 --- a/tests/integration/targets/filter_jc/aliases +++ b/tests/integration/targets/filter_jc/aliases @@ -6,3 +6,5 @@ azp/posix/2 skip/python2.7 # jc only supports python3.x skip/freebsd13.3 # FIXME - ruyaml compilation fails skip/freebsd14.0 # FIXME - ruyaml compilation fails +skip/freebsd14.1 # FIXME - ruyaml compilation fails +skip/freebsd14.2 # FIXME - ruyaml compilation fails diff --git a/tests/integration/targets/filter_keep_keys/aliases b/tests/integration/targets/filter_keep_keys/aliases new file mode 100644 index 0000000000..12d1d6617e --- /dev/null +++ b/tests/integration/targets/filter_keep_keys/aliases @@ -0,0 +1,5 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +azp/posix/2 diff --git a/tests/integration/targets/filter_keep_keys/tasks/main.yml b/tests/integration/targets/filter_keep_keys/tasks/main.yml new file mode 100644 index 0000000000..9c0674780e --- /dev/null +++ b/tests/integration/targets/filter_keep_keys/tasks/main.yml @@ -0,0 +1,7 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Tests + import_tasks: tests.yml diff --git a/tests/integration/targets/filter_keep_keys/tasks/tests.yml b/tests/integration/targets/filter_keep_keys/tasks/tests.yml new file mode 100644 index 0000000000..fa821702f0 --- /dev/null +++ b/tests/integration/targets/filter_keep_keys/tasks/tests.yml @@ -0,0 +1,31 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Debug ansible_version + ansible.builtin.debug: + var: ansible_version + when: not quite_test | d(true) | bool + tags: ansible_version + +- name: Tests + ansible.builtin.assert: + that: + - (result | difference(i.0.result) | length) == 0 + success_msg: | + [OK] result: + {{ result | to_yaml }} + fail_msg: | + [ERR] result: + {{ result | to_yaml }} + quiet: "{{ quiet_test | d(true) | bool }}" + loop: "{{ tests | subelements('group') }}" + loop_control: + loop_var: i + label: "{{ i.1.mp | d('default') }}: {{ i.1.tt }}" + vars: + input: "{{ i.0.input }}" + target: "{{ i.1.tt }}" + mp: "{{ i.1.mp | d('default') }}" + result: "{{ lookup('template', i.0.template) }}" diff --git a/tests/integration/targets/filter_keep_keys/templates/default.j2 
b/tests/integration/targets/filter_keep_keys/templates/default.j2 new file mode 100644 index 0000000000..cb1232f9ee --- /dev/null +++ b/tests/integration/targets/filter_keep_keys/templates/default.j2 @@ -0,0 +1 @@ +{{ input | community.general.keep_keys(target=target) }} diff --git a/tests/integration/targets/filter_keep_keys/templates/default.j2.license b/tests/integration/targets/filter_keep_keys/templates/default.j2.license new file mode 100644 index 0000000000..a1390a69ed --- /dev/null +++ b/tests/integration/targets/filter_keep_keys/templates/default.j2.license @@ -0,0 +1,3 @@ +Copyright (c) Ansible Project +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later diff --git a/tests/integration/targets/filter_keep_keys/templates/mp.j2 b/tests/integration/targets/filter_keep_keys/templates/mp.j2 new file mode 100644 index 0000000000..753698d420 --- /dev/null +++ b/tests/integration/targets/filter_keep_keys/templates/mp.j2 @@ -0,0 +1 @@ +{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }} diff --git a/tests/integration/targets/filter_keep_keys/templates/mp.j2.license b/tests/integration/targets/filter_keep_keys/templates/mp.j2.license new file mode 100644 index 0000000000..a1390a69ed --- /dev/null +++ b/tests/integration/targets/filter_keep_keys/templates/mp.j2.license @@ -0,0 +1,3 @@ +Copyright (c) Ansible Project +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later diff --git a/tests/integration/targets/filter_keep_keys/vars/main/tests.yml b/tests/integration/targets/filter_keep_keys/vars/main/tests.yml new file mode 100644 index 0000000000..f1abceddda --- /dev/null +++ b/tests/integration/targets/filter_keep_keys/vars/main/tests.yml @@ -0,0 +1,40 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +tests: + - template: default.j2 + group: + - {tt: [k0_x0, k1_x1], d: 'By default, match keys that equal any of the items in the target.'} + input: + - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} + result: + - {k0_x0: A0, k1_x1: B0} + - {k0_x0: A1, k1_x1: B1} + - template: mp.j2 + group: + - {mp: equal, tt: [k0_x0, k1_x1], d: Match keys that equal any of the items in the target.} + - {mp: starts_with, tt: [k0, k1], d: Match keys that start with any of the items in the target.} + - {mp: ends_with, tt: [x0, x1], d: Match keys that end with any of the items in target.} + - {mp: regex, tt: ['^.*[01]_x.*$'], d: Match keys by the regex.} + - {mp: regex, tt: '^.*[01]_x.*$', d: Match keys by the regex.} + input: + - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} + result: + - {k0_x0: A0, k1_x1: B0} + - {k0_x0: A1, k1_x1: B1} + - template: mp.j2 + group: + - {mp: equal, tt: k0_x0, d: Match keys that equal the target.} + - {mp: starts_with, tt: k0, d: Match keys that start with the target.} + - {mp: ends_with, tt: x0, d: Match keys that end with the target.} + - {mp: regex, tt: '^.*0_x.*$', d: Match keys by the regex.} + input: + - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} + result: + - {k0_x0: A0} + - {k0_x0: A1} diff --git 
a/tests/integration/targets/filter_remove_keys/aliases b/tests/integration/targets/filter_remove_keys/aliases new file mode 100644 index 0000000000..12d1d6617e --- /dev/null +++ b/tests/integration/targets/filter_remove_keys/aliases @@ -0,0 +1,5 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +azp/posix/2 diff --git a/tests/integration/targets/filter_remove_keys/tasks/main.yml b/tests/integration/targets/filter_remove_keys/tasks/main.yml new file mode 100644 index 0000000000..9c0674780e --- /dev/null +++ b/tests/integration/targets/filter_remove_keys/tasks/main.yml @@ -0,0 +1,7 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Tests + import_tasks: tests.yml diff --git a/tests/integration/targets/filter_remove_keys/tasks/tests.yml b/tests/integration/targets/filter_remove_keys/tasks/tests.yml new file mode 100644 index 0000000000..fa821702f0 --- /dev/null +++ b/tests/integration/targets/filter_remove_keys/tasks/tests.yml @@ -0,0 +1,31 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Debug ansible_version + ansible.builtin.debug: + var: ansible_version + when: not quite_test | d(true) | bool + tags: ansible_version + +- name: Tests + ansible.builtin.assert: + that: + - (result | difference(i.0.result) | length) == 0 + success_msg: | + [OK] result: + {{ result | to_yaml }} + fail_msg: | + [ERR] result: + {{ result | to_yaml }} + quiet: "{{ quiet_test | d(true) | bool }}" + loop: "{{ tests | subelements('group') }}" + loop_control: + loop_var: i + label: "{{ i.1.mp | d('default') }}: {{ i.1.tt }}" + vars: + input: "{{ i.0.input }}" + target: "{{ i.1.tt }}" + mp: "{{ i.1.mp | d('default') }}" + result: "{{ lookup('template', i.0.template) }}" diff --git a/tests/integration/targets/filter_remove_keys/templates/default.j2 b/tests/integration/targets/filter_remove_keys/templates/default.j2 new file mode 100644 index 0000000000..0dbc26323f --- /dev/null +++ b/tests/integration/targets/filter_remove_keys/templates/default.j2 @@ -0,0 +1 @@ +{{ input | community.general.remove_keys(target=target) }} diff --git a/tests/integration/targets/filter_remove_keys/templates/default.j2.license b/tests/integration/targets/filter_remove_keys/templates/default.j2.license new file mode 100644 index 0000000000..a1390a69ed --- /dev/null +++ b/tests/integration/targets/filter_remove_keys/templates/default.j2.license @@ -0,0 +1,3 @@ +Copyright (c) Ansible Project +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later diff --git a/tests/integration/targets/filter_remove_keys/templates/mp.j2 b/tests/integration/targets/filter_remove_keys/templates/mp.j2 new file mode 100644 index 0000000000..5caa27a9b8 --- /dev/null +++ b/tests/integration/targets/filter_remove_keys/templates/mp.j2 @@ -0,0 +1 @@ +{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }} diff --git a/tests/integration/targets/filter_remove_keys/templates/mp.j2.license b/tests/integration/targets/filter_remove_keys/templates/mp.j2.license new file mode 100644 
index 0000000000..a1390a69ed --- /dev/null +++ b/tests/integration/targets/filter_remove_keys/templates/mp.j2.license @@ -0,0 +1,3 @@ +Copyright (c) Ansible Project +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later diff --git a/tests/integration/targets/filter_remove_keys/vars/main/tests.yml b/tests/integration/targets/filter_remove_keys/vars/main/tests.yml new file mode 100644 index 0000000000..a4767ea799 --- /dev/null +++ b/tests/integration/targets/filter_remove_keys/vars/main/tests.yml @@ -0,0 +1,40 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +tests: + - template: default.j2 + group: + - {tt: [k0_x0, k1_x1], d: 'By default, match keys that equal any of the items in the target.'} + input: + - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} + result: + - {k2_x2: [C0], k3_x3: foo} + - {k2_x2: [C1], k3_x3: bar} + - template: mp.j2 + group: + - {mp: equal, tt: [k0_x0, k1_x1], d: Match keys that equal any of the items in the target.} + - {mp: starts_with, tt: [k0, k1], d: Match keys that start with any of the items in the target.} + - {mp: ends_with, tt: [x0, x1], d: Match keys that end with any of the items in target.} + - {mp: regex, tt: ['^.*[01]_x.*$'], d: Match keys by the regex.} + - {mp: regex, tt: '^.*[01]_x.*$', d: Match keys by the regex.} + input: + - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} + result: + - {k2_x2: [C0], k3_x3: foo} + - {k2_x2: [C1], k3_x3: bar} + - template: mp.j2 + group: + - {mp: equal, tt: k0_x0, d: Match keys that equal the target.} + - {mp: starts_with, tt: k0, d: Match keys that start with the target.} + - {mp: ends_with, tt: x0, d: Match keys that end with the target.} + - {mp: regex, tt: '^.*0_x.*$', d: Match keys by the regex.} + input: + - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} + result: + - {k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k1_x1: B1, k2_x2: [C1], k3_x3: bar} diff --git a/tests/integration/targets/filter_replace_keys/aliases b/tests/integration/targets/filter_replace_keys/aliases new file mode 100644 index 0000000000..12d1d6617e --- /dev/null +++ b/tests/integration/targets/filter_replace_keys/aliases @@ -0,0 +1,5 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +azp/posix/2 diff --git a/tests/integration/targets/filter_replace_keys/tasks/main.yml b/tests/integration/targets/filter_replace_keys/tasks/main.yml new file mode 100644 index 0000000000..9c0674780e --- /dev/null +++ b/tests/integration/targets/filter_replace_keys/tasks/main.yml @@ -0,0 +1,7 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Tests + import_tasks: tests.yml diff --git a/tests/integration/targets/filter_replace_keys/tasks/tests.yml b/tests/integration/targets/filter_replace_keys/tasks/tests.yml new file mode 100644 index 0000000000..fa821702f0 --- /dev/null +++ b/tests/integration/targets/filter_replace_keys/tasks/tests.yml @@ -0,0 
+1,31 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Debug ansible_version + ansible.builtin.debug: + var: ansible_version + when: not quite_test | d(true) | bool + tags: ansible_version + +- name: Tests + ansible.builtin.assert: + that: + - (result | difference(i.0.result) | length) == 0 + success_msg: | + [OK] result: + {{ result | to_yaml }} + fail_msg: | + [ERR] result: + {{ result | to_yaml }} + quiet: "{{ quiet_test | d(true) | bool }}" + loop: "{{ tests | subelements('group') }}" + loop_control: + loop_var: i + label: "{{ i.1.mp | d('default') }}: {{ i.1.tt }}" + vars: + input: "{{ i.0.input }}" + target: "{{ i.1.tt }}" + mp: "{{ i.1.mp | d('default') }}" + result: "{{ lookup('template', i.0.template) }}" diff --git a/tests/integration/targets/filter_replace_keys/templates/default.j2 b/tests/integration/targets/filter_replace_keys/templates/default.j2 new file mode 100644 index 0000000000..6ba66cd690 --- /dev/null +++ b/tests/integration/targets/filter_replace_keys/templates/default.j2 @@ -0,0 +1 @@ +{{ input | community.general.replace_keys(target=target) }} diff --git a/tests/integration/targets/filter_replace_keys/templates/default.j2.license b/tests/integration/targets/filter_replace_keys/templates/default.j2.license new file mode 100644 index 0000000000..a1390a69ed --- /dev/null +++ b/tests/integration/targets/filter_replace_keys/templates/default.j2.license @@ -0,0 +1,3 @@ +Copyright (c) Ansible Project +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later diff --git a/tests/integration/targets/filter_replace_keys/templates/mp.j2 b/tests/integration/targets/filter_replace_keys/templates/mp.j2 new file mode 100644 index 0000000000..70c5009d91 --- /dev/null +++ b/tests/integration/targets/filter_replace_keys/templates/mp.j2 @@ -0,0 +1 @@ +{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }} diff --git a/tests/integration/targets/filter_replace_keys/templates/mp.j2.license b/tests/integration/targets/filter_replace_keys/templates/mp.j2.license new file mode 100644 index 0000000000..a1390a69ed --- /dev/null +++ b/tests/integration/targets/filter_replace_keys/templates/mp.j2.license @@ -0,0 +1,3 @@ +Copyright (c) Ansible Project +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later diff --git a/tests/integration/targets/filter_replace_keys/vars/main/tests.yml b/tests/integration/targets/filter_replace_keys/vars/main/tests.yml new file mode 100644 index 0000000000..ca906a770b --- /dev/null +++ b/tests/integration/targets/filter_replace_keys/vars/main/tests.yml @@ -0,0 +1,71 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +tests: + - template: default.j2 + group: + - d: By default, match keys that equal any of the attributes before. 
+ tt: + - {before: k0_x0, after: a0} + - {before: k1_x1, after: a1} + input: + - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} + result: + - {a0: A0, a1: B0, k2_x2: [C0], k3_x3: foo} + - {a0: A1, a1: B1, k2_x2: [C1], k3_x3: bar} + - template: mp.j2 + group: + - d: Replace keys that starts with any of the attributes before. + mp: starts_with + tt: + - {before: k0, after: a0} + - {before: k1, after: a1} + - d: Replace keys that ends with any of the attributes before. + mp: ends_with + tt: + - {before: x0, after: a0} + - {before: x1, after: a1} + - d: Replace keys that match any regex of the attributes before. + mp: regex + tt: + - {before: "^.*0_x.*$", after: a0} + - {before: "^.*1_x.*$", after: a1} + input: + - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} + result: + - {a0: A0, a1: B0, k2_x2: [C0], k3_x3: foo} + - {a0: A1, a1: B1, k2_x2: [C1], k3_x3: bar} + - template: mp.j2 + group: + - d: If more keys match the same attribute before the last one will be used. + mp: regex + tt: + - {before: "^.*_x.*$", after: X} + - d: If there are items with equal attribute before the first one will be used. + mp: regex + tt: + - {before: "^.*_x.*$", after: X} + - {before: "^.*_x.*$", after: Y} + input: + - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} + result: + - X: foo + - X: bar + - template: mp.j2 + group: + - d: If there are more matches for a key the first one will be used. + mp: starts_with + tt: + - {before: a, after: X} + - {before: aa, after: Y} + input: + - {aaa1: A, bbb1: B, ccc1: C} + - {aaa2: D, bbb2: E, ccc2: F} + result: + - {X: A, bbb1: B, ccc1: C} + - {X: D, bbb2: E, ccc2: F} diff --git a/tests/integration/targets/filter_reveal_ansible_type/aliases b/tests/integration/targets/filter_reveal_ansible_type/aliases new file mode 100644 index 0000000000..12d1d6617e --- /dev/null +++ b/tests/integration/targets/filter_reveal_ansible_type/aliases @@ -0,0 +1,5 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +azp/posix/2 diff --git a/tests/integration/targets/filter_reveal_ansible_type/tasks/main.yml b/tests/integration/targets/filter_reveal_ansible_type/tasks/main.yml new file mode 100644 index 0000000000..c890c11901 --- /dev/null +++ b/tests/integration/targets/filter_reveal_ansible_type/tasks/main.yml @@ -0,0 +1,7 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Integration tests + import_tasks: tasks.yml diff --git a/tests/integration/targets/filter_reveal_ansible_type/tasks/tasks.yml b/tests/integration/targets/filter_reveal_ansible_type/tasks/tasks.yml new file mode 100644 index 0000000000..37d3abcb71 --- /dev/null +++ b/tests/integration/targets/filter_reveal_ansible_type/tasks/tasks.yml @@ -0,0 +1,185 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# Substitution converts str to AnsibleUnicode +# ------------------------------------------- + +- name: String. AnsibleUnicode. 
+ assert: + that: result == dtype + success_msg: '"abc" is {{ dtype }}' + fail_msg: '"abc" is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + data: "abc" + result: '{{ data | community.general.reveal_ansible_type }}' + dtype: 'AnsibleUnicode' + +- name: String. AnsibleUnicode alias str. + assert: + that: result == dtype + success_msg: '"abc" is {{ dtype }}' + fail_msg: '"abc" is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + alias: {"AnsibleUnicode": "str"} + data: "abc" + result: '{{ data | community.general.reveal_ansible_type(alias) }}' + dtype: 'str' + +- name: List. All items are AnsibleUnicode. + assert: + that: result == dtype + success_msg: '["a", "b", "c"] is {{ dtype }}' + fail_msg: '["a", "b", "c"] is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + data: ["a", "b", "c"] + result: '{{ data | community.general.reveal_ansible_type }}' + dtype: 'list[AnsibleUnicode]' + +- name: Dictionary. All keys are AnsibleUnicode. All values are AnsibleUnicode. + assert: + that: result == dtype + success_msg: '{"a": "foo", "b": "bar", "c": "baz"} is {{ dtype }}' + fail_msg: '{"a": "foo", "b": "bar", "c": "baz"} is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + data: {"a": "foo", "b": "bar", "c": "baz"} + result: '{{ data | community.general.reveal_ansible_type }}' + dtype: 'dict[AnsibleUnicode, AnsibleUnicode]' + +# No substitution and no alias. Type of strings is str +# ---------------------------------------------------- + +- name: String + assert: + that: result == dtype + success_msg: '"abc" is {{ dtype }}' + fail_msg: '"abc" is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + result: '{{ "abc" | community.general.reveal_ansible_type }}' + dtype: str + +- name: Integer + assert: + that: result == dtype + success_msg: '123 is {{ dtype }}' + fail_msg: '123 is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + result: '{{ 123 | community.general.reveal_ansible_type }}' + dtype: int + +- name: Float + assert: + that: result == dtype + success_msg: '123.45 is {{ dtype }}' + fail_msg: '123.45 is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + result: '{{ 123.45 | community.general.reveal_ansible_type }}' + dtype: float + +- name: Boolean + assert: + that: result == dtype + success_msg: 'true is {{ dtype }}' + fail_msg: 'true is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + result: '{{ true | community.general.reveal_ansible_type }}' + dtype: bool + +- name: List. All items are strings. + assert: + that: result == dtype + success_msg: '["a", "b", "c"] is {{ dtype }}' + fail_msg: '["a", "b", "c"] is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + result: '{{ ["a", "b", "c"] | community.general.reveal_ansible_type }}' + dtype: list[str] + +- name: List of dictionaries. + assert: + that: result == dtype + success_msg: '[{"a": 1}, {"b": 2}] is {{ dtype }}' + fail_msg: '[{"a": 1}, {"b": 2}] is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + result: '{{ [{"a": 1}, {"b": 2}] | community.general.reveal_ansible_type }}' + dtype: list[dict] + +- name: Dictionary. All keys are strings. All values are integers. + assert: + that: result == dtype + success_msg: '{"a": 1} is {{ dtype }}' + fail_msg: '{"a": 1} is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + result: '{{ {"a": 1} | community.general.reveal_ansible_type }}' + dtype: dict[str, int] + +- name: Dictionary. 
All keys are strings. All values are integers. + assert: + that: result == dtype + success_msg: '{"a": 1, "b": 2} is {{ dtype }}' + fail_msg: '{"a": 1, "b": 2} is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + result: '{{ {"a": 1, "b": 2} | community.general.reveal_ansible_type }}' + dtype: dict[str, int] + +# Type of strings is AnsibleUnicode or str +# ---------------------------------------- + +- name: Dictionary. The keys are integers or strings. All values are strings. + assert: + that: result == dtype + success_msg: 'data is {{ dtype }}' + fail_msg: 'data is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + alias: {"AnsibleUnicode": "str"} + data: {1: 'a', 'b': 'b'} + result: '{{ data | community.general.reveal_ansible_type(alias) }}' + dtype: dict[int|str, str] + +- name: Dictionary. All keys are integers. All values are keys. + assert: + that: result == dtype + success_msg: 'data is {{ dtype }}' + fail_msg: 'data is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + alias: {"AnsibleUnicode": "str"} + data: {1: 'a', 2: 'b'} + result: '{{ data | community.general.reveal_ansible_type(alias) }}' + dtype: dict[int, str] + +- name: Dictionary. All keys are strings. Multiple types values. + assert: + that: result == dtype + success_msg: 'data is {{ dtype }}' + fail_msg: 'data is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + alias: {"AnsibleUnicode": "str"} + data: {'a': 1, 'b': 1.1, 'c': 'abc', 'd': True, 'e': ['x', 'y', 'z'], 'f': {'x': 1, 'y': 2}} + result: '{{ data | community.general.reveal_ansible_type(alias) }}' + dtype: dict[str, bool|dict|float|int|list|str] + +- name: List. Multiple types items. + assert: + that: result == dtype + success_msg: 'data is {{ dtype }}' + fail_msg: 'data is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + alias: {"AnsibleUnicode": "str"} + data: [1, 2, 1.1, 'abc', True, ['x', 'y', 'z'], {'x': 1, 'y': 2}] + result: '{{ data | community.general.reveal_ansible_type(alias) }}' + dtype: list[bool|dict|float|int|list|str] diff --git a/tests/integration/targets/gandi_livedns/aliases b/tests/integration/targets/gandi_livedns/aliases index f69a127f4d..bd1f024441 100644 --- a/tests/integration/targets/gandi_livedns/aliases +++ b/tests/integration/targets/gandi_livedns/aliases @@ -2,5 +2,4 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -cloud/gandi unsupported diff --git a/tests/integration/targets/gem/tasks/main.yml b/tests/integration/targets/gem/tasks/main.yml index 2d615304f8..2848a92bfb 100644 --- a/tests/integration/targets/gem/tasks/main.yml +++ b/tests/integration/targets/gem/tasks/main.yml @@ -18,6 +18,7 @@ - files: - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml' - '{{ ansible_distribution }}-{{ ansible_distribution_version }}.yml' + - '{{ ansible_distribution }}.yml' - '{{ ansible_os_family }}.yml' - 'default.yml' paths: '../vars' diff --git a/tests/integration/targets/gem/vars/Ubuntu.yml b/tests/integration/targets/gem/vars/Ubuntu.yml new file mode 100644 index 0000000000..5f81e7e487 --- /dev/null +++ b/tests/integration/targets/gem/vars/Ubuntu.yml @@ -0,0 +1,7 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +test_packages: + - "ruby" diff --git 
a/tests/integration/targets/git_config/tasks/unset_value.yml b/tests/integration/targets/git_config/tasks/unset_value.yml index dfa535a2d3..5f8c52c96f 100644 --- a/tests/integration/targets/git_config/tasks/unset_value.yml +++ b/tests/integration/targets/git_config/tasks/unset_value.yml @@ -18,6 +18,30 @@ scope: "{{ option_scope }}" register: get_result +- name: assert unset changed and deleted value + assert: + that: + - unset_result is changed + - unset_result.diff.before == option_value + "\n" + - unset_result.diff.after == "\n" + - get_result.config_value == '' + +- import_tasks: setup_value.yml + +- name: unsetting value with value specified + git_config: + name: "{{ option_name }}" + scope: "{{ option_scope }}" + value: "{{ option_value }}" + state: absent + register: unset_result + +- name: getting value + git_config: + name: "{{ option_name }}" + scope: "{{ option_scope }}" + register: get_result + - name: assert unset changed and deleted value assert: that: diff --git a/tests/integration/targets/homebrew/tasks/docker.yml b/tests/integration/targets/homebrew/tasks/docker.yml index 3b9e2ea6b4..c7f282ba2d 100644 --- a/tests/integration/targets/homebrew/tasks/docker.yml +++ b/tests/integration/targets/homebrew/tasks/docker.yml @@ -12,20 +12,6 @@ path: "{{ brew_which.stdout }}" register: brew_stat -- name: MACOS | Install docker without --formula - community.general.homebrew: - name: docker - state: present - become: true - become_user: "{{ brew_stat.stat.pw_name }}" - ignore_errors: true - register: result - -- name: Check that installing docker without --formula raises warning - assert: - that: - - result is failed - - name: MACOS | Install docker community.general.homebrew: name: docker diff --git a/tests/integration/targets/homebrew_services/aliases b/tests/integration/targets/homebrew_services/aliases new file mode 100644 index 0000000000..bd478505d9 --- /dev/null +++ b/tests/integration/targets/homebrew_services/aliases @@ -0,0 +1,9 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +azp/posix/1 +skip/aix +skip/freebsd +skip/rhel +skip/docker diff --git a/tests/integration/targets/homebrew_services/handlers/main.yml b/tests/integration/targets/homebrew_services/handlers/main.yml new file mode 100644 index 0000000000..18856120d0 --- /dev/null +++ b/tests/integration/targets/homebrew_services/handlers/main.yml @@ -0,0 +1,11 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: uninstall black + community.general.homebrew: + name: black + state: absent + become: true + become_user: "{{ brew_stat.stat.pw_name }}" diff --git a/tests/integration/targets/homebrew_services/tasks/main.yml b/tests/integration/targets/homebrew_services/tasks/main.yml new file mode 100644 index 0000000000..1d524715ca --- /dev/null +++ b/tests/integration/targets/homebrew_services/tasks/main.yml @@ -0,0 +1,86 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# Don't run this test for non-MacOS systems. 
+- meta: end_play + when: ansible_facts.distribution != 'MacOSX' + +- name: MACOS | Find brew binary + command: which brew + register: brew_which + +- name: MACOS | Get owner of brew binary + stat: + path: "{{ brew_which.stdout }}" + register: brew_stat + +- name: Homebrew Services test block + become: true + become_user: "{{ brew_stat.stat.pw_name }}" + block: + - name: MACOS | Install black + community.general.homebrew: + name: black + state: present + register: install_result + notify: + - uninstall black + + - name: Check the black service is installed + assert: + that: + - install_result is success + + - name: Start the black service + community.general.homebrew_services: + name: black + state: present + register: start_result + environment: + HOMEBREW_NO_ENV_HINTS: "1" + + - name: Check the black service is running + assert: + that: + - start_result is success + + - name: Start the black service when already started + community.general.homebrew_services: + name: black + state: present + register: start_result + environment: + HOMEBREW_NO_ENV_HINTS: "1" + + - name: Check for idempotency + assert: + that: + - start_result.changed == 0 + + - name: Restart the black service + community.general.homebrew_services: + name: black + state: restarted + register: restart_result + environment: + HOMEBREW_NO_ENV_HINTS: "1" + + - name: Check the black service is restarted + assert: + that: + - restart_result is success + + - name: Stop the black service + community.general.homebrew_services: + name: black + state: present + register: stop_result + environment: + HOMEBREW_NO_ENV_HINTS: "1" + + - name: Check the black service is stopped + assert: + that: + - stop_result is success diff --git a/tests/integration/targets/homectl/aliases b/tests/integration/targets/homectl/aliases index ea9b442302..82c28122c9 100644 --- a/tests/integration/targets/homectl/aliases +++ b/tests/integration/targets/homectl/aliases @@ -11,3 +11,5 @@ skip/rhel9.0 # See https://www.reddit.com/r/Fedora/comments/si7nzk/homectl/ skip/rhel9.1 # See https://www.reddit.com/r/Fedora/comments/si7nzk/homectl/ skip/rhel9.2 # See https://www.reddit.com/r/Fedora/comments/si7nzk/homectl/ skip/rhel9.3 # See https://www.reddit.com/r/Fedora/comments/si7nzk/homectl/ +skip/rhel9.4 # See https://www.reddit.com/r/Fedora/comments/si7nzk/homectl/ +skip/rhel9.5 # See https://www.reddit.com/r/Fedora/comments/si7nzk/homectl/ diff --git a/tests/integration/targets/homectl/tasks/main.yml b/tests/integration/targets/homectl/tasks/main.yml index 93c1089b47..aa924293e3 100644 --- a/tests/integration/targets/homectl/tasks/main.yml +++ b/tests/integration/targets/homectl/tasks/main.yml @@ -15,6 +15,11 @@ ignore_errors: true - block: + - name: Install legacycrypt on Python 3.13+ + pip: + name: legacycrypt + when: ansible_python_version is version("3.13", ">=") + - name: Check and start systemd-homed service service: name: systemd-homed.service diff --git a/tests/integration/targets/htpasswd/tasks/main.yml b/tests/integration/targets/htpasswd/tasks/main.yml index 7b5dc3c511..030f4d19ae 100644 --- a/tests/integration/targets/htpasswd/tasks/main.yml +++ b/tests/integration/targets/htpasswd/tasks/main.yml @@ -13,6 +13,7 @@ path: "{{ htpasswd_path }}" name: bob password: c00lbob + mode: "0644" check_mode: true register: add_bob_check @@ -21,6 +22,7 @@ path: "{{ htpasswd_path }}" name: bob password: c00lbob + mode: "0644" register: add_bob - name: add bob (idempotency) @@ -28,13 +30,40 @@ path: "{{ htpasswd_path }}" name: bob password: c00lbob + mode: "0644" 
register: add_bob_idempot +- name: update permissions (check mode) + community.general.htpasswd: + path: "{{ htpasswd_path }}" + name: bob + password: c00lbob + mode: "0600" + check_mode: true + register: update_perms_check + +- name: update permissions + community.general.htpasswd: + path: "{{ htpasswd_path }}" + name: bob + password: c00lbob + mode: "0600" + register: update_perms + +- name: update permissions (idempotency) + community.general.htpasswd: + path: "{{ htpasswd_path }}" + name: bob + password: c00lbob + mode: "0600" + register: update_perms_idempot + - name: add bob new password community.general.htpasswd: path: "{{ htpasswd_path }}" name: bob password: SUPERsecret + mode: "0600" register: add_bob_newpw - name: add bob new password (idempotency) @@ -42,6 +71,7 @@ path: "{{ htpasswd_path }}" name: bob password: SUPERsecret + mode: "0600" register: add_bob_newpw_idempot - name: test add bob assertions @@ -50,6 +80,9 @@ - add_bob_check is changed - add_bob is changed - add_bob_idempot is not changed + - update_perms_check is changed + - update_perms is changed + - update_perms_idempot is not changed - add_bob_newpw is changed - add_bob_newpw_idempot is not changed @@ -58,6 +91,7 @@ path: "{{ htpasswd_path }}" name: bob state: absent + mode: "0600" check_mode: true register: del_bob_check @@ -66,6 +100,7 @@ path: "{{ htpasswd_path }}" name: bob state: absent + mode: "0600" register: del_bob - name: remove bob (idempotency) @@ -73,6 +108,7 @@ path: "{{ htpasswd_path }}" name: bob state: absent + mode: "0600" register: del_bob_idempot - name: test remove bob assertions diff --git a/tests/integration/targets/iptables_state/aliases b/tests/integration/targets/iptables_state/aliases index 5a02a630bc..76c58041b6 100644 --- a/tests/integration/targets/iptables_state/aliases +++ b/tests/integration/targets/iptables_state/aliases @@ -10,3 +10,5 @@ skip/freebsd # no iptables/netfilter (Linux specific) skip/osx # no iptables/netfilter (Linux specific) skip/macos # no iptables/netfilter (Linux specific) skip/aix # no iptables/netfilter (Linux specific) + +skip/ubuntu22.04 # TODO there's a problem here! 
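The htpasswd tasks added earlier in this patch only assert on changed/not-changed status when the mode argument is tightened from "0644" to "0600". If stronger verification is ever wanted, the on-disk permissions could also be checked directly; a minimal sketch, not part of this patch:

# Illustration only, not part of this patch: verify the file mode on disk
# after the permission update, instead of relying on changed status alone.
- name: Stat the htpasswd file
  ansible.builtin.stat:
    path: "{{ htpasswd_path }}"
  register: htpasswd_stat

- name: Assert the requested mode was applied
  ansible.builtin.assert:
    that:
      - htpasswd_stat.stat.mode == '0600'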
diff --git a/tests/integration/targets/iso_extract/aliases b/tests/integration/targets/iso_extract/aliases index 5ddca1ecbb..e98f128115 100644 --- a/tests/integration/targets/iso_extract/aliases +++ b/tests/integration/targets/iso_extract/aliases @@ -11,7 +11,12 @@ skip/rhel9.0 # FIXME skip/rhel9.1 # FIXME skip/rhel9.2 # FIXME skip/rhel9.3 # FIXME +skip/rhel9.4 # FIXME +skip/rhel9.5 # FIXME skip/freebsd12.4 # FIXME skip/freebsd13.2 # FIXME skip/freebsd13.3 # FIXME +skip/freebsd13.4 # FIXME skip/freebsd14.0 # FIXME +skip/freebsd14.1 # FIXME +skip/freebsd14.2 # FIXME diff --git a/tests/integration/targets/keycloak_client/tasks/main.yml b/tests/integration/targets/keycloak_client/tasks/main.yml index 5e7c7fae39..e1a7d2ebfb 100644 --- a/tests/integration/targets/keycloak_client/tasks/main.yml +++ b/tests/integration/targets/keycloak_client/tasks/main.yml @@ -103,3 +103,131 @@ assert: that: - check_client_when_present_and_changed is changed + +- name: Desire client with flow binding overrides + community.general.keycloak_client: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_id }}" + state: present + redirect_uris: '{{redirect_uris1}}' + attributes: '{{client_attributes1}}' + protocol_mappers: '{{protocol_mappers1}}' + authentication_flow_binding_overrides: + browser_name: browser + direct_grant_name: direct grant + register: desire_client_with_flow_binding_overrides + +- name: Assert flows are set + assert: + that: + - desire_client_with_flow_binding_overrides is changed + - "'authenticationFlowBindingOverrides' in desire_client_with_flow_binding_overrides.end_state" + - desire_client_with_flow_binding_overrides.end_state.authenticationFlowBindingOverrides.browser | length > 0 + - desire_client_with_flow_binding_overrides.end_state.authenticationFlowBindingOverrides.direct_grant | length > 0 + +- name: Backup flow UUIDs + set_fact: + flow_browser_uuid: "{{ desire_client_with_flow_binding_overrides.end_state.authenticationFlowBindingOverrides.browser }}" + flow_direct_grant_uuid: "{{ desire_client_with_flow_binding_overrides.end_state.authenticationFlowBindingOverrides.direct_grant }}" + +- name: Desire client with flow binding overrides remove direct_grant_name + community.general.keycloak_client: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_id }}" + state: present + redirect_uris: '{{redirect_uris1}}' + attributes: '{{client_attributes1}}' + protocol_mappers: '{{protocol_mappers1}}' + authentication_flow_binding_overrides: + browser_name: browser + register: desire_client_with_flow_binding_overrides + +- name: Assert flows are updated + assert: + that: + - desire_client_with_flow_binding_overrides is changed + - "'authenticationFlowBindingOverrides' in desire_client_with_flow_binding_overrides.end_state" + - desire_client_with_flow_binding_overrides.end_state.authenticationFlowBindingOverrides.browser | length > 0 + - "'direct_grant' not in desire_client_with_flow_binding_overrides.end_state.authenticationFlowBindingOverrides" + +- name: Desire client with flow binding overrides remove browser add direct_grant + community.general.keycloak_client: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" 
+ client_id: "{{ client_id }}" + state: present + redirect_uris: '{{redirect_uris1}}' + attributes: '{{client_attributes1}}' + protocol_mappers: '{{protocol_mappers1}}' + authentication_flow_binding_overrides: + direct_grant_name: direct grant + register: desire_client_with_flow_binding_overrides + +- name: Assert flows are updated + assert: + that: + - desire_client_with_flow_binding_overrides is changed + - "'authenticationFlowBindingOverrides' in desire_client_with_flow_binding_overrides.end_state" + - "'browser' not in desire_client_with_flow_binding_overrides.end_state.authenticationFlowBindingOverrides" + - desire_client_with_flow_binding_overrides.end_state.authenticationFlowBindingOverrides.direct_grant | length > 0 + +- name: Desire client with flow binding overrides with UUIDs + community.general.keycloak_client: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_id }}" + state: present + redirect_uris: '{{redirect_uris1}}' + attributes: '{{client_attributes1}}' + protocol_mappers: '{{protocol_mappers1}}' + authentication_flow_binding_overrides: + browser: "{{ flow_browser_uuid }}" + direct_grant: "{{ flow_direct_grant_uuid }}" + register: desire_client_with_flow_binding_overrides + +- name: Assert flows are updated + assert: + that: + - desire_client_with_flow_binding_overrides is changed + - "'authenticationFlowBindingOverrides' in desire_client_with_flow_binding_overrides.end_state" + - desire_client_with_flow_binding_overrides.end_state.authenticationFlowBindingOverrides.browser == flow_browser_uuid + - desire_client_with_flow_binding_overrides.end_state.authenticationFlowBindingOverrides.direct_grant == flow_direct_grant_uuid + +- name: Unset flow binding overrides + community.general.keycloak_client: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_id }}" + state: present + redirect_uris: '{{redirect_uris1}}' + attributes: '{{client_attributes1}}' + protocol_mappers: '{{protocol_mappers1}}' + authentication_flow_binding_overrides: + browser: "{{ None }}" + direct_grant: null + register: desire_client_with_flow_binding_overrides + +- name: Assert flows are removed + assert: + that: + - desire_client_with_flow_binding_overrides is changed + - "'authenticationFlowBindingOverrides' in desire_client_with_flow_binding_overrides.end_state" + - "'browser' not in desire_client_with_flow_binding_overrides.end_state.authenticationFlowBindingOverrides" + - "'direct_grant' not in desire_client_with_flow_binding_overrides.end_state.authenticationFlowBindingOverrides" \ No newline at end of file diff --git a/tests/integration/targets/keycloak_group/tasks/main.yml b/tests/integration/targets/keycloak_group/tasks/main.yml index 8b115e3a28..f807b0640d 100644 --- a/tests/integration/targets/keycloak_group/tasks/main.yml +++ b/tests/integration/targets/keycloak_group/tasks/main.yml @@ -10,8 +10,8 @@ command: start-dev env: KC_HTTP_RELATIVE_PATH: /auth - KEYCLOAK_ADMIN: admin - KEYCLOAK_ADMIN_PASSWORD: password + KEYCLOAK_ADMIN: "{{ admin_user }}" + KEYCLOAK_ADMIN_PASSWORD: "{{ admin_password }}" ports: - "8080:8080" detach: true diff --git a/tests/integration/targets/keycloak_userprofile/aliases b/tests/integration/targets/keycloak_userprofile/aliases new file mode 100644 index 0000000000..bd1f024441 --- /dev/null +++ 
b/tests/integration/targets/keycloak_userprofile/aliases @@ -0,0 +1,5 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +unsupported diff --git a/tests/integration/targets/keycloak_userprofile/meta/main.yml b/tests/integration/targets/keycloak_userprofile/meta/main.yml new file mode 100644 index 0000000000..c583a8fc22 --- /dev/null +++ b/tests/integration/targets/keycloak_userprofile/meta/main.yml @@ -0,0 +1,7 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# dependencies: +# - setup_docker diff --git a/tests/integration/targets/keycloak_userprofile/readme.adoc b/tests/integration/targets/keycloak_userprofile/readme.adoc new file mode 100644 index 0000000000..943dfaf542 --- /dev/null +++ b/tests/integration/targets/keycloak_userprofile/readme.adoc @@ -0,0 +1,27 @@ +// Copyright (c) Ansible Project +// GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +// SPDX-License-Identifier: GPL-3.0-or-later + +To be able to run these integration tests a keycloak server must be +reachable under a specific url with a specific admin user and password. +The exact values expected for these parameters can be found in +'vars/main.yml' file. A simple way to do this is to use the official +keycloak docker images like this: + +---- +docker run --name mykeycloak -p 8080:8080 -e KC_HTTP_RELATIVE_PATH= -e KEYCLOAK_ADMIN= -e KEYCLOAK_ADMIN_PASSWORD= quay.io/keycloak/keycloak:24.0.5 start-dev +---- + +Example with concrete values inserted: + +---- +docker run --name mykeycloak -p 8080:8080 -e KC_HTTP_RELATIVE_PATH=/auth -e KEYCLOAK_ADMIN=admin -e KEYCLOAK_ADMIN_PASSWORD=password quay.io/keycloak/keycloak:24.0.5 start-dev +---- + +This test suite can run against a fresh unconfigured server instance +(no preconfiguration required) and cleans up after itself (undoes all +its config changes) as long as it runs through completely. 
While its active +it changes the server configuration in the following ways: + + * creating, modifying and deleting some keycloak userprofiles + diff --git a/tests/integration/targets/keycloak_userprofile/tasks/main.yml b/tests/integration/targets/keycloak_userprofile/tasks/main.yml new file mode 100644 index 0000000000..37b65d35ed --- /dev/null +++ b/tests/integration/targets/keycloak_userprofile/tasks/main.yml @@ -0,0 +1,301 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +- name: Start container + community.docker.docker_container: + name: mykeycloak + image: "quay.io/keycloak/keycloak:24.0.5" + command: start-dev + env: + KC_HTTP_RELATIVE_PATH: /auth + KEYCLOAK_ADMIN: admin + KEYCLOAK_ADMIN_PASSWORD: password + ports: + - "8080:8080" + detach: true + auto_remove: true + memory: 2200M + +- name: Check default ports + ansible.builtin.wait_for: + host: "localhost" + port: "8080" + state: started # Port should be open + delay: 30 # Wait before first check + timeout: 50 # Stop checking after timeout (sec) + +- name: Remove Keycloak test realm to avoid failures from previous failed runs + community.general.keycloak_realm: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + id: "{{ realm }}" + state: absent + +- name: Create Keycloak test realm + community.general.keycloak_realm: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + id: "{{ realm }}" + state: present + +- name: Create default User Profile (check mode) + community.general.keycloak_userprofile: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + parent_id: "{{ realm }}" + config: "{{ config_default }}" + check_mode: true + register: result + +- name: Assert that User Profile would be created + assert: + that: + - result is changed + - result.end_state != {} + - result.end_state.providerId == "declarative-user-profile" + - result.end_state.providerType == "org.keycloak.userprofile.UserProfileProvider" + - result.msg == "Userprofile declarative-user-profile would be created" + +- name: Create default User Profile + community.general.keycloak_userprofile: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + state: present + parent_id: "{{ realm }}" + config: "{{ config_default }}" + diff: true + register: result + +- name: Assert that User Profile was created + assert: + that: + - result is changed + - result.end_state != {} + - result.end_state.providerId == "declarative-user-profile" + - result.end_state.providerType == "org.keycloak.userprofile.UserProfileProvider" + - result.msg == "Userprofile declarative-user-profile created" + +- name: Create default User Profile (test for idempotency) + community.general.keycloak_userprofile: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + state: present + parent_id: "{{ realm }}" + config: "{{ config_default }}" + register: result + +- name: Assert that User Profile was in sync + assert: + that: + - result is not changed + - 
result.end_state != {} + - result.end_state.providerId == "declarative-user-profile" + - result.end_state.providerType == "org.keycloak.userprofile.UserProfileProvider" + - result.msg == "Userprofile declarative-user-profile was in sync" + +- name: Update default User Profile (check mode) + community.general.keycloak_userprofile: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + state: present + parent_id: "{{ realm }}" + config: "{{ config_updated }}" + check_mode: true + register: result + +- name: Assert that User Profile would be changed + assert: + that: + - result is changed + - result.end_state != {} + - result.end_state.providerId == "declarative-user-profile" + - result.end_state.providerType == "org.keycloak.userprofile.UserProfileProvider" + - result.msg.startswith("Userprofile declarative-user-profile would be changed:") + +- name: Update default User Profile + community.general.keycloak_userprofile: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + state: present + parent_id: "{{ realm }}" + config: "{{ config_updated }}" + diff: true + register: result + +- name: Assert that User Profile changed + assert: + that: + - result is changed + - result.end_state != {} + - result.end_state.providerId == "declarative-user-profile" + - result.end_state.providerType == "org.keycloak.userprofile.UserProfileProvider" + - result.msg.startswith("Userprofile declarative-user-profile changed:") + +- name: Update default User Profile (test for idempotency) + community.general.keycloak_userprofile: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + state: present + parent_id: "{{ realm }}" + config: "{{ config_updated }}" + register: result + +- name: Assert that User Profile was in sync + assert: + that: + - result is not changed + - result.end_state != {} + - result.end_state.providerId == "declarative-user-profile" + - result.end_state.providerType == "org.keycloak.userprofile.UserProfileProvider" + - result.msg == "Userprofile declarative-user-profile was in sync" + +## No force implemented +# - name: Force update default User Profile +# community.general.keycloak_userprofile: +# auth_keycloak_url: "{{ url }}" +# auth_realm: "{{ admin_realm }}" +# auth_username: "{{ admin_user }}" +# auth_password: "{{ admin_password }}" +# force: true +# state: present +# parent_id: "{{ realm }}" +# config: "{{ config_updated }}" +# register: result +# +# - name: Assert that forced update ran correctly +# assert: +# that: +# - result is changed +# - result.end_state != {} +# - result.end_state.providerId == "declarative-user-profile" +# - result.end_state.providerType == "org.keycloak.userprofile.UserProfileProvider" +# - result.msg == "Userprofile declarative-user-profile was forcibly updated" + +- name: Remove default User Profile + community.general.keycloak_userprofile: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + state: absent + parent_id: "{{ realm }}" + config: "{{ config_default }}" + diff: true + register: result + +- name: Assert that User Profile was deleted + assert: + that: + - result is changed + - result.end_state == {} + - result.msg == "Userprofile declarative-user-profile deleted" + +- name: Remove default User 
Profile (test for idempotency) + community.general.keycloak_userprofile: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + state: absent + parent_id: "{{ realm }}" + config: "{{ config_default }}" + register: result + +- name: Assert that User Profile not present + assert: + that: + - result is not changed + - result.end_state == {} + - result.msg == "Userprofile declarative-user-profile not present" + +- name: Create User Profile with unmanaged attributes ENABLED + community.general.keycloak_userprofile: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + state: present + parent_id: "{{ realm }}" + config: "{{ config_unmanaged_attributes_enabled }}" + diff: true + register: result + +- name: Assert that User Profile was created + assert: + that: + - result is changed + - result.end_state != {} + - result.end_state.providerId == "declarative-user-profile" + - result.end_state.providerType == "org.keycloak.userprofile.UserProfileProvider" + - result.msg == "Userprofile declarative-user-profile created" + +- name: Attempt to change the User Profile to unmanaged ADMIN_EDIT + community.general.keycloak_userprofile: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + state: present + parent_id: "{{ realm }}" + config: "{{ config_unmanaged_attributes_admin_edit }}" + diff: true + register: result + +- name: Assert that User Profile was changed + assert: + that: + - result is changed + - result.end_state != {} + - result.end_state.providerId == "declarative-user-profile" + - result.end_state.providerType == "org.keycloak.userprofile.UserProfileProvider" + - result.msg.startswith("Userprofile declarative-user-profile changed:") + +- name: Attempt to change the User Profile to unmanaged ADMIN_VIEW + community.general.keycloak_userprofile: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + state: present + parent_id: "{{ realm }}" + config: "{{ config_unmanaged_attributes_admin_view }}" + diff: true + register: result + +- name: Assert that User Profile was changed + assert: + that: + - result is changed + - result.end_state != {} + - result.end_state.providerId == "declarative-user-profile" + - result.end_state.providerType == "org.keycloak.userprofile.UserProfileProvider" + - result.msg.startswith("Userprofile declarative-user-profile changed:") + +- name: Remove Keycloak test realm + community.general.keycloak_realm: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + id: "{{ realm }}" + state: absent diff --git a/tests/integration/targets/keycloak_userprofile/vars/main.yml b/tests/integration/targets/keycloak_userprofile/vars/main.yml new file mode 100644 index 0000000000..1f8ae6c823 --- /dev/null +++ b/tests/integration/targets/keycloak_userprofile/vars/main.yml @@ -0,0 +1,111 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +url: http://localhost:8080/auth +admin_realm: master +admin_user: admin +admin_password: password +realm: realm_userprofile_test 
+attributes_default: + - name: username + displayName: ${username} + validations: + length: + min: 3 + max: 255 + usernameProhibitedCharacters: {} + up_username_not_idn_homograph: {} + annotations: {} + permissions: + view: + - admin + - user + edit: [] + multivalued: false + - name: email + displayName: ${email} + validations: + email: {} + length: + max: 255 + annotations: {} + required: + roles: + - user + permissions: + view: + - admin + - user + edit: [] + multivalued: false + - name: firstName + displayName: ${firstName} + validations: + length: + max: 255 + personNameProhibitedCharacters: {} + annotations: {} + required: + roles: + - user + permissions: + view: + - admin + - user + edit: [] + multivalued: false + - name: lastName + displayName: ${lastName} + validations: + length: + max: 255 + person_name_prohibited_characters: {} + annotations: {} + required: + roles: + - user + permissions: + view: + - admin + - user + edit: [] + multivalued: false +attributes_additional: + - name: additionalAttribute + displayName: additionalAttribute + group: user-metadata + required: + roles: + - user + permissions: + view: + - admin + - user + edit: [] + multivalued: false +groups_default: + - name: user-metadata + displayHeader: User metadata + displayDescription: Attributes, which refer to user metadata +config_default: + kc_user_profile_config: + - attributes: "{{ attributes_default }}" + groups: "{{ groups_default }}" +config_updated: + kc_user_profile_config: + - attributes: "{{ attributes_default + attributes_additional }}" + groups: "{{ groups_default }}" +config_unmanaged_attributes_enabled: + kc_user_profile_config: + - unmanagedAttributePolicy: ENABLED + attributes: "{{ attributes_default }}" +config_unmanaged_attributes_admin_edit: + kc_user_profile_config: + - unmanagedAttributePolicy: ADMIN_EDIT + attributes: "{{ attributes_default }}" +config_unmanaged_attributes_admin_view: + kc_user_profile_config: + - unmanagedAttributePolicy: ADMIN_VIEW + attributes: "{{ attributes_default }}" diff --git a/tests/integration/targets/locale_gen/vars/main.yml b/tests/integration/targets/locale_gen/vars/main.yml index 44327ddd31..23358e6374 100644 --- a/tests/integration/targets/locale_gen/vars/main.yml +++ b/tests/integration/targets/locale_gen/vars/main.yml @@ -15,3 +15,12 @@ locale_list_basic: - localegen: eo locales: [eo] skip_removal: false + - localegen: + - ar_BH.UTF-8 + - tr_CY.UTF-8 + locales: + - ar_BH.UTF-8 + - ar_BH.utf8 + - tr_CY.UTF-8 + - tr_CY.utf8 + skip_removal: false diff --git a/tests/integration/targets/lookup_lmdb_kv/runme.sh b/tests/integration/targets/lookup_lmdb_kv/runme.sh index 71faa439d1..286ec6b16d 100755 --- a/tests/integration/targets/lookup_lmdb_kv/runme.sh +++ b/tests/integration/targets/lookup_lmdb_kv/runme.sh @@ -4,6 +4,10 @@ # SPDX-License-Identifier: GPL-3.0-or-later set -eux +if grep -Fq 'NAME="Arch Linux"' /etc/os-release; then + exit 0 +fi + ANSIBLE_ROLES_PATH=../ \ ansible-playbook dependencies.yml -v "$@" diff --git a/tests/integration/targets/lookup_random_words/test.yml b/tests/integration/targets/lookup_random_words/test.yml index 90c6727304..e1b6fde13b 100644 --- a/tests/integration/targets/lookup_random_words/test.yml +++ b/tests/integration/targets/lookup_random_words/test.yml @@ -27,6 +27,7 @@ - result4[0] | length >= 17 - result4[0] | length <= 29 - result4[0] | regex_findall("[A-Z]") | length == 3 - - result4[0].count("-") == 2 + # If one of the random words is 't-shirt', there are more than 2 dashes... 
+ - result4[0].count("-") == 2 or "t-shirt" in result4[0].lower() - result5 | length == 1 - result5[0] | length == 15 diff --git a/tests/integration/targets/lvg/aliases b/tests/integration/targets/lvg/aliases index 3b92ba75c4..cbe489d88b 100644 --- a/tests/integration/targets/lvg/aliases +++ b/tests/integration/targets/lvg/aliases @@ -10,3 +10,4 @@ skip/aix skip/freebsd skip/osx skip/macos +skip/alpine3.21 # TODO try to fix diff --git a/tests/integration/targets/mqtt/tasks/main.yml b/tests/integration/targets/mqtt/tasks/main.yml index 0beb1b3b27..3fd11643ee 100644 --- a/tests/integration/targets/mqtt/tasks/main.yml +++ b/tests/integration/targets/mqtt/tasks/main.yml @@ -11,4 +11,4 @@ - include_tasks: ubuntu.yml when: - ansible_distribution == 'Ubuntu' - - ansible_distribution_release not in ['focal', 'jammy'] + - ansible_distribution_release not in ['focal', 'jammy', 'noble'] diff --git a/tests/integration/targets/odbc/aliases b/tests/integration/targets/odbc/aliases index 91a6167251..0cc7b80d9d 100644 --- a/tests/integration/targets/odbc/aliases +++ b/tests/integration/targets/odbc/aliases @@ -11,4 +11,6 @@ skip/rhel9.0 skip/rhel9.1 skip/rhel9.2 skip/rhel9.3 +skip/rhel9.4 +skip/rhel9.5 skip/freebsd diff --git a/tests/integration/targets/one_image/aliases b/tests/integration/targets/one_image/aliases new file mode 100644 index 0000000000..100ba0f979 --- /dev/null +++ b/tests/integration/targets/one_image/aliases @@ -0,0 +1,7 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +azp/generic/1 +cloud/opennebula +disabled # FIXME - when this is fixed, also re-enable the generic tests in CI! diff --git a/tests/integration/targets/one_image/tasks/main.yml b/tests/integration/targets/one_image/tasks/main.yml new file mode 100644 index 0000000000..c8736d73d8 --- /dev/null +++ b/tests/integration/targets/one_image/tasks/main.yml @@ -0,0 +1,210 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# Checks for existence +- name: Make sure image is present by ID + one_image: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + state: present + register: result + +- name: Assert that image is present + assert: + that: + - result is not changed + +- name: Make sure image is present by ID + one_image: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + name: my_image + state: present + register: result + +- name: Assert that image is present + assert: + that: + - result is not changed + +# Updating an image +- name: Clone image without name + one_image: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + state: cloned + register: result + +- name: Assert that image is cloned + assert: + that: + - result is changed + +- name: Clone image with name + one_image: + api_url: "{{ opennebula_url }}" + api_username: "{{ 
opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + state: renamed + new_name: new_image + register: result + +- name: Assert that image is cloned + assert: + that: + - result is changed + +- name: Disable image + one_image: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + enabled: false + register: result + +- name: Assert that network is disabled + assert: + that: + - result is changed + +- name: Enable image + one_image: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + enabled: true + register: result + +- name: Assert that network is enabled + assert: + that: + - result is changed + +- name: Make image persistent + one_image: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + persistent: true + register: result + +- name: Assert that network is persistent + assert: + that: + - result is changed + +- name: Make image non-persistent + one_image: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + persistent: false + register: result + +- name: Assert that network is non-persistent + assert: + that: + - result is changed + +# Testing idempotence using the same tasks +- name: Make image non-persistent + one_image: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + persistent: false + enabled: true + register: result + +- name: Assert that network not changed + assert: + that: + - result is not changed + +# Delete images +- name: Deleting non-existing image + one_image: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 228 + state: absent + register: result + +- name: Assert that network not changed + assert: + that: + - result is not changed + +- name: Delete an existing image + one_image: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + state: absent + register: result + +- name: Assert that image was deleted + assert: + that: + - result is changed + +# Trying to run with wrong arguments +- name: Try to use name and ID at the same time + one_image: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + name: name + register: result + ignore_errors: true + +- name: Assert that task failed + assert: + that: + - result is failed + +- name: Try to rename image without specifying new name + one_image: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + state: rename + register: result + ignore_errors: true + +- name: Assert that task failed + assert: + that: + - result is failed + +- name: Try to rename image without specifying new name + one_image: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + state: rename + register: result + ignore_errors: true diff --git a/tests/integration/targets/one_image_info/aliases b/tests/integration/targets/one_image_info/aliases new file mode 100644 index 0000000000..100ba0f979 --- /dev/null +++ 
b/tests/integration/targets/one_image_info/aliases @@ -0,0 +1,7 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +azp/generic/1 +cloud/opennebula +disabled # FIXME - when this is fixed, also re-enable the generic tests in CI! diff --git a/tests/integration/targets/one_image_info/tasks/main.yml b/tests/integration/targets/one_image_info/tasks/main.yml new file mode 100644 index 0000000000..fede116241 --- /dev/null +++ b/tests/integration/targets/one_image_info/tasks/main.yml @@ -0,0 +1,192 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# Checks for existence +- name: Get info by ID + one_image_info: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + register: result + +- name: Assert that image is present + assert: + that: + - result is not changed + +- name: Get info by list of ID + one_image_info: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + ids: + - 2 + - 2 + - 8 + register: result + +- name: Assert that image is present + assert: + that: + - result is not changed + +- name: Get info by list of ID + one_image_info: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + name: somename + register: result + +- name: Assert that image is present + assert: + that: + - result is not changed + +- name: Gather all info + one_image_info: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + register: result + +- name: Assert that images are present + assert: + that: + - result is not changed + +- name: Gather info by regex + one_image_info: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + name: '~my_image-[0-9].*' + register: result + +- name: Assert that images are present + assert: + that: + - result is not changed + +- name: Gather info by regex and ignore upper/lower cases + one_image_info: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + name: '~*my_image-[0-9].*' + register: result + +- name: Assert that images are present + assert: + that: + - result is not changed + +# Updating an image +- name: Clone image without name + one_image_info: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + state: cloned + register: result + +- name: Assert that image is cloned + assert: + that: + - result is changed + +- name: Clone image with name + one_image_info: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + state: renamed + new_name: new_image + register: result + +- name: Assert that image is cloned + assert: + that: + - 
result is changed + +- name: Disable image + one_image_info: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + enabled: false + register: result + +- name: Assert that image is disabled + assert: + that: + - result is changed + +- name: Enable image + one_image_info: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + enabled: true + register: result + +- name: Assert that image is enabled + assert: + that: + - result is changed + +- name: Make image persistent + one_image_info: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + persistent: true + register: result + +- name: Assert that image is persistent + assert: + that: + - result is changed + +- name: Make image non-persistent + one_image_info: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + persistent: false + register: result + +- name: Assert that image is non-persistent + assert: + that: + - result is changed + +# Testing errors +- name: Try to use name and ID at the same time + one_image_info: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + name: somename + register: result + ignore_errors: true + +- name: Assert that task failed + assert: + that: + - result is failed diff --git a/tests/integration/targets/one_vnet/aliases b/tests/integration/targets/one_vnet/aliases new file mode 100644 index 0000000000..100ba0f979 --- /dev/null +++ b/tests/integration/targets/one_vnet/aliases @@ -0,0 +1,7 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +azp/generic/1 +cloud/opennebula +disabled # FIXME - when this is fixed, also re-enable the generic tests in CI!
diff --git a/tests/integration/targets/one_vnet/tasks/main.yml b/tests/integration/targets/one_vnet/tasks/main.yml new file mode 100644 index 0000000000..084d4758ad --- /dev/null +++ b/tests/integration/targets/one_vnet/tasks/main.yml @@ -0,0 +1,173 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# Create a new network +- name: Create a new network + one_vnet: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + name: bridge-network + template: | + VN_MAD = "bridge" + BRIDGE = "br0" + BRIDGE_TYPE = "linux" + AR=[ + TYPE = "IP4", + IP = "192.0.2.2", + SIZE = "20" + ] + DNS = "192.0.2.1" + GATEWAY = "192.0.2.1" + register: result + +- name: Assert that network is created + assert: + that: + - result is changed + + +# Updating a network +- name: Update an existing network + one_vnet: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + name: bridge-network + template: | + VN_MAD = "bridge" + BRIDGE = "br0" + BRIDGE_TYPE = "linux" + AR=[ + TYPE = "IP4", + IP = "192.0.2.2", + SIZE = "20" + ] + DNS = "192.0.2.220" + GATEWAY = "192.0.2.1" + register: result + +- name: Assert that network is changed + assert: + that: + - result is changed + +# Testing idempotence using the same template as in previous task +- name: Update an existing network with the same changes again + one_vnet: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + name: bridge-network + template: | + VN_MAD = "bridge" + BRIDGE = "br0" + BRIDGE_TYPE = "linux" + AR=[ + TYPE = "IP4", + IP = "192.0.2.2", + SIZE = "20" + ] + DNS = "192.0.2.220" + GATEWAY = "192.0.2.1" + register: result + +- name: Assert that network is not changed + assert: + that: + - result is not changed + + +# Deletion of networks +- name: Delete a non-existing network + one_vnet: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + name: i-do-not-exists + state: absent + register: result + +- name: Assert that network is not changed + assert: + that: + - result is not changed + +- name: Delete an existing network + one_vnet: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + name: bridge-network + state: absent + register: result + +- name: Assert that network was deleted + assert: + that: + - result is changed + +# Trying to run with wrong arguments +- name: Try to create a network with state=present and without the template parameter + one_vnet: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + name: bridge-network + state: present + register: result + ignore_errors: true + +- name: Assert that it failed because the template is missing + assert: + that: + - result is failed + +- name: Try to create network with template but without specifying the name parameter + one_vnet: + api_url: "{{ opennebula_url }}" +
api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + id: 0 + state: present + template: | + VN_MAD = "bridge" + BRIDGE = "br0" + BRIDGE_TYPE = "linux" + AR=[ + TYPE = "IP4", + IP = "192.0.2.2", + SIZE = "20" + ] + DNS = "192.0.2.220" + GATEWAY = "192.0.2.1" + register: result + ignore_errors: true + +- name: Assert that it failed because name is required for initial creation + assert: + that: + - result is failed + +- name: Try to use both ID and name at the same time + one_vnet: + api_url: "{{ opennebula_url }}" + api_username: "{{ opennebula_username }}" + api_password: "{{ opennebula_password }}" + name: + id: 0 + state: present + register: result + ignore_errors: true + +- name: Assert that it failed because you can use only one at the time + assert: + that: + - result is failed diff --git a/tests/integration/targets/pipx/aliases b/tests/integration/targets/pipx/aliases index 66e6e1a3e6..9f87ec3480 100644 --- a/tests/integration/targets/pipx/aliases +++ b/tests/integration/targets/pipx/aliases @@ -6,4 +6,3 @@ azp/posix/2 destructive skip/python2 skip/python3.5 -disabled # TODO diff --git a/tests/integration/targets/pipx/files/spec.json b/tests/integration/targets/pipx/files/spec.json new file mode 100644 index 0000000000..3c85125337 --- /dev/null +++ b/tests/integration/targets/pipx/files/spec.json @@ -0,0 +1,91 @@ +{ + "pipx_spec_version": "0.1", + "venvs": { + "black": { + "metadata": { + "injected_packages": {}, + "main_package": { + "app_paths": [ + { + "__Path__": "/home/az/.local/pipx/venvs/black/bin/black", + "__type__": "Path" + }, + { + "__Path__": "/home/az/.local/pipx/venvs/black/bin/blackd", + "__type__": "Path" + } + ], + "app_paths_of_dependencies": {}, + "apps": [ + "black", + "blackd" + ], + "apps_of_dependencies": [], + "include_apps": true, + "include_dependencies": false, + "man_pages": [], + "man_pages_of_dependencies": [], + "man_paths": [], + "man_paths_of_dependencies": {}, + "package": "black", + "package_or_url": "black", + "package_version": "24.8.0", + "pinned": false, + "pip_args": [], + "suffix": "" + }, + "pipx_metadata_version": "0.5", + "python_version": "Python 3.11.9", + "source_interpreter": { + "__Path__": "/home/az/.pyenv/versions/3.11.9/bin/python3.11", + "__type__": "Path" + }, + "venv_args": [] + } + }, + "pycowsay": { + "metadata": { + "injected_packages": {}, + "main_package": { + "app_paths": [ + { + "__Path__": "/home/az/.local/pipx/venvs/pycowsay/bin/pycowsay", + "__type__": "Path" + } + ], + "app_paths_of_dependencies": {}, + "apps": [ + "pycowsay" + ], + "apps_of_dependencies": [], + "include_apps": true, + "include_dependencies": false, + "man_pages": [ + "man6/pycowsay.6" + ], + "man_pages_of_dependencies": [], + "man_paths": [ + { + "__Path__": "/home/az/.local/pipx/venvs/pycowsay/share/man/man6/pycowsay.6", + "__type__": "Path" + } + ], + "man_paths_of_dependencies": {}, + "package": "pycowsay", + "package_or_url": "pycowsay", + "package_version": "0.0.0.2", + "pinned": false, + "pip_args": [], + "suffix": "" + }, + "pipx_metadata_version": "0.5", + "python_version": "Python 3.11.9", + "source_interpreter": { + "__Path__": "/home/az/.pyenv/versions/3.11.9/bin/python3.11", + "__type__": "Path" + }, + "venv_args": [] + } + }, + } +} diff --git a/tests/integration/targets/pipx/files/spec.json.license b/tests/integration/targets/pipx/files/spec.json.license new file mode 100644 index 0000000000..a1390a69ed --- /dev/null +++ b/tests/integration/targets/pipx/files/spec.json.license @@ -0,0 +1,3 @@ 
+Copyright (c) Ansible Project +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later diff --git a/tests/integration/targets/pipx/tasks/main.yml b/tests/integration/targets/pipx/tasks/main.yml index 7eb0f11a6c..30e96ef1bf 100644 --- a/tests/integration/targets/pipx/tasks/main.yml +++ b/tests/integration/targets/pipx/tasks/main.yml @@ -171,7 +171,7 @@ state: latest register: install_tox_latest_with_preinstall_again -- name: install application latest tox +- name: install application latest tox (force) community.general.pipx: name: tox state: latest @@ -217,125 +217,42 @@ - "'tox' not in uninstall_tox_again.application" ############################################################################## -- name: ensure application ansible-lint is uninstalled - community.general.pipx: - name: ansible-lint - state: absent -- name: install application ansible-lint - community.general.pipx: - name: ansible-lint - register: install_ansible_lint +- name: Include testcase for inject packages + ansible.builtin.include_tasks: testcase-injectpkg.yml -- name: inject packages - community.general.pipx: - state: inject - name: ansible-lint - inject_packages: - - licenses - register: inject_pkgs_ansible_lint +- name: Include testcase for jupyter + ansible.builtin.include_tasks: testcase-jupyter.yml -- name: inject packages with apps - community.general.pipx: - state: inject - name: ansible-lint - inject_packages: - - black - install_apps: true - register: inject_pkgs_apps_ansible_lint +- name: Include testcase for old site-wide + ansible.builtin.include_tasks: testcase-oldsitewide.yml -- name: cleanup ansible-lint - community.general.pipx: - state: absent - name: ansible-lint - register: uninstall_ansible_lint +- name: Include testcase for issue 7497 + ansible.builtin.include_tasks: testcase-7497.yml -- name: check assertions inject_packages - assert: - that: - - install_ansible_lint is changed - - inject_pkgs_ansible_lint is changed - - '"ansible-lint" in inject_pkgs_ansible_lint.application' - - '"licenses" in inject_pkgs_ansible_lint.application["ansible-lint"]["injected"]' - - inject_pkgs_apps_ansible_lint is changed - - '"ansible-lint" in inject_pkgs_apps_ansible_lint.application' - - '"black" in inject_pkgs_apps_ansible_lint.application["ansible-lint"]["injected"]' - - uninstall_ansible_lint is changed +- name: Include testcase for issue 8656 + ansible.builtin.include_tasks: testcase-8656.yml -############################################################################## -- name: install jupyter - not working smoothly in freebsd - when: ansible_system != 'FreeBSD' +- name: install pipx + pip: + name: pipx>=1.7.0 + extra_args: --user + ignore_errors: true + register: pipx170_install + +- name: Recent features + when: + - pipx170_install is not failed + - pipx170_install is changed block: - - name: ensure application jupyter is uninstalled - community.general.pipx: - name: jupyter - state: absent + - name: Include testcase for PR 8793 --global + ansible.builtin.include_tasks: testcase-8793-global.yml - - name: install application jupyter - community.general.pipx: - name: jupyter - install_deps: true - register: install_jupyter + - name: Include testcase for PR 8809 install-all + ansible.builtin.include_tasks: testcase-8809-install-all.yml - - name: cleanup jupyter - community.general.pipx: - state: absent - name: jupyter + - name: Include testcase for PR 8809 pin + ansible.builtin.include_tasks: 
testcase-8809-pin.yml - - name: check assertions - assert: - that: - - install_jupyter is changed - - '"ipython" in install_jupyter.stdout' - -############################################################################## -- name: ensure /opt/pipx - ansible.builtin.file: - path: /opt/pipx - state: directory - mode: 0755 - -- name: install tox site-wide - community.general.pipx: - name: tox - state: latest - register: install_tox_sitewide - environment: - PIPX_HOME: /opt/pipx - PIPX_BIN_DIR: /usr/local/bin - -- name: stat /usr/local/bin/tox - ansible.builtin.stat: - path: /usr/local/bin/tox - register: usrlocaltox - -- name: check assertions - ansible.builtin.assert: - that: - - install_tox_sitewide is changed - - usrlocaltox.stat.exists - -############################################################################## -# Test for issue 7497 -- name: ensure application pyinstaller is uninstalled - community.general.pipx: - name: pyinstaller - state: absent - -- name: Install Python Package pyinstaller - community.general.pipx: - name: pyinstaller - state: present - system_site_packages: true - pip_args: "--no-cache-dir" - register: install_pyinstaller - -- name: cleanup pyinstaller - community.general.pipx: - name: pyinstaller - state: absent - -- name: check assertions - assert: - that: - - install_pyinstaller is changed + - name: Include testcase for PR 8809 injectpkg + ansible.builtin.include_tasks: testcase-8809-uninjectpkg.yml diff --git a/tests/integration/targets/pipx/tasks/testcase-7497.yml b/tests/integration/targets/pipx/tasks/testcase-7497.yml new file mode 100644 index 0000000000..938196ef59 --- /dev/null +++ b/tests/integration/targets/pipx/tasks/testcase-7497.yml @@ -0,0 +1,27 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: ensure application pyinstaller is uninstalled + community.general.pipx: + name: pyinstaller + state: absent + +- name: Install Python Package pyinstaller + community.general.pipx: + name: pyinstaller + state: present + system_site_packages: true + pip_args: "--no-cache-dir" + register: install_pyinstaller + +- name: cleanup pyinstaller + community.general.pipx: + name: pyinstaller + state: absent + +- name: check assertions + assert: + that: + - install_pyinstaller is changed diff --git a/tests/integration/targets/pipx/tasks/testcase-8656.yml b/tests/integration/targets/pipx/tasks/testcase-8656.yml new file mode 100644 index 0000000000..10e99e846e --- /dev/null +++ b/tests/integration/targets/pipx/tasks/testcase-8656.yml @@ -0,0 +1,35 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: ensure application conan2 is uninstalled + community.general.pipx: + name: conan2 + state: absent + +- name: Install Python Package conan with suffix 2 (conan2) + community.general.pipx: + name: conan + state: install + suffix: "2" + register: install_conan2 + +- name: Install Python Package conan with suffix 2 (conan2) again + community.general.pipx: + name: conan + state: install + suffix: "2" + register: install_conan2_again + +- name: cleanup conan2 + community.general.pipx: + name: conan2 + state: absent + +- name: check assertions + assert: + that: + - install_conan2 is changed + - "' - conan2' in install_conan2.stdout" + - install_conan2_again is not 
changed diff --git a/tests/integration/targets/pipx/tasks/testcase-8793-global.yml b/tests/integration/targets/pipx/tasks/testcase-8793-global.yml new file mode 100644 index 0000000000..7d3c871306 --- /dev/null +++ b/tests/integration/targets/pipx/tasks/testcase-8793-global.yml @@ -0,0 +1,58 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Set up environment + environment: + PATH: /usr/local/bin:{{ ansible_env.PATH }} + block: + - name: Remove global pipx dir + ansible.builtin.file: + path: /opt/pipx + state: absent + force: true + + - name: Create global pipx dir + ansible.builtin.file: + path: /opt/pipx + state: directory + mode: '0755' + + - name: Uninstall pycowsay + community.general.pipx: + state: uninstall + name: pycowsay + + - name: Uninstall pycowsay (global) + community.general.pipx: + state: uninstall + name: pycowsay + global: true + + - name: Run pycowsay (should fail) + ansible.builtin.command: pycowsay Moooooooo! + changed_when: false + ignore_errors: true + + - name: Install pycowsay (global) + community.general.pipx: + state: install + name: pycowsay + global: true + + - name: Run pycowsay (should succeed) + ansible.builtin.command: pycowsay Moooooooo! + changed_when: false + register: what_the_cow_said + + - name: Which cow? + ansible.builtin.command: which pycowsay + changed_when: false + register: which_cow + + - name: Assert Moooooooo + ansible.builtin.assert: + that: + - "'Moooooooo!' in what_the_cow_said.stdout" + - "'/usr/local/bin/pycowsay' in which_cow.stdout" diff --git a/tests/integration/targets/pipx/tasks/testcase-8809-installall.yml b/tests/integration/targets/pipx/tasks/testcase-8809-installall.yml new file mode 100644 index 0000000000..37816247c0 --- /dev/null +++ b/tests/integration/targets/pipx/tasks/testcase-8809-installall.yml @@ -0,0 +1,59 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Set up environment + environment: + PATH: /usr/local/bin:{{ ansible_env.PATH }} + block: + - name: Uninstall pycowsay and black + community.general.pipx: + state: uninstall + name: "{{ item }}" + loop: + - black + - pycowsay + + - name: Uninstall pycowsay and black (again) + community.general.pipx: + state: uninstall + name: "{{ item }}" + loop: + - black + - pycowsay + register: uninstall_all_1 + + - name: Use install-all + community.general.pipx: + state: install-all + spec_metadata: spec.json + register: install_all + + - name: Run pycowsay (should succeed) + ansible.builtin.command: pycowsay Moooooooo! + changed_when: false + register: what_the_cow_said + + - name: Which cow? + ansible.builtin.command: which pycowsay + changed_when: false + register: which_cow + + - name: Uninstall pycowsay and black (again) + community.general.pipx: + state: uninstall + name: "{{ item }}" + loop: + - black + - pycowsay + register: uninstall_all_2 + + - name: Assert uninstall-all + ansible.builtin.assert: + that: + - uninstall_all_1 is not changed + - install_all is changed + - "'Moooooooo!' 
in what_the_cow_said.stdout" + - "'/usr/local/bin/pycowsay' in which_cow.stdout" + - uninstall_all_2 is changed diff --git a/tests/integration/targets/pipx/tasks/testcase-8809-pin.yml b/tests/integration/targets/pipx/tasks/testcase-8809-pin.yml new file mode 100644 index 0000000000..89e4bb9dc6 --- /dev/null +++ b/tests/integration/targets/pipx/tasks/testcase-8809-pin.yml @@ -0,0 +1,69 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Set up environment + environment: + PATH: /usr/local/bin:{{ ansible_env.PATH }} + block: + - name: Uninstall pycowsay and black + community.general.pipx: + state: uninstall + name: pycowsay + + # latest is 0.0.0.2 + - name: Install pycowsay 0.0.0.1 + community.general.pipx: + state: install + name: pycowsay + source: pycowsay==0.0.0.1 + + - name: Pin cowsay + community.general.pipx: + state: pin + name: pycowsay + register: pin_cow + + - name: Upgrade pycowsay + community.general.pipx: + state: upgrade + name: pycowsay + + - name: Get pycowsay version + community.general.pipx_info: + name: pycowsay + register: cow_info_1 + + - name: Unpin cowsay + community.general.pipx: + state: unpin + name: pycowsay + register: unpin_cow + + - name: Upgrade pycowsay + community.general.pipx: + state: upgrade + name: pycowsay + + - name: Get pycowsay version + community.general.pipx_info: + name: pycowsay + register: cow_info_2 + + - name: Uninstall pycowsay and black (again) + community.general.pipx: + state: uninstall + name: "{{ item }}" + loop: + - black + - pycowsay + register: uninstall_all_2 + + - name: Assert uninstall-all + ansible.builtin.assert: + that: + - pin_cow is changed + - cow_info_1 == "0.0.0.1" + - unpin_cow is changed + - cow_info_2 != "0.0.0.1" diff --git a/tests/integration/targets/pipx/tasks/testcase-8809-uninjectpkg.yml b/tests/integration/targets/pipx/tasks/testcase-8809-uninjectpkg.yml new file mode 100644 index 0000000000..89e4bb9dc6 --- /dev/null +++ b/tests/integration/targets/pipx/tasks/testcase-8809-uninjectpkg.yml @@ -0,0 +1,69 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Set up environment + environment: + PATH: /usr/local/bin:{{ ansible_env.PATH }} + block: + - name: Uninstall pycowsay and black + community.general.pipx: + state: uninstall + name: pycowsay + + # latest is 0.0.0.2 + - name: Install pycowsay 0.0.0.1 + community.general.pipx: + state: install + name: pycowsay + source: pycowsay==0.0.0.1 + + - name: Pin cowsay + community.general.pipx: + state: pin + name: pycowsay + register: pin_cow + + - name: Upgrade pycowsay + community.general.pipx: + state: upgrade + name: pycowsay + + - name: Get pycowsay version + community.general.pipx_info: + name: pycowsay + register: cow_info_1 + + - name: Unpin cowsay + community.general.pipx: + state: unpin + name: pycowsay + register: unpin_cow + + - name: Upgrade pycowsay + community.general.pipx: + state: upgrade + name: pycowsay + + - name: Get pycowsay version + community.general.pipx_info: + name: pycowsay + register: cow_info_2 + + - name: Uninstall pycowsay and black (again) + community.general.pipx: + state: uninstall + name: "{{ item }}" + loop: + - black + - pycowsay + register: uninstall_all_2 + + - name: Assert uninstall-all + ansible.builtin.assert: + that: 
+ - pin_cow is changed + - cow_info_1 == "0.0.0.1" + - unpin_cow is changed + - cow_info_2 != "0.0.0.1" diff --git a/tests/integration/targets/pipx/tasks/testcase-injectpkg.yml b/tests/integration/targets/pipx/tasks/testcase-injectpkg.yml new file mode 100644 index 0000000000..63d33ba92c --- /dev/null +++ b/tests/integration/targets/pipx/tasks/testcase-injectpkg.yml @@ -0,0 +1,49 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Ensure application pylint is uninstalled + community.general.pipx: + name: pylint + state: absent + +- name: Install application pylint + community.general.pipx: + name: pylint + register: install_pylint + +- name: Inject packages + community.general.pipx: + state: inject + name: pylint + inject_packages: + - licenses + register: inject_pkgs_pylint + +- name: Inject packages with apps + community.general.pipx: + state: inject + name: pylint + inject_packages: + - black + install_apps: true + register: inject_pkgs_apps_pylint + +- name: Cleanup pylint + community.general.pipx: + state: absent + name: pylint + register: uninstall_pylint + +- name: Check assertions inject_packages + assert: + that: + - install_pylint is changed + - inject_pkgs_pylint is changed + - '"pylint" in inject_pkgs_pylint.application' + - '"licenses" in inject_pkgs_pylint.application["pylint"]["injected"]' + - inject_pkgs_apps_pylint is changed + - '"pylint" in inject_pkgs_apps_pylint.application' + - '"black" in inject_pkgs_apps_pylint.application["pylint"]["injected"]' + - uninstall_pylint is changed diff --git a/tests/integration/targets/pipx/tasks/testcase-jupyter.yml b/tests/integration/targets/pipx/tasks/testcase-jupyter.yml new file mode 100644 index 0000000000..e4b5d48dd5 --- /dev/null +++ b/tests/integration/targets/pipx/tasks/testcase-jupyter.yml @@ -0,0 +1,28 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: install jupyter + block: + - name: ensure application mkdocs is uninstalled + community.general.pipx: + name: mkdocs + state: absent + + - name: install application mkdocs + community.general.pipx: + name: mkdocs + install_deps: true + register: install_mkdocs + + - name: cleanup mkdocs + community.general.pipx: + state: absent + name: mkdocs + + - name: check assertions + assert: + that: + - install_mkdocs is changed + - '"markdown_py" in install_mkdocs.stdout' diff --git a/tests/integration/targets/pipx/tasks/testcase-oldsitewide.yml b/tests/integration/targets/pipx/tasks/testcase-oldsitewide.yml new file mode 100644 index 0000000000..1db3e60406 --- /dev/null +++ b/tests/integration/targets/pipx/tasks/testcase-oldsitewide.yml @@ -0,0 +1,40 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Ensure /opt/pipx + ansible.builtin.file: + path: /opt/pipx + state: directory + mode: 0755 + +- name: Install tox site-wide + community.general.pipx: + name: tox + state: latest + register: install_tox_sitewide + environment: + PIPX_HOME: /opt/pipx + PIPX_BIN_DIR: /usr/local/bin + +- name: stat /usr/local/bin/tox + ansible.builtin.stat: + path: /usr/local/bin/tox + register: usrlocaltox + +- name: Uninstall tox 
site-wide + community.general.pipx: + name: tox + state: uninstall + register: uninstall_tox_sitewide + environment: + PIPX_HOME: /opt/pipx + PIPX_BIN_DIR: /usr/local/bin + +- name: check assertions + ansible.builtin.assert: + that: + - install_tox_sitewide is changed + - usrlocaltox.stat.exists + - uninstall_tox_sitewide is changed diff --git a/tests/integration/targets/pipx_info/aliases b/tests/integration/targets/pipx_info/aliases index e262b485a6..a28278bbc1 100644 --- a/tests/integration/targets/pipx_info/aliases +++ b/tests/integration/targets/pipx_info/aliases @@ -6,4 +6,3 @@ azp/posix/3 destructive skip/python2 skip/python3.5 -disabled # TODO diff --git a/tests/integration/targets/pipx_info/tasks/main.yml b/tests/integration/targets/pipx_info/tasks/main.yml index 0a01f0af9c..e3de105d6f 100644 --- a/tests/integration/targets/pipx_info/tasks/main.yml +++ b/tests/integration/targets/pipx_info/tasks/main.yml @@ -68,7 +68,7 @@ apps: - name: tox source: tox==3.24.0 - - name: ansible-lint + - name: pylint inject_packages: - licenses @@ -81,7 +81,7 @@ - name: install applications community.general.pipx: name: "{{ item.name }}" - source: "{{ item.source|default(omit) }}" + source: "{{ item.source | default(omit) }}" loop: "{{ apps }}" - name: inject packages @@ -102,9 +102,9 @@ include_injected: true register: info2_all_deps -- name: retrieve application ansible-lint +- name: retrieve application pylint community.general.pipx_info: - name: ansible-lint + name: pylint include_deps: true include_injected: true register: info2_lint @@ -131,10 +131,10 @@ - "'injected' in all_apps_deps[0]" - "'licenses' in all_apps_deps[0].injected" - - lint|length == 1 + - lint | length == 1 - all_apps_deps|length == 2 - lint[0] == all_apps_deps[0] vars: all_apps: "{{ info2_all.application|sort(attribute='name') }}" - all_apps_deps: "{{ info2_all_deps.application|sort(attribute='name') }}" - lint: "{{ info2_lint.application|sort(attribute='name') }}" + all_apps_deps: "{{ info2_all_deps.application | sort(attribute='name') }}" + lint: "{{ info2_lint.application | sort(attribute='name') }}" diff --git a/tests/integration/targets/pkgng/tasks/freebsd.yml b/tests/integration/targets/pkgng/tasks/freebsd.yml index 9d4ecf8bb2..a9c0a1873b 100644 --- a/tests/integration/targets/pkgng/tasks/freebsd.yml +++ b/tests/integration/targets/pkgng/tasks/freebsd.yml @@ -518,15 +518,23 @@ # NOTE: FreeBSD 13.3 fails to update the package catalogue for unknown reasons (someone with FreeBSD # knowledge has to take a look) # + # NOTE: FreeBSD 13.4 fails to update the package catalogue for unknown reasons (someone with FreeBSD + # knowledge has to take a look) + # # NOTE: FreeBSD 14.0 fails to update the package catalogue for unknown reasons (someone with FreeBSD # knowledge has to take a look) # + # NOTE: FreeBSD 14.1 fails to update the package catalogue for unknown reasons (someone with FreeBSD + # knowledge has to take a look) + # + # NOTE: FreeBSD 14.2 fails as well (someone with FreeBSD knowledge has to take a look) + # # See also # https://github.com/ansible-collections/community.general/issues/5795 when: >- (ansible_distribution_version is version('12.01', '>=') and ansible_distribution_version is version('12.3', '<')) - or (ansible_distribution_version is version('13.4', '>=') and ansible_distribution_version is version('14.0', '<')) - or ansible_distribution_version is version('14.1', '>=') + or (ansible_distribution_version is version('13.5', '>=') and ansible_distribution_version is version('14.0', '<')) + or 
ansible_distribution_version is version('14.3', '>=') block: - name: Setup testjail include_tasks: setup-testjail.yml diff --git a/tests/integration/targets/pkgng/tasks/install_single_package.yml b/tests/integration/targets/pkgng/tasks/install_single_package.yml index 5ba529af35..7f0886af8b 100644 --- a/tests/integration/targets/pkgng/tasks/install_single_package.yml +++ b/tests/integration/targets/pkgng/tasks/install_single_package.yml @@ -40,6 +40,16 @@ get_mime: false register: pkgng_install_stat_after +- name: Upgrade package (orig, no globs) + pkgng: + name: '{{ pkgng_test_pkg_category }}/{{ pkgng_test_pkg_name }}' + state: latest + use_globs: false + jail: '{{ pkgng_test_jail | default(omit) }}' + chroot: '{{ pkgng_test_chroot | default(omit) }}' + rootdir: '{{ pkgng_test_rootdir | default(omit) }}' + register: pkgng_upgrade_orig_noglobs + - name: Remove test package (if requested) pkgng: <<: *pkgng_install_params @@ -56,3 +66,4 @@ - not pkgng_install_idempotent_cached.stdout is match("Updating \w+ repository catalogue\.\.\.") - pkgng_install_stat_after.stat.exists - pkgng_install_stat_after.stat.executable + - pkgng_upgrade_orig_noglobs is not changed diff --git a/tests/integration/targets/setup_java_keytool/vars/Ubuntu-24.yml b/tests/integration/targets/setup_java_keytool/vars/Ubuntu-24.yml new file mode 100644 index 0000000000..addf344fe2 --- /dev/null +++ b/tests/integration/targets/setup_java_keytool/vars/Ubuntu-24.yml @@ -0,0 +1,8 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +keytool_package_names: + - ca-certificates-java + - openjdk-21-jre-headless diff --git a/tests/integration/targets/setup_os_pkg_name/tasks/alpine.yml b/tests/integration/targets/setup_os_pkg_name/tasks/alpine.yml new file mode 100644 index 0000000000..bb17b5e5f1 --- /dev/null +++ b/tests/integration/targets/setup_os_pkg_name/tasks/alpine.yml @@ -0,0 +1,11 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Update OS Package name fact (alpine) + ansible.builtin.set_fact: + os_package_name: "{{ os_package_name | combine(specific_package_names) }}" + vars: + specific_package_names: + virtualenv: py3-virtualenv diff --git a/tests/integration/targets/setup_os_pkg_name/tasks/archlinux.yml b/tests/integration/targets/setup_os_pkg_name/tasks/archlinux.yml new file mode 100644 index 0000000000..bb98583506 --- /dev/null +++ b/tests/integration/targets/setup_os_pkg_name/tasks/archlinux.yml @@ -0,0 +1,11 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Update OS Package name fact (archlinux) + ansible.builtin.set_fact: + os_package_name: "{{ os_package_name | combine(specific_package_names) }}" + vars: + specific_package_names: + virtualenv: python-virtualenv diff --git a/tests/integration/targets/setup_os_pkg_name/tasks/debian.yml b/tests/integration/targets/setup_os_pkg_name/tasks/debian.yml new file mode 100644 index 0000000000..6a20de1eeb --- /dev/null +++ b/tests/integration/targets/setup_os_pkg_name/tasks/debian.yml @@ -0,0 +1,10 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see 
LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Update OS Package name fact (debian) + ansible.builtin.set_fact: + os_package_name: "{{ os_package_name | combine(specific_package_names) }}" + vars: + specific_package_names: {} diff --git a/tests/integration/targets/setup_os_pkg_name/tasks/default.yml b/tests/integration/targets/setup_os_pkg_name/tasks/default.yml new file mode 100644 index 0000000000..977d690437 --- /dev/null +++ b/tests/integration/targets/setup_os_pkg_name/tasks/default.yml @@ -0,0 +1,11 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Update OS Package name fact (default) + ansible.builtin.set_fact: + os_package_name: "{{ os_package_name | combine(specific_package_names) }}" + vars: + specific_package_names: + virtualenv: virtualenv diff --git a/tests/integration/targets/setup_os_pkg_name/tasks/main.yml b/tests/integration/targets/setup_os_pkg_name/tasks/main.yml new file mode 100644 index 0000000000..91066cf53c --- /dev/null +++ b/tests/integration/targets/setup_os_pkg_name/tasks/main.yml @@ -0,0 +1,26 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Make sure we have the ansible_os_family and ansible_distribution_version facts + ansible.builtin.setup: + gather_subset: distribution + when: ansible_facts == {} + +- name: Create OS Package name fact + ansible.builtin.set_fact: + os_package_name: {} + +- name: Include the files setting the package names + ansible.builtin.include_tasks: "{{ file }}" + loop_control: + loop_var: file + loop: + - "default.yml" + - "{{ ansible_os_family | lower }}.yml" diff --git a/tests/integration/targets/setup_os_pkg_name/tasks/redhat.yml b/tests/integration/targets/setup_os_pkg_name/tasks/redhat.yml new file mode 100644 index 0000000000..022de8b961 --- /dev/null +++ b/tests/integration/targets/setup_os_pkg_name/tasks/redhat.yml @@ -0,0 +1,10 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Update OS Package name fact (redhat) + ansible.builtin.set_fact: + os_package_name: "{{ os_package_name | combine(specific_package_names) }}" + vars: + specific_package_names: {} diff --git a/tests/integration/targets/setup_os_pkg_name/tasks/suse.yml b/tests/integration/targets/setup_os_pkg_name/tasks/suse.yml new file mode 100644 index 0000000000..db2b0a1fa2 --- /dev/null +++ b/tests/integration/targets/setup_os_pkg_name/tasks/suse.yml @@ -0,0 +1,11 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Update OS Package name fact (suse) + ansible.builtin.set_fact: + os_package_name: "{{ os_package_name | combine(specific_package_names) }}" + vars: + 
specific_package_names: + virtualenv: python3-virtualenv diff --git a/tests/integration/targets/setup_pkg_mgr/tasks/main.yml b/tests/integration/targets/setup_pkg_mgr/tasks/main.yml index 5bff53b3b1..91f406d861 100644 --- a/tests/integration/targets/setup_pkg_mgr/tasks/main.yml +++ b/tests/integration/targets/setup_pkg_mgr/tasks/main.yml @@ -26,6 +26,12 @@ cacheable: true when: ansible_os_family == "Archlinux" +- shell: + cmd: | + sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/*.repo + sed -i 's%#baseurl=http://mirror.centos.org/%baseurl=https://vault.centos.org/%g' /etc/yum.repos.d/*.repo + when: ansible_distribution in 'CentOS' and ansible_distribution_major_version == '7' + - shell: cmd: | sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*.repo diff --git a/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-24-py3.yml b/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-24-py3.yml new file mode 100644 index 0000000000..702bd9a5d1 --- /dev/null +++ b/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-24-py3.yml @@ -0,0 +1,13 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +postgresql_packages: + - "postgresql" + - "postgresql-common" + - "python3-psycopg2" + +pg_hba_location: "/etc/postgresql/16/main/pg_hba.conf" +pg_dir: "/var/lib/postgresql/16/main" +pg_ver: 16 diff --git a/tests/integration/targets/setup_redis_replication/defaults/main.yml b/tests/integration/targets/setup_redis_replication/defaults/main.yml index 46dae9898a..301c5a6e84 100644 --- a/tests/integration/targets/setup_redis_replication/defaults/main.yml +++ b/tests/integration/targets/setup_redis_replication/defaults/main.yml @@ -22,13 +22,23 @@ redis_packages: FreeBSD: - redis +redis_user: + Alpine: redis + Archlinux: redis + Debian: redis + Ubuntu: redis + openSUSE Leap: redis + Fedora: "{{ '998' if ansible_distribution_major_version is version('41', '>=') else 'redis' }}" + CentOS: redis + FreeBSD: redis + redis_bin: Alpine: /usr/bin/redis-server Archlinux: /usr/bin/redis-server Debian: /usr/bin/redis-server Ubuntu: /usr/bin/redis-server openSUSE Leap: /usr/sbin/redis-server - Fedora: /usr/bin/redis-server + Fedora: "/usr/bin/{{ 'valkey-server' if ansible_distribution_major_version is version('41', '>=') else 'redis-server' }}" CentOS: /usr/bin/redis-server FreeBSD: /usr/local/bin/redis-server diff --git a/tests/integration/targets/setup_redis_replication/tasks/setup_redis_cluster.yml b/tests/integration/targets/setup_redis_replication/tasks/setup_redis_cluster.yml index dd48bf2b64..72f1703832 100644 --- a/tests/integration/targets/setup_redis_replication/tasks/setup_redis_cluster.yml +++ b/tests/integration/targets/setup_redis_replication/tasks/setup_redis_cluster.yml @@ -24,8 +24,8 @@ file: path: "{{ item }}" state: directory - owner: redis - group: redis + owner: "{{ redis_user[ansible_distribution] }}" + group: "{{ redis_user[ansible_distribution] }}" loop: - "{{ master_datadir }}" - "{{ master_logdir }}" @@ -54,10 +54,10 @@ datadir: "{{ replica_datadir }}" - name: Start redis master - shell: "{{ redis_bin[ansible_distribution] }} {{ master_conf }}" + ansible.builtin.command: "{{ redis_bin[ansible_distribution] }} {{ master_conf }}" - name: Start redis replica - shell: "{{ redis_bin[ansible_distribution] }} {{ replica_conf }} --{% if old_redis %}slaveof{% else %}replicaof{% endif %} 127.0.0.1 {{ master_port }}" + 
ansible.builtin.command: "{{ redis_bin[ansible_distribution] }} {{ replica_conf }} --{% if old_redis %}slaveof{% else %}replicaof{% endif %} 127.0.0.1 {{ master_port }}" - name: Wait for redis master to be started ansible.builtin.wait_for: diff --git a/tests/integration/targets/setup_snap/tasks/D-RedHat-9.4.yml b/tests/integration/targets/setup_snap/tasks/D-RedHat-9.4.yml new file mode 120000 index 0000000000..0b06951496 --- /dev/null +++ b/tests/integration/targets/setup_snap/tasks/D-RedHat-9.4.yml @@ -0,0 +1 @@ +nothing.yml \ No newline at end of file diff --git a/tests/integration/targets/setup_snap/tasks/D-RedHat-9.5.yml b/tests/integration/targets/setup_snap/tasks/D-RedHat-9.5.yml new file mode 100644 index 0000000000..5bbfaff128 --- /dev/null +++ b/tests/integration/targets/setup_snap/tasks/D-RedHat-9.5.yml @@ -0,0 +1,6 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# Do nothing diff --git a/tests/integration/targets/snap/tasks/main.yml b/tests/integration/targets/snap/tasks/main.yml index a2d8698d0f..e96fbde38b 100644 --- a/tests/integration/targets/snap/tasks/main.yml +++ b/tests/integration/targets/snap/tasks/main.yml @@ -15,9 +15,8 @@ ansible.builtin.include_tasks: test.yml - name: Include test_channel ansible.builtin.include_tasks: test_channel.yml - # TODO: Find better package to download and install from sources - cider 1.6.0 takes over 35 seconds to install - # - name: Include test_dangerous - # ansible.builtin.include_tasks: test_dangerous.yml + - name: Include test_dangerous + ansible.builtin.include_tasks: test_dangerous.yml - name: Include test_3dash ansible.builtin.include_tasks: test_3dash.yml - name: Include test_empty_list diff --git a/tests/integration/targets/snap/tasks/test_dangerous.yml b/tests/integration/targets/snap/tasks/test_dangerous.yml index 8fe4edee0b..e85725992d 100644 --- a/tests/integration/targets/snap/tasks/test_dangerous.yml +++ b/tests/integration/targets/snap/tasks/test_dangerous.yml @@ -5,43 +5,48 @@ # NOTE This is currently disabled for performance reasons! 
-- name: Make sure package is not installed (cider) +- name: Make sure package is not installed (bpytop) community.general.snap: - name: cider + name: bpytop state: absent -- name: Download cider snap - ansible.builtin.get_url: - url: https://github.com/ciderapp/cider-releases/releases/download/v1.6.0/cider_1.6.0_amd64.snap - dest: "{{ remote_tmp_dir }}/cider_1.6.0_amd64.snap" - mode: "0644" +- name: Download bpytop snap + ansible.builtin.command: + cmd: snap download bpytop + chdir: "{{ remote_tmp_dir }}" + register: bpytop_download -# Test for https://github.com/ansible-collections/community.general/issues/5715 -- name: Install package from file (check) - community.general.snap: - name: "{{ remote_tmp_dir }}/cider_1.6.0_amd64.snap" - dangerous: true - state: present - check_mode: true - register: install_dangerous_check +- name: Test block + vars: + snap_file: "{{ (bpytop_download.stdout_lines[-1] | split(' '))[-1] }}" + snap_path: "{{ remote_tmp_dir }}/{{ snap_file }}" + block: + # Test for https://github.com/ansible-collections/community.general/issues/5715 + - name: Install package from file (check) + community.general.snap: + name: "{{ snap_path }}" + dangerous: true + state: present + check_mode: true + register: install_dangerous_check -- name: Install package from file - community.general.snap: - name: "{{ remote_tmp_dir }}/cider_1.6.0_amd64.snap" - dangerous: true - state: present - register: install_dangerous + - name: Install package from file + community.general.snap: + name: "{{ snap_path }}" + dangerous: true + state: present + register: install_dangerous -- name: Install package from file - community.general.snap: - name: "{{ remote_tmp_dir }}/cider_1.6.0_amd64.snap" - dangerous: true - state: present - register: install_dangerous_idempot + - name: Install package from file (again) + community.general.snap: + name: "{{ snap_path }}" + dangerous: true + state: present + register: install_dangerous_idempot - name: Remove package community.general.snap: - name: cider + name: bpytop state: absent register: remove_dangerous diff --git a/tests/integration/targets/sysrc/tasks/main.yml b/tests/integration/targets/sysrc/tasks/main.yml index ace38202f2..25d7ca4d59 100644 --- a/tests/integration/targets/sysrc/tasks/main.yml +++ b/tests/integration/targets/sysrc/tasks/main.yml @@ -141,10 +141,12 @@ # # NOTE: currently fails with FreeBSD 12 with minor version less than 4 # NOTE: currently fails with FreeBSD 13 with minor version less than 2 + # NOTE: currently fails with FreeBSD 14 with minor version less than 1 # when: >- ansible_distribution_version is version('12.4', '>=') and ansible_distribution_version is version('13', '<') - or ansible_distribution_version is version('13.2', '>=') + or ansible_distribution_version is version('13.2', '>=') and ansible_distribution_version is version('14', '<') + or ansible_distribution_version is version('14.1', '>=') block: - name: Setup testjail include_tasks: setup-testjail.yml diff --git a/tests/integration/targets/test_ansible_type/aliases b/tests/integration/targets/test_ansible_type/aliases new file mode 100644 index 0000000000..12d1d6617e --- /dev/null +++ b/tests/integration/targets/test_ansible_type/aliases @@ -0,0 +1,5 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +azp/posix/2 diff --git a/tests/integration/targets/test_ansible_type/tasks/main.yml 
b/tests/integration/targets/test_ansible_type/tasks/main.yml new file mode 100644 index 0000000000..c890c11901 --- /dev/null +++ b/tests/integration/targets/test_ansible_type/tasks/main.yml @@ -0,0 +1,7 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Integration tests + import_tasks: tasks.yml diff --git a/tests/integration/targets/test_ansible_type/tasks/tasks.yml b/tests/integration/targets/test_ansible_type/tasks/tasks.yml new file mode 100644 index 0000000000..261256c0d4 --- /dev/null +++ b/tests/integration/targets/test_ansible_type/tasks/tasks.yml @@ -0,0 +1,248 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# Substitution converts str to AnsibleUnicode +# ------------------------------------------- + +- name: String. AnsibleUnicode. + assert: + that: data is community.general.ansible_type(dtype) + success_msg: '"abc" is {{ dtype }}' + fail_msg: '"abc" is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + data: "abc" + result: '{{ data | community.general.reveal_ansible_type }}' + dtype: 'AnsibleUnicode' + +- name: String. AnsibleUnicode alias str. + assert: + that: data is community.general.ansible_type(dtype, alias) + success_msg: '"abc" is {{ dtype }}' + fail_msg: '"abc" is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + alias: {"AnsibleUnicode": "str"} + data: "abc" + result: '{{ data | community.general.reveal_ansible_type(alias) }}' + dtype: 'str' + +- name: List. All items are AnsibleUnicode. + assert: + that: data is community.general.ansible_type(dtype) + success_msg: '["a", "b", "c"] is {{ dtype }}' + fail_msg: '["a", "b", "c"] is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + data: ["a", "b", "c"] + result: '{{ data | community.general.reveal_ansible_type }}' + dtype: 'list[AnsibleUnicode]' + +- name: Dictionary. All keys are AnsibleUnicode. All values are AnsibleUnicode. + assert: + that: data is community.general.ansible_type(dtype) + success_msg: '{"a": "foo", "b": "bar", "c": "baz"} is {{ dtype }}' + fail_msg: '{"a": "foo", "b": "bar", "c": "baz"} is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + data: {"a": "foo", "b": "bar", "c": "baz"} + result: '{{ data | community.general.reveal_ansible_type }}' + dtype: 'dict[AnsibleUnicode, AnsibleUnicode]' + +# No substitution and no alias. 
Type of strings is str +# ---------------------------------------------------- + +- name: String + assert: + that: '"abc" is community.general.ansible_type(dtype)' + success_msg: '"abc" is {{ dtype }}' + fail_msg: '"abc" is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + result: '{{ "abc" | community.general.reveal_ansible_type }}' + dtype: str + +- name: Integer + assert: + that: '123 is community.general.ansible_type(dtype)' + success_msg: '123 is {{ dtype }}' + fail_msg: '123 is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + result: '{{ 123 | community.general.reveal_ansible_type }}' + dtype: int + +- name: Float + assert: + that: '123.45 is community.general.ansible_type(dtype)' + success_msg: '123.45 is {{ dtype }}' + fail_msg: '123.45 is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + result: '{{ 123.45 | community.general.reveal_ansible_type }}' + dtype: float + +- name: Boolean + assert: + that: 'true is community.general.ansible_type(dtype)' + success_msg: 'true is {{ dtype }}' + fail_msg: 'true is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + result: '{{ true | community.general.reveal_ansible_type }}' + dtype: bool + +- name: List. All items are strings. + assert: + that: '["a", "b", "c"] is community.general.ansible_type(dtype)' + success_msg: '["a", "b", "c"] is {{ dtype }}' + fail_msg: '["a", "b", "c"] is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + result: '{{ ["a", "b", "c"] | community.general.reveal_ansible_type }}' + dtype: list[str] + +- name: List of dictionaries. + assert: + that: '[{"a": 1}, {"b": 2}] is community.general.ansible_type(dtype)' + success_msg: '[{"a": 1}, {"b": 2}] is {{ dtype }}' + fail_msg: '[{"a": 1}, {"b": 2}] is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + result: '{{ [{"a": 1}, {"b": 2}] | community.general.reveal_ansible_type }}' + dtype: list[dict] + +- name: Dictionary. All keys are strings. All values are integers. + assert: + that: '{"a": 1} is community.general.ansible_type(dtype)' + success_msg: '{"a": 1} is {{ dtype }}' + fail_msg: '{"a": 1} is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + result: '{{ {"a": 1} | community.general.reveal_ansible_type }}' + dtype: dict[str, int] + +- name: Dictionary. All keys are strings. All values are integers. + assert: + that: '{"a": 1, "b": 2} is community.general.ansible_type(dtype)' + success_msg: '{"a": 1, "b": 2} is {{ dtype }}' + fail_msg: '{"a": 1, "b": 2} is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + result: '{{ {"a": 1, "b": 2} | community.general.reveal_ansible_type }}' + dtype: dict[str, int] + +# Type of strings is AnsibleUnicode or str +# ---------------------------------------- + +- name: Dictionary. The keys are integers or strings. All values are strings. + assert: + that: data is community.general.ansible_type(dtype, alias) + success_msg: '{"1": "a", "b": "b"} is {{ dtype }}' + fail_msg: '{"1": "a", "b": "b"} is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + alias: {"AnsibleUnicode": "str"} + data: {1: 'a', 'b': 'b'} + result: '{{ data | community.general.reveal_ansible_type(alias) }}' + dtype: dict[int|str, str] + +- name: Dictionary. All keys are integers. All values are keys. 
+ assert: + that: data is community.general.ansible_type(dtype, alias) + success_msg: '{"1": "a", "2": "b"} is {{ dtype }}' + fail_msg: '{"1": "a", "2": "b"} is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + alias: {"AnsibleUnicode": "str"} + data: {1: 'a', 2: 'b'} + result: '{{ data | community.general.reveal_ansible_type(alias) }}' + dtype: dict[int, str] + +- name: Dictionary. All keys are strings. Multiple types values. + assert: + that: data is community.general.ansible_type(dtype, alias) + success_msg: '{"a": 1, "b": 1.1, "c": "abc", "d": true, "e": ["x", "y", "z"], "f": {"x": 1, "y": 2}} is {{ dtype }}' + fail_msg: '{"a": 1, "b": 1.1, "c": "abc", "d": true, "e": ["x", "y", "z"], "f": {"x": 1, "y": 2}} is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + alias: {"AnsibleUnicode": "str"} + data: {'a': 1, 'b': 1.1, 'c': 'abc', 'd': True, 'e': ['x', 'y', 'z'], 'f': {'x': 1, 'y': 2}} + result: '{{ data | community.general.reveal_ansible_type(alias) }}' + dtype: dict[str, bool|dict|float|int|list|str] + +- name: List. Multiple types items. + assert: + that: data is community.general.ansible_type(dtype, alias) + success_msg: '[1, 2, 1.1, "abc", true, ["x", "y", "z"], {"x": 1, "y": 2}] is {{ dtype }}' + fail_msg: '[1, 2, 1.1, "abc", true, ["x", "y", "z"], {"x": 1, "y": 2}] is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + alias: {"AnsibleUnicode": "str"} + data: [1, 2, 1.1, 'abc', True, ['x', 'y', 'z'], {'x': 1, 'y': 2}] + result: '{{ data | community.general.reveal_ansible_type(alias) }}' + dtype: list[bool|dict|float|int|list|str] + +# Option dtype is list +# -------------------- + +- name: AnsibleUnicode or str + assert: + that: data is community.general.ansible_type(dtype) + success_msg: '"abc" is {{ dtype }}' + fail_msg: '"abc" is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + data: abc + result: '{{ data | community.general.reveal_ansible_type }}' + dtype: ['AnsibleUnicode', 'str'] + +- name: float or int + assert: + that: data is community.general.ansible_type(dtype) + success_msg: '123 is {{ dtype }}' + fail_msg: '123 is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + data: 123 + result: '{{ data | community.general.reveal_ansible_type }}' + dtype: ['float', 'int'] + +- name: float or int + assert: + that: data is community.general.ansible_type(dtype) + success_msg: '123.45 is {{ dtype }}' + fail_msg: '123.45 is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + data: 123.45 + result: '{{ data | community.general.reveal_ansible_type }}' + dtype: ['float', 'int'] + +# Multiple alias +# -------------- + +- name: int alias number + assert: + that: data is community.general.ansible_type(dtype, alias) + success_msg: '123 is {{ dtype }}' + fail_msg: '123 is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + alias: {"int": "number", "float": "number"} + data: 123 + result: '{{ data | community.general.reveal_ansible_type(alias) }}' + dtype: number + +- name: float alias number + assert: + that: data is community.general.ansible_type(dtype, alias) + success_msg: '123.45 is {{ dtype }}' + fail_msg: '123.45 is {{ result }}' + quiet: '{{ quiet_test | d(true) | bool }}' + vars: + alias: {"int": "number", "float": "number"} + data: 123.45 + result: '{{ data | community.general.reveal_ansible_type(alias) }}' + dtype: number diff --git a/tests/integration/targets/timezone/aliases b/tests/integration/targets/timezone/aliases index 007bed5386..c7f0c89c23 
100644 --- a/tests/integration/targets/timezone/aliases +++ b/tests/integration/targets/timezone/aliases @@ -7,3 +7,4 @@ destructive skip/aix skip/osx skip/macos +skip/rhel7.9 # TODO: '/bin/timedatectl set-local-rtc no' fails with 'Failed to set local RTC: Failed to set RTC to local/UTC: Input/output error' diff --git a/tests/integration/targets/timezone/tasks/main.yml b/tests/integration/targets/timezone/tasks/main.yml index 721341592a..475f22447d 100644 --- a/tests/integration/targets/timezone/tasks/main.yml +++ b/tests/integration/targets/timezone/tasks/main.yml @@ -60,6 +60,14 @@ state: present when: ansible_distribution == 'Alpine' +- name: make sure hwclock is installed in Ubuntu 24.04 + package: + name: util-linux-extra + state: present + when: + - ansible_distribution == 'Ubuntu' + - ansible_facts.distribution_major_version is version('24', '>=') + - name: make sure the dbus service is started under systemd systemd: name: dbus diff --git a/tests/integration/targets/ufw/aliases b/tests/integration/targets/ufw/aliases index 209a1153e4..3c6c855221 100644 --- a/tests/integration/targets/ufw/aliases +++ b/tests/integration/targets/ufw/aliases @@ -13,6 +13,8 @@ skip/rhel9.0 # FIXME skip/rhel9.1 # FIXME skip/rhel9.2 # FIXME skip/rhel9.3 # FIXME +skip/rhel9.4 # FIXME +skip/rhel9.5 # FIXME skip/docker needs/root needs/target/setup_epel diff --git a/tests/sanity/extra/botmeta.py b/tests/sanity/extra/botmeta.py index 459d3ba14d..bacef63f19 100755 --- a/tests/sanity/extra/botmeta.py +++ b/tests/sanity/extra/botmeta.py @@ -3,10 +3,9 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later """Check BOTMETA file.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type -import ast +from __future__ import annotations + import os import re import sys @@ -66,26 +65,27 @@ AUTHOR_REGEX = re.compile(r'^\w.*\(@([\w-]+)\)(?![\w.])') def read_authors(filename): data = {} try: - with open(filename, 'rb') as b_module_data: - M = ast.parse(b_module_data.read()) + documentation = [] + in_docs = False + with open(filename, 'r', encoding='utf-8') as f: + for line in f: + if line.startswith('DOCUMENTATION ='): + in_docs = True + elif line.startswith(("'''", '"""')) and in_docs: + in_docs = False + elif in_docs: + documentation.append(line) + if in_docs: + print(f'{filename}: cannot find DOCUMENTATION end') + return [] + if not documentation: + print(f'{filename}: cannot find DOCUMENTATION') + return [] - for child in M.body: - if isinstance(child, ast.Assign): - for t in child.targets: - try: - theid = t.id - except AttributeError: - # skip errors can happen when trying to use the normal code - continue - - if theid == 'DOCUMENTATION': - if isinstance(child.value, ast.Dict): - data = ast.literal_eval(child.value) - else: - data = yaml.safe_load(child.value.s) + data = yaml.safe_load('\n'.join(documentation)) except Exception as e: - print('%s:%d:%d: Cannot load DOCUMENTATION: %s' % (filename, 0, 0, e)) + print(f'{filename}:0:0: Cannot load DOCUMENTATION: {e}') return [] author = data.get('author') or [] @@ -108,21 +108,21 @@ def validate(filename, filedata): return if filename.startswith(('plugins/doc_fragments/', 'plugins/module_utils/')): return - # Compile lis tof all active and inactive maintainers + # Compile list of all active and inactive maintainers all_maintainers = filedata['maintainers'] + filedata['ignore'] - if not filename.startswith('plugins/filter/'): + if 
not filename.startswith(('plugins/action/', 'plugins/doc_fragments/', 'plugins/filter/', 'plugins/module_utils/', 'plugins/plugin_utils/')): maintainers = read_authors(filename) for maintainer in maintainers: maintainer = extract_author_name(maintainer) if maintainer is not None and maintainer not in all_maintainers: - msg = 'Author %s not mentioned as active or inactive maintainer for %s (mentioned are: %s)' % ( - maintainer, filename, ', '.join(all_maintainers)) - print('%s:%d:%d: %s' % (FILENAME, 0, 0, msg)) + others = ', '.join(all_maintainers) + msg = f'Author {maintainer} not mentioned as active or inactive maintainer for {filename} (mentioned are: {others})' + print(f'{FILENAME}:0:0: {msg}') should_have_no_maintainer = filename in IGNORE_NO_MAINTAINERS if not all_maintainers and not should_have_no_maintainer: - print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'No (active or inactive) maintainer mentioned for %s' % filename)) + print(f'{FILENAME}:0:0: No (active or inactive) maintainer mentioned for {filename}') if all_maintainers and should_have_no_maintainer: - print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'Please remove %s from the ignore list of %s' % (filename, sys.argv[0]))) + print(f'{FILENAME}:0:0: Please remove {filename} from the ignore list of {sys.argv[0]}') def main(): @@ -131,12 +131,12 @@ def main(): with open(FILENAME, 'rb') as f: botmeta = yaml.safe_load(f) except yaml.error.MarkedYAMLError as ex: - print('%s:%d:%d: YAML load failed: %s' % (FILENAME, ex.context_mark.line + - 1, ex.context_mark.column + 1, re.sub(r'\s+', ' ', str(ex)))) + msg = re.sub(r'\s+', ' ', str(ex)) + print(f'{FILENAME}:{ex.context_mark.line + 1}:{ex.context_mark.column + 1}: YAML load failed: {msg}') return except Exception as ex: # pylint: disable=broad-except - print('%s:%d:%d: YAML load failed: %s' % - (FILENAME, 0, 0, re.sub(r'\s+', ' ', str(ex)))) + msg = re.sub(r'\s+', ' ', str(ex)) + print(f'{FILENAME}:0:0: YAML load failed: {msg}') return # Validate schema @@ -169,7 +169,7 @@ def main(): except MultipleInvalid as ex: for error in ex.errors: # No way to get line/column numbers - print('%s:%d:%d: %s' % (FILENAME, 0, 0, humanize_error(botmeta, error))) + print(f'{FILENAME}:0:0: {humanize_error(botmeta, error)}') return # Preprocess (substitute macros, convert to lists) @@ -181,7 +181,7 @@ def main(): macro = m.group(1) replacement = (macros[macro] or '') if macro == 'team_ansible_core': - return '$team_ansible_core %s' % replacement + return f'$team_ansible_core {replacement}' return replacement return macro_re.sub(f, text) @@ -190,19 +190,19 @@ def main(): try: for file, filedata in (botmeta.get('files') or {}).items(): file = convert_macros(file, macros) - filedata = dict((k, convert_macros(v, macros)) for k, v in filedata.items()) + filedata = {k: convert_macros(v, macros) for k, v in filedata.items()} files[file] = filedata for k, v in filedata.items(): if k in LIST_ENTRIES: filedata[k] = v.split() except KeyError as e: - print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'Found unknown macro %s' % e)) + print(f'{FILENAME}:0:0: Found unknown macro {e}') return # Scan all files unmatched = set(files) for dirs in ('docs/docsite/rst', 'plugins', 'tests', 'changelogs'): - for dirpath, dirnames, filenames in os.walk(dirs): + for dirpath, _dirnames, filenames in os.walk(dirs): for file in sorted(filenames): if file.endswith('.pyc'): continue @@ -217,10 +217,10 @@ def main(): if file in unmatched: unmatched.remove(file) if not matching_files: - print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'Did not find any entry for %s' %
filename)) + print(f'{FILENAME}:0:0: Did not find any entry for {filename}') matching_files.sort(key=lambda kv: kv[0]) - filedata = dict() + filedata = {} for k in LIST_ENTRIES: filedata[k] = [] for dummy, data in matching_files: @@ -231,7 +231,7 @@ def main(): validate(filename, filedata) for file in unmatched: - print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'Entry %s was not used' % file)) + print(f'{FILENAME}:0:0: Entry {file} was not used') if __name__ == '__main__': diff --git a/tests/sanity/ignore-2.13.txt b/tests/sanity/ignore-2.13.txt index cfeaff7c31..6f6495dd17 100644 --- a/tests/sanity/ignore-2.13.txt +++ b/tests/sanity/ignore-2.13.txt @@ -1,5 +1,6 @@ .azure-pipelines/scripts/publish-codecov.py replace-urlopen plugins/callback/timestamp.py validate-modules:invalid-documentation +plugins/callback/yaml.py validate-modules:invalid-documentation plugins/lookup/etcd.py validate-modules:invalid-documentation plugins/lookup/etcd3.py validate-modules:invalid-documentation plugins/modules/consul_session.py validate-modules:parameter-state-invalid-choice diff --git a/tests/sanity/ignore-2.14.txt b/tests/sanity/ignore-2.14.txt index 247d43fe37..24d7521036 100644 --- a/tests/sanity/ignore-2.14.txt +++ b/tests/sanity/ignore-2.14.txt @@ -1,5 +1,6 @@ .azure-pipelines/scripts/publish-codecov.py replace-urlopen plugins/callback/timestamp.py validate-modules:invalid-documentation +plugins/callback/yaml.py validate-modules:invalid-documentation plugins/lookup/etcd.py validate-modules:invalid-documentation plugins/lookup/etcd3.py validate-modules:invalid-documentation plugins/modules/consul_session.py validate-modules:parameter-state-invalid-choice diff --git a/tests/sanity/ignore-2.17.txt b/tests/sanity/ignore-2.17.txt index 7479d6bafe..806c4c5fcf 100644 --- a/tests/sanity/ignore-2.17.txt +++ b/tests/sanity/ignore-2.17.txt @@ -11,4 +11,5 @@ plugins/modules/udm_user.py import-3.12 # Uses deprecated stdlib library 'crypt plugins/modules/xfconf.py validate-modules:return-syntax-error plugins/module_utils/univention_umc.py pylint:use-yield-from # suggested construct does not work with Python 2 tests/unit/compat/mock.py pylint:use-yield-from # suggested construct does not work with Python 2 +tests/unit/plugins/modules/helper.py pylint:use-yield-from # suggested construct does not work with Python 2 tests/unit/plugins/modules/test_gio_mime.yaml no-smart-quotes diff --git a/tests/sanity/ignore-2.18.txt b/tests/sanity/ignore-2.18.txt index 7479d6bafe..806c4c5fcf 100644 --- a/tests/sanity/ignore-2.18.txt +++ b/tests/sanity/ignore-2.18.txt @@ -11,4 +11,5 @@ plugins/modules/udm_user.py import-3.12 # Uses deprecated stdlib library 'crypt plugins/modules/xfconf.py validate-modules:return-syntax-error plugins/module_utils/univention_umc.py pylint:use-yield-from # suggested construct does not work with Python 2 tests/unit/compat/mock.py pylint:use-yield-from # suggested construct does not work with Python 2 +tests/unit/plugins/modules/helper.py pylint:use-yield-from # suggested construct does not work with Python 2 tests/unit/plugins/modules/test_gio_mime.yaml no-smart-quotes diff --git a/tests/sanity/ignore-2.19.txt b/tests/sanity/ignore-2.19.txt new file mode 100644 index 0000000000..806c4c5fcf --- /dev/null +++ b/tests/sanity/ignore-2.19.txt @@ -0,0 +1,15 @@ +plugins/modules/consul_session.py validate-modules:parameter-state-invalid-choice +plugins/modules/homectl.py import-3.11 # Uses deprecated stdlib library 'crypt' +plugins/modules/homectl.py import-3.12 # Uses deprecated stdlib library 'crypt' 
+plugins/modules/iptables_state.py validate-modules:undocumented-parameter # params _back and _timeout used by action plugin +plugins/modules/lxc_container.py validate-modules:use-run-command-not-popen +plugins/modules/osx_defaults.py validate-modules:parameter-state-invalid-choice +plugins/modules/parted.py validate-modules:parameter-state-invalid-choice +plugins/modules/rhevm.py validate-modules:parameter-state-invalid-choice +plugins/modules/udm_user.py import-3.11 # Uses deprecated stdlib library 'crypt' +plugins/modules/udm_user.py import-3.12 # Uses deprecated stdlib library 'crypt' +plugins/modules/xfconf.py validate-modules:return-syntax-error +plugins/module_utils/univention_umc.py pylint:use-yield-from # suggested construct does not work with Python 2 +tests/unit/compat/mock.py pylint:use-yield-from # suggested construct does not work with Python 2 +tests/unit/plugins/modules/helper.py pylint:use-yield-from # suggested construct does not work with Python 2 +tests/unit/plugins/modules/test_gio_mime.yaml no-smart-quotes diff --git a/tests/sanity/ignore-2.19.txt.license b/tests/sanity/ignore-2.19.txt.license new file mode 100644 index 0000000000..edff8c7685 --- /dev/null +++ b/tests/sanity/ignore-2.19.txt.license @@ -0,0 +1,3 @@ +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later +SPDX-FileCopyrightText: Ansible Project diff --git a/tests/unit/plugins/inventory/test_opennebula.py b/tests/unit/plugins/inventory/test_opennebula.py index bbc2fe699a..52ea934043 100644 --- a/tests/unit/plugins/inventory/test_opennebula.py +++ b/tests/unit/plugins/inventory/test_opennebula.py @@ -21,6 +21,23 @@ from ansible_collections.community.general.plugins.inventory.opennebula import I from ansible_collections.community.general.tests.unit.compat.mock import create_autospec +class HistoryEntry(object): + def __init__(self): + self.SEQ = '384' + self.HOSTNAME = 'sam-691-sam' + self.HID = '10' + self.CID = '0' + self.DS_ID = '100' + self.VM_MAD = 'kvm' + self.TM_MAD = '3par' + self.ACTION = '0' + + +class HistoryRecords(object): + def __init__(self): + self.HISTORY = [HistoryEntry()] + + @pytest.fixture def inventory(): r = InventoryModule() @@ -58,7 +75,7 @@ def get_vm_pool(): 'ETIME': 0, 'GID': 132, 'GNAME': 'CSApparelVDC', - 'HISTORY_RECORDS': {}, + 'HISTORY_RECORDS': HistoryRecords(), 'ID': 7157, 'LAST_POLL': 1632762935, 'LCM_STATE': 3, @@ -104,7 +121,7 @@ def get_vm_pool(): 'ETIME': 0, 'GID': 0, 'GNAME': 'oneadmin', - 'HISTORY_RECORDS': {}, + 'HISTORY_RECORDS': [], 'ID': 327, 'LAST_POLL': 1632763543, 'LCM_STATE': 3, @@ -167,7 +184,7 @@ def get_vm_pool(): 'ETIME': 0, 'GID': 0, 'GNAME': 'oneadmin', - 'HISTORY_RECORDS': {}, + 'HISTORY_RECORDS': [], 'ID': 107, 'LAST_POLL': 1632764186, 'LCM_STATE': 3, diff --git a/tests/unit/plugins/inventory/test_proxmox.py b/tests/unit/plugins/inventory/test_proxmox.py index ea6c84bcda..b8358df226 100644 --- a/tests/unit/plugins/inventory/test_proxmox.py +++ b/tests/unit/plugins/inventory/test_proxmox.py @@ -37,7 +37,7 @@ def get_auth(): # NOTE: when updating/adding replies to this function, # be sure to only add only the _contents_ of the 'data' dict in the API reply -def get_json(url): +def get_json(url, ignore_errors=None): if url == "https://localhost:8006/api2/json/nodes": # _get_nodes return [{"type": "node", diff --git a/tests/unit/plugins/inventory/test_xen_orchestra.py b/tests/unit/plugins/inventory/test_xen_orchestra.py index bae038e807..d626fb988b 100644 
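Note on the test_opennebula.py change above: the HistoryEntry/HistoryRecords stand-ins replace the plain {} placeholder on the first fixture VM (the other two now carry empty lists), so the data resembles pyone-style objects in which VM history is exposed through attributes rather than dict keys; the assumption here is that the opennebula inventory plugin reads the history attribute-style. A minimal sketch of what the updated fixture hands to the plugin:

    # Illustrative sketch only, using the stand-in classes defined in test_opennebula.py above.
    records = HistoryRecords()
    last_entry = records.HISTORY[-1]               # HISTORY is a list of HistoryEntry objects
    print(last_entry.HOSTNAME, last_entry.VM_MAD)  # -> sam-691-sam kvm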
--- a/tests/unit/plugins/inventory/test_xen_orchestra.py +++ b/tests/unit/plugins/inventory/test_xen_orchestra.py @@ -146,7 +146,7 @@ def serialize_groups(groups): return list(map(str, groups)) -@ pytest.fixture(scope="module") +@pytest.fixture(scope="module") def inventory(): r = InventoryModule() r.inventory = InventoryData() diff --git a/tests/unit/plugins/lookup/test_bitwarden_secrets_manager.py b/tests/unit/plugins/lookup/test_bitwarden_secrets_manager.py index aaeaf79eaf..5d2abeffa8 100644 --- a/tests/unit/plugins/lookup/test_bitwarden_secrets_manager.py +++ b/tests/unit/plugins/lookup/test_bitwarden_secrets_manager.py @@ -45,6 +45,10 @@ MOCK_SECRETS = [ class MockBitwardenSecretsManager(BitwardenSecretsManager): def _run(self, args, stdin=None): + # mock the --version call + if args[0] == "--version": + return "bws 1.0.0", "", 0 + # secret_id is the last argument passed to the bws CLI secret_id = args[-1] rc = 1 diff --git a/tests/unit/plugins/module_utils/test_cmd_runner.py b/tests/unit/plugins/module_utils/test_cmd_runner.py index 6816afb34c..da93292197 100644 --- a/tests/unit/plugins/module_utils/test_cmd_runner.py +++ b/tests/unit/plugins/module_utils/test_cmd_runner.py @@ -32,6 +32,7 @@ TC_FORMATS = dict( simple_opt_val__int=(partial(cmd_runner_fmt.as_opt_val, "-t"), 42, ["-t", "42"], None), simple_opt_eq_val__str=(partial(cmd_runner_fmt.as_opt_eq_val, "--food"), "potatoes", ["--food=potatoes"], None), simple_opt_eq_val__int=(partial(cmd_runner_fmt.as_opt_eq_val, "--answer"), 42, ["--answer=42"], None), + simple_list_empty=(cmd_runner_fmt.as_list, [], [], None), simple_list_potato=(cmd_runner_fmt.as_list, "literal_potato", ["literal_potato"], None), simple_list_42=(cmd_runner_fmt.as_list, 42, ["42"], None), simple_list_min_len_ok=(partial(cmd_runner_fmt.as_list, min_len=1), 42, ["42"], None), @@ -47,6 +48,9 @@ TC_FORMATS = dict( simple_fixed_false=(partial(cmd_runner_fmt.as_fixed, ["--always-here", "--forever"]), False, ["--always-here", "--forever"], None), simple_fixed_none=(partial(cmd_runner_fmt.as_fixed, ["--always-here", "--forever"]), None, ["--always-here", "--forever"], None), simple_fixed_str=(partial(cmd_runner_fmt.as_fixed, ["--always-here", "--forever"]), "something", ["--always-here", "--forever"], None), + stack_optval__str=(partial(cmd_runner_fmt.stack(cmd_runner_fmt.as_optval), "-t"), ["potatoes", "bananas"], ["-tpotatoes", "-tbananas"], None), + stack_opt_val__str=(partial(cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_val), "-t"), ["potatoes", "bananas"], ["-t", "potatoes", "-t", "bananas"], None), + stack_opt_eq_val__int=(partial(cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_eq_val), "--answer"), [42, 17], ["--answer=42", "--answer=17"], None), ) if tuple(version_info) >= (3, 1): from collections import OrderedDict @@ -67,7 +71,7 @@ TC_FORMATS_IDS = sorted(TC_FORMATS.keys()) def test_arg_format(func, value, expected, exception): fmt_func = func() try: - actual = fmt_func(value, ctx_ignore_none=True) + actual = fmt_func(value) print("formatted string = {0}".format(actual)) assert actual == expected, "actual = {0}".format(actual) except Exception as e: @@ -355,7 +359,7 @@ def test_runner_context(runner_input, cmd_execution, expected): ) def _assert_run_info(actual, expected): - reduced = dict((k, actual[k]) for k in expected.keys()) + reduced = {k: actual[k] for k in expected.keys()} assert reduced == expected, "{0}".format(reduced) def _assert_run(runner_input, cmd_execution, expected, ctx, results): diff --git a/tests/unit/plugins/module_utils/test_module_helper.py 
b/tests/unit/plugins/module_utils/test_module_helper.py index b2cd58690d..b1e2eafc7f 100644 --- a/tests/unit/plugins/module_utils/test_module_helper.py +++ b/tests/unit/plugins/module_utils/test_module_helper.py @@ -119,28 +119,27 @@ def test_variable_meta_change(): assert vd.has_changed('d') -class MockMH(object): - changed = None - - def _div(self, x, y): - return x / y - - func_none = cause_changes()(_div) - func_onsucc = cause_changes(on_success=True)(_div) - func_onfail = cause_changes(on_failure=True)(_div) - func_onboth = cause_changes(on_success=True, on_failure=True)(_div) - - -CAUSE_CHG_DECO_PARAMS = ['method', 'expect_exception', 'expect_changed'] +# +# DEPRECATION NOTICE +# Parameters on_success and on_failure are deprecated and will be removed in community.general 12.0.0 +# Remove testcases with those params when releasing 12.0.0 +# +CAUSE_CHG_DECO_PARAMS = ['deco_args', 'expect_exception', 'expect_changed'] CAUSE_CHG_DECO = dict( - none_succ=dict(method='func_none', expect_exception=False, expect_changed=None), - none_fail=dict(method='func_none', expect_exception=True, expect_changed=None), - onsucc_succ=dict(method='func_onsucc', expect_exception=False, expect_changed=True), - onsucc_fail=dict(method='func_onsucc', expect_exception=True, expect_changed=None), - onfail_succ=dict(method='func_onfail', expect_exception=False, expect_changed=None), - onfail_fail=dict(method='func_onfail', expect_exception=True, expect_changed=True), - onboth_succ=dict(method='func_onboth', expect_exception=False, expect_changed=True), - onboth_fail=dict(method='func_onboth', expect_exception=True, expect_changed=True), + none_succ=dict(deco_args={}, expect_exception=False, expect_changed=None), + none_fail=dict(deco_args={}, expect_exception=True, expect_changed=None), + onsucc_succ=dict(deco_args=dict(on_success=True), expect_exception=False, expect_changed=True), + onsucc_fail=dict(deco_args=dict(on_success=True), expect_exception=True, expect_changed=None), + onfail_succ=dict(deco_args=dict(on_failure=True), expect_exception=False, expect_changed=None), + onfail_fail=dict(deco_args=dict(on_failure=True), expect_exception=True, expect_changed=True), + onboth_succ=dict(deco_args=dict(on_success=True, on_failure=True), expect_exception=False, expect_changed=True), + onboth_fail=dict(deco_args=dict(on_success=True, on_failure=True), expect_exception=True, expect_changed=True), + whensucc_succ=dict(deco_args=dict(when="success"), expect_exception=False, expect_changed=True), + whensucc_fail=dict(deco_args=dict(when="success"), expect_exception=True, expect_changed=None), + whenfail_succ=dict(deco_args=dict(when="failure"), expect_exception=False, expect_changed=None), + whenfail_fail=dict(deco_args=dict(when="failure"), expect_exception=True, expect_changed=True), + whenalways_succ=dict(deco_args=dict(when="always"), expect_exception=False, expect_changed=True), + whenalways_fail=dict(deco_args=dict(when="always"), expect_exception=True, expect_changed=True), ) CAUSE_CHG_DECO_IDS = sorted(CAUSE_CHG_DECO.keys()) @@ -150,12 +149,20 @@ CAUSE_CHG_DECO_IDS = sorted(CAUSE_CHG_DECO.keys()) for param in CAUSE_CHG_DECO_PARAMS] for tc in CAUSE_CHG_DECO_IDS], ids=CAUSE_CHG_DECO_IDS) -def test_cause_changes_deco(method, expect_exception, expect_changed): +def test_cause_changes_deco(deco_args, expect_exception, expect_changed): + + class MockMH(object): + changed = None + + @cause_changes(**deco_args) + def div_(self, x, y): + return x / y + + mh = MockMH() if expect_exception: with pytest.raises(Exception): -
getattr(mh, method)(1, 0) + mh.div_(1, 0) else: - getattr(mh, method)(9, 3) + mh.div_(9, 3) assert mh.changed == expect_changed diff --git a/tests/unit/plugins/module_utils/test_python_runner.py b/tests/unit/plugins/module_utils/test_python_runner.py index 015065bdd4..8572ee7d78 100644 --- a/tests/unit/plugins/module_utils/test_python_runner.py +++ b/tests/unit/plugins/module_utils/test_python_runner.py @@ -189,9 +189,11 @@ def test_runner_context(runner_input, cmd_execution, expected): def _extract_path(run_info): path = run_info.get("environ_update", {}).get("PATH") if path is not None: - run_info["environ_update"] = dict((k, v) - for k, v in run_info["environ_update"].items() - if k != "PATH") + run_info["environ_update"] = { + k: v + for k, v in run_info["environ_update"].items() + if k != "PATH" + } return run_info, path def _assert_run_info_env_path(actual, expected): @@ -199,7 +201,7 @@ def test_runner_context(runner_input, cmd_execution, expected): assert expected in actual2, "Missing expected path {0} in output PATH: {1}".format(expected, actual) def _assert_run_info(actual, expected): - reduced = dict((k, actual[k]) for k in expected.keys()) + reduced = {k: actual[k] for k in expected.keys()} reduced, act_path = _extract_path(reduced) expected, exp_path = _extract_path(expected) if exp_path is not None: diff --git a/tests/unit/plugins/modules/conftest.py b/tests/unit/plugins/modules/conftest.py index 9504c2336d..6e96c58316 100644 --- a/tests/unit/plugins/modules/conftest.py +++ b/tests/unit/plugins/modules/conftest.py @@ -16,22 +16,34 @@ from ansible.module_utils.common._collections_compat import MutableMapping from ansible_collections.community.general.plugins.module_utils import deps -@pytest.fixture -def patch_ansible_module(request, mocker): - if isinstance(request.param, string_types): - args = request.param - elif isinstance(request.param, MutableMapping): - if 'ANSIBLE_MODULE_ARGS' not in request.param: - request.param = {'ANSIBLE_MODULE_ARGS': request.param} - if '_ansible_remote_tmp' not in request.param['ANSIBLE_MODULE_ARGS']: - request.param['ANSIBLE_MODULE_ARGS']['_ansible_remote_tmp'] = '/tmp' - if '_ansible_keep_remote_files' not in request.param['ANSIBLE_MODULE_ARGS']: - request.param['ANSIBLE_MODULE_ARGS']['_ansible_keep_remote_files'] = False - args = json.dumps(request.param) +def fix_ansible_args(args): + if isinstance(args, string_types): + return args + + if isinstance(args, MutableMapping): + if 'ANSIBLE_MODULE_ARGS' not in args: + args = {'ANSIBLE_MODULE_ARGS': args} + if '_ansible_remote_tmp' not in args['ANSIBLE_MODULE_ARGS']: + args['ANSIBLE_MODULE_ARGS']['_ansible_remote_tmp'] = '/tmp' + if '_ansible_keep_remote_files' not in args['ANSIBLE_MODULE_ARGS']: + args['ANSIBLE_MODULE_ARGS']['_ansible_keep_remote_files'] = False + args = json.dumps(args) + return args + else: raise Exception('Malformed data to the patch_ansible_module pytest fixture') - mocker.patch('ansible.module_utils.basic._ANSIBLE_ARGS', to_bytes(args)) + +@pytest.fixture +def patch_ansible_module(request, mocker): + if hasattr(request, "param"): + args = fix_ansible_args(request.param) + mocker.patch('ansible.module_utils.basic._ANSIBLE_ARGS', to_bytes(args)) + else: + def _patch(args): + args = fix_ansible_args(args) + mocker.patch('ansible.module_utils.basic._ANSIBLE_ARGS', to_bytes(args)) + return _patch @pytest.fixture(autouse=True) diff --git a/tests/unit/plugins/modules/helper.py b/tests/unit/plugins/modules/helper.py index 1ffa19aad4..0626e39f1c 100644 --- 
a/tests/unit/plugins/modules/helper.py +++ b/tests/unit/plugins/modules/helper.py @@ -8,174 +8,240 @@ __metaclass__ = type import sys import json -from collections import namedtuple -from itertools import chain, repeat -import pytest import yaml - - -ModuleTestCase = namedtuple("ModuleTestCase", ["id", "input", "output", "run_command_calls", "flags"]) -RunCmdCall = namedtuple("RunCmdCall", ["command", "environ", "rc", "out", "err"]) - - -class _BaseContext(object): - def __init__(self, helper, testcase, mocker, capfd): - self.helper = helper - self.testcase = testcase - self.mocker = mocker - self.capfd = capfd - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - return False - - def _run(self): - with pytest.raises(SystemExit): - self.helper.module_main() - - out, err = self.capfd.readouterr() - results = json.loads(out) - - self.check_results(results) - - def test_flags(self, flag=None): - flags = self.testcase.flags - if flag: - flags = flags.get(flag) - return flags - - def run(self): - func = self._run - - test_flags = self.test_flags() - if test_flags.get("skip"): - pytest.skip(test_flags.get("skip")) - if test_flags.get("xfail"): - pytest.xfail(test_flags.get("xfail")) - - func() - - def check_results(self, results): - print("testcase =\n%s" % str(self.testcase)) - print("results =\n%s" % results) - if 'exception' in results: - print("exception = \n%s" % results["exception"]) - - for test_result in self.testcase.output: - assert results[test_result] == self.testcase.output[test_result], \ - "'{0}': '{1}' != '{2}'".format(test_result, results[test_result], self.testcase.output[test_result]) - - -class _RunCmdContext(_BaseContext): - def __init__(self, *args, **kwargs): - super(_RunCmdContext, self).__init__(*args, **kwargs) - self.run_cmd_calls = self.testcase.run_command_calls - self.mock_run_cmd = self._make_mock_run_cmd() - - def _make_mock_run_cmd(self): - call_results = [(x.rc, x.out, x.err) for x in self.run_cmd_calls] - error_call_results = (123, - "OUT: testcase has not enough run_command calls", - "ERR: testcase has not enough run_command calls") - mock_run_command = self.mocker.patch('ansible.module_utils.basic.AnsibleModule.run_command', - side_effect=chain(call_results, repeat(error_call_results))) - return mock_run_command - - def check_results(self, results): - super(_RunCmdContext, self).check_results(results) - call_args_list = [(item[0][0], item[1]) for item in self.mock_run_cmd.call_args_list] - expected_call_args_list = [(item.command, item.environ) for item in self.run_cmd_calls] - print("call args list =\n%s" % call_args_list) - print("expected args list =\n%s" % expected_call_args_list) - - assert self.mock_run_cmd.call_count == len(self.run_cmd_calls), "{0} != {1}".format(self.mock_run_cmd.call_count, len(self.run_cmd_calls)) - if self.mock_run_cmd.call_count: - assert call_args_list == expected_call_args_list +import pytest class Helper(object): @staticmethod - def from_list(module_main, list_): - helper = Helper(module_main, test_cases=list_) + def from_list(test_module, ansible_module, test_cases): + helper = Helper(test_module, ansible_module, test_cases=test_cases) return helper @staticmethod - def from_file(module_main, filename): + def from_file(test_module, ansible_module, filename): with open(filename, "r") as test_cases: - helper = Helper(module_main, test_cases=test_cases) - return helper + test_cases_data = yaml.safe_load(test_cases) + return Helper.from_list(test_module, ansible_module, test_cases_data) 
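A test module is expected to wire into the refactored Helper the same way test_cpanm.py does further down in this diff; a minimal sketch, assuming a hypothetical module mytool with a test_mytool.yaml spec sitting next to the test file:

    # Sketch only: "mytool" and its YAML spec are illustrative assumptions,
    # mirroring the real usage shown for test_cpanm.py later in this diff.
    from ansible_collections.community.general.plugins.modules import mytool
    from .helper import Helper, RunCommandMock  # pylint: disable=unused-import

    # Generates the parametrized test_module() function and registers the
    # patch_bin fixture provided by RunCommandMock, driven by test_mytool.yaml.
    Helper.from_module(mytool, __name__)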
@staticmethod - def from_module(module, test_module_name): - basename = module.__name__.split(".")[-1] - test_spec = "tests/unit/plugins/modules/test_{0}.yaml".format(basename) - helper = Helper.from_file(module.main, test_spec) + def from_module(ansible_module, test_module_name, test_spec=None): + test_module = sys.modules[test_module_name] + if test_spec is None: + test_spec = test_module.__file__.replace('.py', '.yaml') + return Helper.from_file(test_module, ansible_module, test_spec) - setattr(sys.modules[test_module_name], "patch_bin", helper.cmd_fixture) - setattr(sys.modules[test_module_name], "test_module", helper.test_module) + def add_func_to_test_module(self, name, func): + setattr(self.test_module, name, func) - def __init__(self, module_main, test_cases): - self.module_main = module_main - self._test_cases = test_cases - if isinstance(test_cases, (list, tuple)): - self.testcases = test_cases - else: - self.testcases = self._make_test_cases() + def __init__(self, test_module, ansible_module, test_cases): + self.test_module = test_module + self.ansible_module = ansible_module + self.test_cases = [] + self.fixtures = {} + + for test_case in test_cases: + tc = ModuleTestCase.make_test_case(test_case, test_module) + self.test_cases.append(tc) + self.fixtures.update(tc.fixtures) + self.set_test_func() + self.set_fixtures(self.fixtures) @property - def cmd_fixture(self): + def runner(self): + return Runner(self.ansible_module.main) + + def set_test_func(self): + @pytest.mark.parametrize('test_case', self.test_cases, ids=[tc.id for tc in self.test_cases]) + @pytest.mark.usefixtures(*self.fixtures) + def _test_module(mocker, capfd, patch_ansible_module, test_case): + """ + Run unit tests for each test case in self.test_cases + """ + patch_ansible_module(test_case.input) + self.runner.run(mocker, capfd, test_case) + + self.add_func_to_test_module("test_module", _test_module) + + return _test_module + + def set_fixtures(self, fixtures): + for name, fixture in fixtures.items(): + self.add_func_to_test_module(name, fixture) + + +class Runner: + def __init__(self, module_main): + self.module_main = module_main + self.results = None + + def run(self, mocker, capfd, test_case): + test_case.setup(mocker) + self.pytest_module(capfd, test_case.flags) + test_case.check(self.results) + + def pytest_module(self, capfd, flags): + if flags.get("skip"): + pytest.skip(flags.get("skip")) + if flags.get("xfail"): + pytest.xfail(flags.get("xfail")) + + with pytest.raises(SystemExit): + (self.module_main)() + + out, err = capfd.readouterr() + self.results = json.loads(out) + + +class ModuleTestCase: + def __init__(self, id, input, output, mocks, flags): + self.id = id + self.input = input + self.output = output + self._mocks = mocks + self.mocks = {} + self.flags = flags + + self._fixtures = {} + + def __str__(self): + return "<ModuleTestCase: id={id} {input}{output}mocks={mocks} flags={flags}>".format( + id=self.id, + input="input " if self.input else "", + output="output " if self.output else "", + mocks="({0})".format(", ".join(self.mocks.keys())), + flags=self.flags + ) + + def __repr__(self): + return "ModuleTestCase(id={id}, input={input}, output={output}, mocks={mocks}, flags={flags})".format( + id=self.id, + input=self.input, + output=self.output, + mocks=repr(self.mocks), + flags=self.flags + ) + + @staticmethod + def make_test_case(test_case, test_module): + tc = ModuleTestCase( + id=test_case["id"], + input=test_case.get("input", {}), + output=test_case.get("output", {}), + mocks=test_case.get("mocks", {}), + flags=test_case.get("flags", {}) + ) +
tc.build_mocks(test_module) + return tc + + def build_mocks(self, test_module): + for mock, mock_spec in self._mocks.items(): + mock_class = self.get_mock_class(test_module, mock) + self.mocks[mock] = mock_class.build_mock(mock_spec) + + self._fixtures.update(self.mocks[mock].fixtures()) + + @staticmethod + def get_mock_class(test_module, mock): + try: + class_name = "".join(x.capitalize() for x in mock.split("_")) + "Mock" + plugin_class = getattr(test_module, class_name) + assert issubclass(plugin_class, TestCaseMock), "Class {0} is not a subclass of TestCaseMock".format(class_name) + return plugin_class + except AttributeError: + raise ValueError("Cannot find class {0} for mock {1}".format(class_name, mock)) + + @property + def fixtures(self): + return dict(self._fixtures) + + def setup(self, mocker): + self.setup_testcase(mocker) + self.setup_mocks(mocker) + + def check(self, results): + self.check_testcase(results) + self.check_mocks(self, results) + + def setup_testcase(self, mocker): + pass + + def setup_mocks(self, mocker): + for mock in self.mocks.values(): + mock.setup(mocker) + + def check_testcase(self, results): + print("testcase =\n%s" % repr(self)) + print("results =\n%s" % results) + if 'exception' in results: + print("exception = \n%s" % results["exception"]) + + for test_result in self.output: + assert results[test_result] == self.output[test_result], \ + "'{0}': '{1}' != '{2}'".format(test_result, results[test_result], self.output[test_result]) + + def check_mocks(self, test_case, results): + for mock in self.mocks.values(): + mock.check(test_case, results) + + +class TestCaseMock: + @classmethod + def build_mock(cls, mock_specs): + return cls(mock_specs) + + def __init__(self, mock_specs): + self.mock_specs = mock_specs + + def fixtures(self): + return {} + + def setup(self, mocker): + pass + + def check(self, test_case, results): + raise NotImplementedError() + + +class RunCommandMock(TestCaseMock): + def __str__(self): + return "<RunCommandMock specs={specs}>".format(specs=self.mock_specs) + + def __repr__(self): + return "RunCommandMock({specs})".format(specs=self.mock_specs) + + def fixtures(self): @pytest.fixture def patch_bin(mocker): def mockie(self, path, *args, **kwargs): return "/testbin/{0}".format(path) mocker.patch('ansible.module_utils.basic.AnsibleModule.get_bin_path', mockie) - return patch_bin + return {"patch_bin": patch_bin} - def _make_test_cases(self): - test_cases = yaml.safe_load(self._test_cases) + def setup(self, mocker): + def _results(): + for result in [(x['rc'], x['out'], x['err']) for x in self.mock_specs]: + yield result + raise Exception("testcase has not enough run_command calls") - results = [] - for tc in test_cases: - for tc_param in ["input", "output", "flags"]: - if not tc.get(tc_param): - tc[tc_param] = {} - if tc.get("run_command_calls"): - tc["run_command_calls"] = [RunCmdCall(**r) for r in tc["run_command_calls"]] - else: - tc["run_command_calls"] = [] - results.append(ModuleTestCase(**tc)) + results = _results() - return results + def side_effect(self_, **kwargs): + result = next(results) + if kwargs.get("check_rc", False) and result[0] != 0: + raise Exception("rc = {0}".format(result[0])) + return result - @property - def testcases_params(self): - return [[x.input, x] for x in self.testcases] + self.mock_run_cmd = mocker.patch('ansible.module_utils.basic.AnsibleModule.run_command', side_effect=side_effect) - @property - def testcases_ids(self): - return [item.id for item in self.testcases] + def check(self, test_case, results): + call_args_list =
[(item[0][0], item[1]) for item in self.mock_run_cmd.call_args_list] + expected_call_args_list = [(item['command'], item['environ']) for item in self.mock_specs] + print("call args list =\n%s" % call_args_list) + print("expected args list =\n%s" % expected_call_args_list) - def __call__(self, *args, **kwargs): - return _RunCmdContext(self, *args, **kwargs) - - @property - def test_module(self): - helper = self - - @pytest.mark.parametrize('patch_ansible_module, testcase', - helper.testcases_params, ids=helper.testcases_ids, - indirect=['patch_ansible_module']) - @pytest.mark.usefixtures('patch_ansible_module') - def _test_module(mocker, capfd, patch_bin, testcase): - """ - Run unit tests for test cases listed in TEST_CASES - """ - - with helper(testcase, mocker, capfd) as testcase_context: - testcase_context.run() - - return _test_module + assert self.mock_run_cmd.call_count == len(self.mock_specs), "{0} != {1}".format(self.mock_run_cmd.call_count, len(self.mock_specs)) + if self.mock_run_cmd.call_count: + assert call_args_list == expected_call_args_list diff --git a/tests/unit/plugins/modules/test_bootc_manage.py b/tests/unit/plugins/modules/test_bootc_manage.py new file mode 100644 index 0000000000..5393a57a07 --- /dev/null +++ b/tests/unit/plugins/modules/test_bootc_manage.py @@ -0,0 +1,72 @@ +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.general.plugins.modules import bootc_manage +from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args + + +class TestBootcManageModule(ModuleTestCase): + + def setUp(self): + super(TestBootcManageModule, self).setUp() + self.module = bootc_manage + + def tearDown(self): + super(TestBootcManageModule, self).tearDown() + + def test_switch_without_image(self): + """Failure if state is 'switch' but no image provided""" + set_module_args({'state': 'switch'}) + with self.assertRaises(AnsibleFailJson) as result: + self.module.main() + self.assertEqual(result.exception.args[0]['msg'], "state is switch but all of the following are missing: image") + + def test_switch_with_image(self): + """Test successful switch with image provided""" + set_module_args({'state': 'switch', 'image': 'example.com/image:latest'}) + with patch('ansible.module_utils.basic.AnsibleModule.run_command') as run_command_mock: + run_command_mock.return_value = (0, 'Queued for next boot: ', '') + with self.assertRaises(AnsibleExitJson) as result: + self.module.main() + self.assertTrue(result.exception.args[0]['changed']) + + def test_latest_state(self): + """Test successful upgrade to the latest state""" + set_module_args({'state': 'latest'}) + with patch('ansible.module_utils.basic.AnsibleModule.run_command') as run_command_mock: + run_command_mock.return_value = (0, 'Queued for next boot: ', '') + with self.assertRaises(AnsibleExitJson) as result: + self.module.main() + self.assertTrue(result.exception.args[0]['changed']) + + def test_latest_state_no_change(self): + """Test no change for latest state""" + set_module_args({'state': 'latest'}) + with patch('ansible.module_utils.basic.AnsibleModule.run_command') as run_command_mock: + 
run_command_mock.return_value = (0, 'No changes in ', '') + with self.assertRaises(AnsibleExitJson) as result: + self.module.main() + self.assertFalse(result.exception.args[0]['changed']) + + def test_switch_image_failure(self): + """Test failure during image switch""" + set_module_args({'state': 'switch', 'image': 'example.com/image:latest'}) + with patch('ansible.module_utils.basic.AnsibleModule.run_command') as run_command_mock: + run_command_mock.return_value = (1, '', 'ERROR') + with self.assertRaises(AnsibleFailJson) as result: + self.module.main() + self.assertEqual(result.exception.args[0]['msg'], 'ERROR: Command execution failed.') + + def test_latest_state_failure(self): + """Test failure during upgrade""" + set_module_args({'state': 'latest'}) + with patch('ansible.module_utils.basic.AnsibleModule.run_command') as run_command_mock: + run_command_mock.return_value = (1, '', 'ERROR') + with self.assertRaises(AnsibleFailJson) as result: + self.module.main() + self.assertEqual(result.exception.args[0]['msg'], 'ERROR: Command execution failed.') diff --git a/tests/unit/plugins/modules/test_cpanm.py b/tests/unit/plugins/modules/test_cpanm.py index 4eecf000fd..28090455f0 100644 --- a/tests/unit/plugins/modules/test_cpanm.py +++ b/tests/unit/plugins/modules/test_cpanm.py @@ -14,7 +14,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import cpanm -from .helper import Helper +from .helper import Helper, RunCommandMock # pylint: disable=unused-import Helper.from_module(cpanm, __name__) diff --git a/tests/unit/plugins/modules/test_cpanm.yaml b/tests/unit/plugins/modules/test_cpanm.yaml index 4eed957206..ad081254d6 100644 --- a/tests/unit/plugins/modules/test_cpanm.yaml +++ b/tests/unit/plugins/modules/test_cpanm.yaml @@ -10,7 +10,8 @@ mode: compatibility output: changed: true - run_command_calls: + mocks: + run_command: - command: [/testbin/perl, -le, 'use Dancer;'] environ: &env-def-false {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: false} rc: 2 @@ -27,7 +28,8 @@ mode: compatibility output: changed: false - run_command_calls: + mocks: + run_command: - command: [/testbin/perl, -le, 'use Dancer;'] environ: *env-def-false rc: 0 @@ -38,7 +40,8 @@ name: Dancer output: changed: true - run_command_calls: + mocks: + run_command: - command: [/testbin/cpanm, Dancer] environ: *env-def-true rc: 0 @@ -50,7 +53,8 @@ mode: compatibility output: changed: true - run_command_calls: + mocks: + run_command: - command: [/testbin/cpanm, MIYAGAWA/Plack-0.99_05.tar.gz] environ: *env-def-true rc: 0 @@ -61,7 +65,8 @@ name: MIYAGAWA/Plack-0.99_05.tar.gz output: changed: true - run_command_calls: + mocks: + run_command: - command: [/testbin/cpanm, MIYAGAWA/Plack-0.99_05.tar.gz] environ: *env-def-true rc: 0 @@ -74,7 +79,8 @@ locallib: /srv/webapps/my_app/extlib output: changed: true - run_command_calls: + mocks: + run_command: - command: [/testbin/cpanm, --local-lib, /srv/webapps/my_app/extlib, Dancer] environ: *env-def-true rc: 0 @@ -86,7 +92,8 @@ mode: new output: changed: true - run_command_calls: + mocks: + run_command: - command: [/testbin/cpanm, /srv/webapps/my_app/src/] environ: *env-def-true rc: 0 @@ -100,7 +107,8 @@ locallib: /srv/webapps/my_app/extlib output: changed: true - run_command_calls: + mocks: + run_command: - command: [/testbin/cpanm, --notest, --local-lib, /srv/webapps/my_app/extlib, Dancer] environ: *env-def-true rc: 0 @@ -113,7 +121,8 @@ mirror: "http://cpan.cpantesters.org/" output: changed: true - run_command_calls: + mocks: + run_command: - command: 
[/testbin/cpanm, --mirror, "http://cpan.cpantesters.org/", Dancer] environ: *env-def-true rc: 0 @@ -126,7 +135,8 @@ system_lib: true output: failed: true - run_command_calls: [] + mocks: + run_command: [] - id: install_minversion_implicit input: name: Dancer @@ -134,7 +144,8 @@ version: "1.0" output: changed: true - run_command_calls: + mocks: + run_command: - command: [/testbin/cpanm, Dancer~1.0] environ: *env-def-true rc: 0 @@ -147,7 +158,8 @@ version: "~1.5" output: changed: true - run_command_calls: + mocks: + run_command: - command: [/testbin/cpanm, Dancer~1.5] environ: *env-def-true rc: 0 @@ -160,7 +172,8 @@ version: "@1.7" output: changed: true - run_command_calls: + mocks: + run_command: - command: [/testbin/cpanm, Dancer@1.7] environ: *env-def-true rc: 0 @@ -174,7 +187,8 @@ output: failed: true msg: parameter 'version' must not be used when installing from a file - run_command_calls: [] + mocks: + run_command: [] - id: install_specific_version_from_directory_error input: from_path: ~/ @@ -183,7 +197,8 @@ output: failed: true msg: parameter 'version' must not be used when installing from a directory - run_command_calls: [] + mocks: + run_command: [] - id: install_specific_version_from_git_url_explicit input: name: "git://github.com/plack/Plack.git" @@ -191,7 +206,8 @@ version: "@1.7" output: changed: true - run_command_calls: + mocks: + run_command: - command: [/testbin/cpanm, "git://github.com/plack/Plack.git@1.7"] environ: *env-def-true rc: 0 @@ -204,7 +220,8 @@ version: "2.5" output: changed: true - run_command_calls: + mocks: + run_command: - command: [/testbin/cpanm, "git://github.com/plack/Plack.git@2.5"] environ: *env-def-true rc: 0 @@ -218,4 +235,5 @@ output: failed: true msg: operator '~' not allowed in version parameter when installing from git repository - run_command_calls: [] + mocks: + run_command: [] diff --git a/tests/unit/plugins/modules/test_datadog_downtime.py.disabled b/tests/unit/plugins/modules/test_datadog_downtime.py similarity index 86% rename from tests/unit/plugins/modules/test_datadog_downtime.py.disabled rename to tests/unit/plugins/modules/test_datadog_downtime.py index 52f27710cf..e1ecbfa66f 100644 --- a/tests/unit/plugins/modules/test_datadog_downtime.py.disabled +++ b/tests/unit/plugins/modules/test_datadog_downtime.py @@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible_collections.community.general.plugins.modules.monitoring.datadog import datadog_downtime +from ansible_collections.community.general.plugins.modules import datadog_downtime from ansible_collections.community.general.tests.unit.compat.mock import MagicMock, patch from ansible_collections.community.general.tests.unit.plugins.modules.utils import ( AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args @@ -36,7 +36,7 @@ class TestDatadogDowntime(ModuleTestCase): set_module_args({}) self.module.main() - @patch("ansible_collections.community.general.plugins.modules.monitoring.datadog.datadog_downtime.DowntimesApi") + @patch("ansible_collections.community.general.plugins.modules.datadog_downtime.DowntimesApi") def test_create_downtime_when_no_id(self, downtimes_api_mock): set_module_args({ "monitor_tags": ["foo:bar"], @@ -60,10 +60,11 @@ class TestDatadogDowntime(ModuleTestCase): downtime.end = 2222 downtime.timezone = "UTC" downtime.recurrence = DowntimeRecurrence( - rrule="rrule" + rrule="rrule", + type="rrule" ) - create_downtime_mock = MagicMock(return_value=Downtime(id=12345)) + create_downtime_mock = 
MagicMock(return_value=self.__downtime_with_id(12345)) downtimes_api_mock.return_value = MagicMock(create_downtime=create_downtime_mock) with self.assertRaises(AnsibleExitJson) as result: self.module.main() @@ -71,7 +72,7 @@ class TestDatadogDowntime(ModuleTestCase): self.assertEqual(result.exception.args[0]['downtime']['id'], 12345) create_downtime_mock.assert_called_once_with(downtime) - @patch("ansible_collections.community.general.plugins.modules.monitoring.datadog.datadog_downtime.DowntimesApi") + @patch("ansible_collections.community.general.plugins.modules.datadog_downtime.DowntimesApi") def test_create_downtime_when_id_and_disabled(self, downtimes_api_mock): set_module_args({ "id": 1212, @@ -96,11 +97,16 @@ class TestDatadogDowntime(ModuleTestCase): downtime.end = 2222 downtime.timezone = "UTC" downtime.recurrence = DowntimeRecurrence( - rrule="rrule" + rrule="rrule", + type="rrule" ) - create_downtime_mock = MagicMock(return_value=Downtime(id=12345)) - get_downtime_mock = MagicMock(return_value=Downtime(id=1212, disabled=True)) + disabled_downtime = Downtime() + disabled_downtime.disabled = True + disabled_downtime.id = 1212 + + create_downtime_mock = MagicMock(return_value=self.__downtime_with_id(12345)) + get_downtime_mock = MagicMock(return_value=disabled_downtime) downtimes_api_mock.return_value = MagicMock( create_downtime=create_downtime_mock, get_downtime=get_downtime_mock ) @@ -111,7 +117,7 @@ class TestDatadogDowntime(ModuleTestCase): create_downtime_mock.assert_called_once_with(downtime) get_downtime_mock.assert_called_once_with(1212) - @patch("ansible_collections.community.general.plugins.modules.monitoring.datadog.datadog_downtime.DowntimesApi") + @patch("ansible_collections.community.general.plugins.modules.datadog_downtime.DowntimesApi") def test_update_downtime_when_not_disabled(self, downtimes_api_mock): set_module_args({ "id": 1212, @@ -136,11 +142,16 @@ class TestDatadogDowntime(ModuleTestCase): downtime.end = 2222 downtime.timezone = "UTC" downtime.recurrence = DowntimeRecurrence( - rrule="rrule" + rrule="rrule", + type="rrule" ) - update_downtime_mock = MagicMock(return_value=Downtime(id=1212)) - get_downtime_mock = MagicMock(return_value=Downtime(id=1212, disabled=False)) + enabled_downtime = Downtime() + enabled_downtime.disabled = False + enabled_downtime.id = 1212 + + update_downtime_mock = MagicMock(return_value=self.__downtime_with_id(1212)) + get_downtime_mock = MagicMock(return_value=enabled_downtime) downtimes_api_mock.return_value = MagicMock( update_downtime=update_downtime_mock, get_downtime=get_downtime_mock ) @@ -151,7 +162,7 @@ class TestDatadogDowntime(ModuleTestCase): update_downtime_mock.assert_called_once_with(1212, downtime) get_downtime_mock.assert_called_once_with(1212) - @patch("ansible_collections.community.general.plugins.modules.monitoring.datadog.datadog_downtime.DowntimesApi") + @patch("ansible_collections.community.general.plugins.modules.datadog_downtime.DowntimesApi") def test_update_downtime_no_change(self, downtimes_api_mock): set_module_args({ "id": 1212, @@ -176,7 +187,8 @@ class TestDatadogDowntime(ModuleTestCase): downtime.end = 2222 downtime.timezone = "UTC" downtime.recurrence = DowntimeRecurrence( - rrule="rrule" + rrule="rrule", + type="rrule" ) downtime_get = Downtime() @@ -205,7 +217,7 @@ class TestDatadogDowntime(ModuleTestCase): update_downtime_mock.assert_called_once_with(1212, downtime) get_downtime_mock.assert_called_once_with(1212) - 
@patch("ansible_collections.community.general.plugins.modules.monitoring.datadog.datadog_downtime.DowntimesApi") + @patch("ansible_collections.community.general.plugins.modules.datadog_downtime.DowntimesApi") def test_delete_downtime(self, downtimes_api_mock): set_module_args({ "id": 1212, @@ -215,12 +227,16 @@ class TestDatadogDowntime(ModuleTestCase): }) cancel_downtime_mock = MagicMock() - get_downtime_mock = MagicMock(return_value=Downtime(id=1212)) downtimes_api_mock.return_value = MagicMock( - get_downtime=get_downtime_mock, + get_downtime=self.__downtime_with_id, cancel_downtime=cancel_downtime_mock ) with self.assertRaises(AnsibleExitJson) as result: self.module.main() self.assertTrue(result.exception.args[0]['changed']) cancel_downtime_mock.assert_called_once_with(1212) + + def __downtime_with_id(self, id): + downtime = Downtime() + downtime.id = id + return downtime diff --git a/tests/unit/plugins/modules/test_django_check.py b/tests/unit/plugins/modules/test_django_check.py new file mode 100644 index 0000000000..52210bdb76 --- /dev/null +++ b/tests/unit/plugins/modules/test_django_check.py @@ -0,0 +1,13 @@ +# Copyright (c) Alexei Znamensky (russoz@gmail.com) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +from ansible_collections.community.general.plugins.modules import django_check +from .helper import Helper, RunCommandMock # pylint: disable=unused-import + + +Helper.from_module(django_check, __name__) diff --git a/tests/unit/plugins/modules/test_django_check.yaml b/tests/unit/plugins/modules/test_django_check.yaml new file mode 100644 index 0000000000..91a8ff1953 --- /dev/null +++ b/tests/unit/plugins/modules/test_django_check.yaml @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Alexei Znamensky (russoz@gmail.com) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +--- +- id: success + input: + settings: whatever.settings + mocks: + run_command: + - command: [/testbin/python, -m, django, check, --no-color, --settings=whatever.settings] + environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} + rc: 0 + out: "whatever\n" + err: "" +- id: multiple_databases + input: + settings: whatever.settings + database: + - abc + - def + mocks: + run_command: + - command: [/testbin/python, -m, django, check, --no-color, --settings=whatever.settings, --database, abc, --database, def] + environ: *env-def + rc: 0 + out: "whatever\n" + err: "" diff --git a/tests/unit/plugins/modules/test_django_command.py b/tests/unit/plugins/modules/test_django_command.py index ffa9feb394..8be910fd27 100644 --- a/tests/unit/plugins/modules/test_django_command.py +++ b/tests/unit/plugins/modules/test_django_command.py @@ -7,7 +7,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import django_command -from .helper import Helper +from .helper import Helper, RunCommandMock # pylint: disable=unused-import Helper.from_module(django_command, __name__) diff --git a/tests/unit/plugins/modules/test_django_command.yaml b/tests/unit/plugins/modules/test_django_command.yaml index 9fe9b419f9..2a19351083 100644 --- a/tests/unit/plugins/modules/test_django_command.yaml +++ b/tests/unit/plugins/modules/test_django_command.yaml @@ -8,12 +8,13 @@ 
input: command: check extra_args: - - babaloo - - yaba - - daba - - doo + - babaloo + - yaba + - daba + - doo settings: whatever.settings - run_command_calls: + mocks: + run_command: - command: [/testbin/python, -m, django, check, --no-color, --settings=whatever.settings, babaloo, yaba, daba, doo] environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} rc: 0 @@ -23,16 +24,15 @@ input: command: check extra_args: - - babaloo - - yaba - - daba - - doo + - babaloo + - yaba + - daba + - doo settings: whatever.settings output: failed: true - flags: - xfail: not seem to be failing as it should - run_command_calls: + mocks: + run_command: - command: [/testbin/python, -m, django, check, --no-color, --settings=whatever.settings, babaloo, yaba, daba, doo] environ: *env-def rc: 1 diff --git a/tests/unit/plugins/modules/test_django_createcachetable.py b/tests/unit/plugins/modules/test_django_createcachetable.py new file mode 100644 index 0000000000..74bdf1cc63 --- /dev/null +++ b/tests/unit/plugins/modules/test_django_createcachetable.py @@ -0,0 +1,13 @@ +# Copyright (c) Alexei Znamensky (russoz@gmail.com) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +from ansible_collections.community.general.plugins.modules import django_createcachetable +from .helper import Helper, RunCommandMock # pylint: disable=unused-import + + +Helper.from_module(django_createcachetable, __name__) diff --git a/tests/unit/plugins/modules/test_django_createcachetable.yaml b/tests/unit/plugins/modules/test_django_createcachetable.yaml new file mode 100644 index 0000000000..22b7dcb304 --- /dev/null +++ b/tests/unit/plugins/modules/test_django_createcachetable.yaml @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Alexei Znamensky (russoz@gmail.com) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +--- +- id: command_success + input: + settings: whatever.settings + mocks: + run_command: + - command: [/testbin/python, -m, django, createcachetable, --no-color, --settings=whatever.settings, --noinput, --database=default] + environ: {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} + rc: 0 + out: "whatever\n" + err: "" diff --git a/tests/unit/plugins/modules/test_dnf_config_manager.py b/tests/unit/plugins/modules/test_dnf_config_manager.py index 90bffe4365..7b231e10a5 100644 --- a/tests/unit/plugins/modules/test_dnf_config_manager.py +++ b/tests/unit/plugins/modules/test_dnf_config_manager.py @@ -254,8 +254,8 @@ expected_repo_states_crb_disabled = {'disabled': ['appstream-debuginfo', 'rpmfusion-nonfree-updates']} call_get_repo_states = call(['/usr/bin/dnf', 'repolist', '--all', '--verbose'], check_rc=True) -call_disable_crb = call(['/usr/bin/dnf', 'config-manager', '--set-disabled', 'crb'], check_rc=True) -call_enable_crb = call(['/usr/bin/dnf', 'config-manager', '--set-enabled', 'crb'], check_rc=True) +call_disable_crb = call(['/usr/bin/dnf', 'config-manager', '--assumeyes', '--set-disabled', 'crb'], check_rc=True) +call_enable_crb = call(['/usr/bin/dnf', 'config-manager', '--assumeyes', '--set-enabled', 'crb'], check_rc=True) class TestDNFConfigManager(ModuleTestCase): diff --git a/tests/unit/plugins/modules/test_facter_facts.py 
b/tests/unit/plugins/modules/test_facter_facts.py index 227d8cd150..bb74216b88 100644 --- a/tests/unit/plugins/modules/test_facter_facts.py +++ b/tests/unit/plugins/modules/test_facter_facts.py @@ -8,7 +8,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import facter_facts -from .helper import Helper +from .helper import Helper, RunCommandMock # pylint: disable=unused-import Helper.from_module(facter_facts, __name__) diff --git a/tests/unit/plugins/modules/test_facter_facts.yaml b/tests/unit/plugins/modules/test_facter_facts.yaml index c287fdcfda..e53f7fe60f 100644 --- a/tests/unit/plugins/modules/test_facter_facts.yaml +++ b/tests/unit/plugins/modules/test_facter_facts.yaml @@ -11,7 +11,8 @@ a: 1 b: 2 c: 3 - run_command_calls: + mocks: + run_command: - command: [/testbin/facter, --json] environ: &env-def {check_rc: true} rc: 0 @@ -21,17 +22,18 @@ - id: with args input: arguments: - - -p - - system_uptime - - timezone - - is_virtual + - -p + - system_uptime + - timezone + - is_virtual output: ansible_facts: facter: a: 1 b: 2 c: 3 - run_command_calls: + mocks: + run_command: - command: [/testbin/facter, --json, -p, system_uptime, timezone, is_virtual] environ: *env-def rc: 0 diff --git a/tests/unit/plugins/modules/test_gconftool2.py b/tests/unit/plugins/modules/test_gconftool2.py index 9608016e58..2ba2e1c70e 100644 --- a/tests/unit/plugins/modules/test_gconftool2.py +++ b/tests/unit/plugins/modules/test_gconftool2.py @@ -8,7 +8,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import gconftool2 -from .helper import Helper +from .helper import Helper, RunCommandMock # pylint: disable=unused-import Helper.from_module(gconftool2, __name__) diff --git a/tests/unit/plugins/modules/test_gconftool2.yaml b/tests/unit/plugins/modules/test_gconftool2.yaml index 5114dc45fd..084741e6d1 100644 --- a/tests/unit/plugins/modules/test_gconftool2.yaml +++ b/tests/unit/plugins/modules/test_gconftool2.yaml @@ -13,7 +13,8 @@ output: new_value: '200' changed: true - run_command_calls: + mocks: + run_command: - command: [/testbin/gconftool-2, --get, /desktop/gnome/background/picture_filename] environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} rc: 0 @@ -38,7 +39,8 @@ output: new_value: '200' changed: false - run_command_calls: + mocks: + run_command: - command: [/testbin/gconftool-2, --get, /desktop/gnome/background/picture_filename] environ: *env-def rc: 0 @@ -63,7 +65,8 @@ output: new_value: 'false' changed: false - run_command_calls: + mocks: + run_command: - command: [/testbin/gconftool-2, --get, /apps/gnome_settings_daemon/screensaver/start_screensaver] environ: *env-def rc: 0 @@ -84,9 +87,10 @@ state: absent key: /desktop/gnome/background/picture_filename output: - new_value: null + new_value: changed: true - run_command_calls: + mocks: + run_command: - command: [/testbin/gconftool-2, --get, /desktop/gnome/background/picture_filename] environ: *env-def rc: 0 @@ -102,9 +106,10 @@ state: absent key: /apps/gnome_settings_daemon/screensaver/start_screensaver output: - new_value: null + new_value: changed: false - run_command_calls: + mocks: + run_command: - command: [/testbin/gconftool-2, --get, /apps/gnome_settings_daemon/screensaver/start_screensaver] environ: *env-def rc: 0 diff --git a/tests/unit/plugins/modules/test_gconftool2_info.py b/tests/unit/plugins/modules/test_gconftool2_info.py index 54676a12d2..4daa655714 100644 --- a/tests/unit/plugins/modules/test_gconftool2_info.py +++ 
b/tests/unit/plugins/modules/test_gconftool2_info.py @@ -8,7 +8,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import gconftool2_info -from .helper import Helper +from .helper import Helper, RunCommandMock # pylint: disable=unused-import Helper.from_module(gconftool2_info, __name__) diff --git a/tests/unit/plugins/modules/test_gconftool2_info.yaml b/tests/unit/plugins/modules/test_gconftool2_info.yaml index eb8bef750d..26db16a368 100644 --- a/tests/unit/plugins/modules/test_gconftool2_info.yaml +++ b/tests/unit/plugins/modules/test_gconftool2_info.yaml @@ -9,7 +9,8 @@ key: /desktop/gnome/background/picture_filename output: value: '100' - run_command_calls: + mocks: + run_command: - command: [/testbin/gconftool-2, --get, /desktop/gnome/background/picture_filename] environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} rc: 0 @@ -19,8 +20,9 @@ input: key: /desktop/gnome/background/picture_filename output: - value: null - run_command_calls: + value: + mocks: + run_command: - command: [/testbin/gconftool-2, --get, /desktop/gnome/background/picture_filename] environ: *env-def rc: 0 diff --git a/tests/unit/plugins/modules/test_gio_mime.py b/tests/unit/plugins/modules/test_gio_mime.py index f2402ac352..5e51320485 100644 --- a/tests/unit/plugins/modules/test_gio_mime.py +++ b/tests/unit/plugins/modules/test_gio_mime.py @@ -8,7 +8,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import gio_mime -from .helper import Helper +from .helper import Helper, RunCommandMock # pylint: disable=unused-import Helper.from_module(gio_mime, __name__) diff --git a/tests/unit/plugins/modules/test_gio_mime.yaml b/tests/unit/plugins/modules/test_gio_mime.yaml index d9e47a60ea..75e5554c7c 100644 --- a/tests/unit/plugins/modules/test_gio_mime.yaml +++ b/tests/unit/plugins/modules/test_gio_mime.yaml @@ -11,7 +11,8 @@ output: handler: google-chrome.desktop changed: true - run_command_calls: + mocks: + run_command: - command: [/testbin/gio, mime, x-scheme-handler/http] environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} rc: 0 @@ -32,7 +33,8 @@ changed: true flags: skip: test helper does not support check mode yet - run_command_calls: + mocks: + run_command: - command: [/testbin/gio, mime, x-scheme-handler/http] environ: *env-def rc: 0 @@ -51,7 +53,8 @@ output: handler: google-chrome.desktop changed: false - run_command_calls: + mocks: + run_command: - command: [/testbin/gio, mime, x-scheme-handler/http] environ: *env-def rc: 0 diff --git a/tests/unit/plugins/modules/test_ipa_getkeytab.py b/tests/unit/plugins/modules/test_ipa_getkeytab.py new file mode 100644 index 0000000000..e4e8ed2ece --- /dev/null +++ b/tests/unit/plugins/modules/test_ipa_getkeytab.py @@ -0,0 +1,60 @@ +# +# Copyright (c) 2021, Abhijeet Kasurde +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible_collections.community.general.tests.unit.compat.mock import call, patch +from ansible_collections.community.general.plugins.modules import ipa_getkeytab +from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args + + +class IPAKeytabModuleTestCase(ModuleTestCase): + module = ipa_getkeytab + + def setUp(self): + super(IPAKeytabModuleTestCase, self).setUp() + 
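The YAML hunks above (cpanm, django_command, facter_facts, gconftool2, gconftool2_info, gio_mime) move the per-test run_command expectations under a generic mocks: key. The Helper/RunCommandMock machinery that consumes them lives in tests/unit/plugins/modules/helper.py and is not shown in this patch, so the standalone sketch below, with made-up output values, only illustrates how entries of that shape can drive unittest.mock.

# --- illustrative sketch, not part of the patch; output values are invented ---
from unittest import mock

test_case = {
    "mocks": {
        "run_command": [
            {"command": ["/testbin/gio", "mime", "x-scheme-handler/http"],
             "rc": 0, "out": "firefox.desktop\n", "err": ""},
        ],
    },
}

# One side_effect entry per expected call, in declaration order.
run_command = mock.Mock(
    side_effect=[(c["rc"], c["out"], c["err"]) for c in test_case["mocks"]["run_command"]]
)

rc, out, err = run_command(test_case["mocks"]["run_command"][0]["command"])
assert (rc, err) == (0, "")
# Afterwards the recorded calls can be checked against the "command" entries.
run_command.assert_called_once_with(["/testbin/gio", "mime", "x-scheme-handler/http"])
# --- end sketch ---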
ansible_module_path = "ansible_collections.community.general.plugins.modules.ipa_getkeytab.AnsibleModule" + self.mock_run_command = patch('%s.run_command' % ansible_module_path) + self.module_main_command = self.mock_run_command.start() + self.mock_get_bin_path = patch('%s.get_bin_path' % ansible_module_path) + self.get_bin_path = self.mock_get_bin_path.start() + self.get_bin_path.return_value = '/testbin/ipa_getkeytab' + + def tearDown(self): + self.mock_run_command.stop() + self.mock_get_bin_path.stop() + super(IPAKeytabModuleTestCase, self).tearDown() + + def module_main(self, exit_exc): + with self.assertRaises(exit_exc) as exc: + self.module.main() + return exc.exception.args[0] + + def test_present(self): + set_module_args({ + 'path': '/tmp/test.keytab', + 'principal': 'HTTP/freeipa-dc02.ipa.test', + 'ipa_host': 'freeipa-dc01.ipa.test', + 'state': 'present' + }) + + self.module_main_command.side_effect = [ + (0, '{}', ''), + ] + + result = self.module_main(AnsibleExitJson) + + self.assertTrue(result['changed']) + self.module_main_command.assert_has_calls([ + call(['/testbin/ipa_getkeytab', + '--keytab', '/tmp/test.keytab', + '--server', 'freeipa-dc01.ipa.test', + '--principal', 'HTTP/freeipa-dc02.ipa.test' + ], + check_rc=True, + environ_update={'LC_ALL': 'C', 'LANGUAGE': 'C'} + ), + ]) diff --git a/tests/unit/plugins/modules/test_keycloak_identity_provider.py b/tests/unit/plugins/modules/test_keycloak_identity_provider.py index 6fd258b8a3..a893a130a5 100644 --- a/tests/unit/plugins/modules/test_keycloak_identity_provider.py +++ b/tests/unit/plugins/modules/test_keycloak_identity_provider.py @@ -23,7 +23,7 @@ from ansible.module_utils.six import StringIO @contextmanager def patch_keycloak_api(get_identity_provider, create_identity_provider=None, update_identity_provider=None, delete_identity_provider=None, get_identity_provider_mappers=None, create_identity_provider_mapper=None, update_identity_provider_mapper=None, - delete_identity_provider_mapper=None): + delete_identity_provider_mapper=None, get_realm_by_id=None): """Mock context manager for patching the methods in PwPolicyIPAClient that contact the IPA server Patches the `login` and `_post_json` methods @@ -55,9 +55,11 @@ def patch_keycloak_api(get_identity_provider, create_identity_provider=None, upd as mock_update_identity_provider_mapper: with patch.object(obj, 'delete_identity_provider_mapper', side_effect=delete_identity_provider_mapper) \ as mock_delete_identity_provider_mapper: - yield mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, \ - mock_delete_identity_provider, mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, \ - mock_update_identity_provider_mapper, mock_delete_identity_provider_mapper + with patch.object(obj, 'get_realm_by_id', side_effect=get_realm_by_id) \ + as mock_get_realm_by_id: + yield mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, \ + mock_delete_identity_provider, mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, \ + mock_update_identity_provider_mapper, mock_delete_identity_provider_mapper, mock_get_realm_by_id def get_response(object_with_future_response, method, get_id_call_count): @@ -200,6 +202,38 @@ class TestKeycloakIdentityProvider(ModuleTestCase): "name": "last_name" }] ] + return_value_realm_get = [ + { + 'id': 'realm-name', + 'realm': 'realm-name', + 'enabled': True, + 'identityProviders': [ + { + "addReadTokenRoleOnCreate": False, + "alias": "oidc-idp", + 
"authenticateByDefault": False, + "config": { + "authorizationUrl": "https://idp.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "secret", + "issuer": "https://idp.example.com", + "syncMode": "FORCE", + "tokenUrl": "https://idp.example.com/token", + "userInfoUrl": "https://idp.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP", + "enabled": True, + "firstBrokerLoginFlowAlias": "first broker login", + "internalId": "7ab437d5-f2bb-4ecc-91a8-315349454da6", + "linkOnly": False, + "providerId": "oidc", + "storeToken": False, + "trustEmail": False, + } + ] + }, + ] return_value_idp_created = [None] return_value_mapper_created = [None, None] changed = True @@ -210,15 +244,17 @@ class TestKeycloakIdentityProvider(ModuleTestCase): with mock_good_connection(): with patch_keycloak_api(get_identity_provider=return_value_idp_get, get_identity_provider_mappers=return_value_mappers_get, - create_identity_provider=return_value_idp_created, create_identity_provider_mapper=return_value_mapper_created) \ + create_identity_provider=return_value_idp_created, create_identity_provider_mapper=return_value_mapper_created, + get_realm_by_id=return_value_realm_get) \ as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider, mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper, - mock_delete_identity_provider_mapper): + mock_delete_identity_provider_mapper, mock_get_realm_by_id): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() self.assertEqual(len(mock_get_identity_provider.mock_calls), 2) self.assertEqual(len(mock_get_identity_provider_mappers.mock_calls), 1) + self.assertEqual(len(mock_get_realm_by_id.mock_calls), 1) self.assertEqual(len(mock_create_identity_provider.mock_calls), 1) self.assertEqual(len(mock_create_identity_provider_mapper.mock_calls), 2) @@ -444,6 +480,68 @@ class TestKeycloakIdentityProvider(ModuleTestCase): "name": "last_name" }] ] + return_value_realm_get = [ + { + 'id': 'realm-name', + 'realm': 'realm-name', + 'enabled': True, + 'identityProviders': [ + { + "addReadTokenRoleOnCreate": False, + "alias": "oidc-idp", + "authenticateByDefault": False, + "config": { + "authorizationUrl": "https://idp.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "secret", + "issuer": "https://idp.example.com", + "syncMode": "FORCE", + "tokenUrl": "https://idp.example.com/token", + "userInfoUrl": "https://idp.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP", + "enabled": True, + "firstBrokerLoginFlowAlias": "first broker login", + "internalId": "7ab437d5-f2bb-4ecc-91a8-315349454da6", + "linkOnly": False, + "providerId": "oidc", + "storeToken": False, + "trustEmail": False, + } + ] + }, + { + 'id': 'realm-name', + 'realm': 'realm-name', + 'enabled': True, + 'identityProviders': [ + { + "addReadTokenRoleOnCreate": False, + "alias": "oidc-idp", + "authenticateByDefault": False, + "config": { + "authorizationUrl": "https://idp.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "secret", + "issuer": "https://idp.example.com", + "syncMode": "FORCE", + "tokenUrl": "https://idp.example.com/token", + "userInfoUrl": "https://idp.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP", + "enabled": True, + "firstBrokerLoginFlowAlias": "first broker login", + 
"internalId": "7ab437d5-f2bb-4ecc-91a8-315349454da6", + "linkOnly": False, + "providerId": "oidc", + "storeToken": False, + "trustEmail": False, + } + ] + }, + ] return_value_idp_updated = [None] return_value_mapper_updated = [None] return_value_mapper_created = [None] @@ -456,15 +554,16 @@ class TestKeycloakIdentityProvider(ModuleTestCase): with mock_good_connection(): with patch_keycloak_api(get_identity_provider=return_value_idp_get, get_identity_provider_mappers=return_value_mappers_get, update_identity_provider=return_value_idp_updated, update_identity_provider_mapper=return_value_mapper_updated, - create_identity_provider_mapper=return_value_mapper_created) \ + create_identity_provider_mapper=return_value_mapper_created, get_realm_by_id=return_value_realm_get) \ as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider, mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper, - mock_delete_identity_provider_mapper): + mock_delete_identity_provider_mapper, mock_get_realm_by_id): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() self.assertEqual(len(mock_get_identity_provider.mock_calls), 2) self.assertEqual(len(mock_get_identity_provider_mappers.mock_calls), 5) + self.assertEqual(len(mock_get_realm_by_id.mock_calls), 2) self.assertEqual(len(mock_update_identity_provider.mock_calls), 1) self.assertEqual(len(mock_update_identity_provider_mapper.mock_calls), 1) self.assertEqual(len(mock_create_identity_provider_mapper.mock_calls), 1) @@ -472,6 +571,156 @@ class TestKeycloakIdentityProvider(ModuleTestCase): # Verify that the module's changed status matches what is expected self.assertIs(exec_info.exception.args[0]['changed'], changed) + def test_no_change_when_present(self): + """Update existing identity provider""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + "addReadTokenRoleOnCreate": False, + "alias": "oidc-idp", + "authenticateByDefault": False, + "config": { + "authorizationUrl": "https://idp.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "secret", + "issuer": "https://idp.example.com", + "syncMode": "FORCE", + "tokenUrl": "https://idp.example.com/token", + "userInfoUrl": "https://idp.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP changeme", + "enabled": True, + "firstBrokerLoginFlowAlias": "first broker login", + "linkOnly": False, + "providerId": "oidc", + "storeToken": False, + "trustEmail": False, + 'mappers': [{ + 'name': "username", + 'identityProviderAlias': "oidc-idp", + 'identityProviderMapper': "oidc-user-attribute-idp-mapper", + 'config': { + 'claim': "username", + 'user.attribute': "username", + 'syncMode': "INHERIT", + } + }] + } + return_value_idp_get = [ + { + "addReadTokenRoleOnCreate": False, + "alias": "oidc-idp", + "authenticateByDefault": False, + "config": { + "authorizationUrl": "https://idp.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "**********", + "issuer": "https://idp.example.com", + "syncMode": "FORCE", + "tokenUrl": "https://idp.example.com/token", + "userInfoUrl": "https://idp.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP changeme", + "enabled": True, + "firstBrokerLoginFlowAlias": "first broker login", + 
"internalId": "7ab437d5-f2bb-4ecc-91a8-315349454da6", + "linkOnly": False, + "providerId": "oidc", + "storeToken": False, + "trustEmail": False, + }, + ] + return_value_mappers_get = [ + [{ + 'config': { + 'claim': "username", + 'syncMode': "INHERIT", + 'user.attribute': "username" + }, + "id": "616f11ba-b9ae-42ae-bd1b-bc618741c10b", + 'identityProviderAlias': "oidc-idp", + 'identityProviderMapper': "oidc-user-attribute-idp-mapper", + 'name': "username" + }], + [{ + 'config': { + 'claim': "username", + 'syncMode': "INHERIT", + 'user.attribute': "username" + }, + "id": "616f11ba-b9ae-42ae-bd1b-bc618741c10b", + 'identityProviderAlias': "oidc-idp", + 'identityProviderMapper': "oidc-user-attribute-idp-mapper", + 'name': "username" + }] + ] + return_value_realm_get = [ + { + 'id': 'realm-name', + 'realm': 'realm-name', + 'enabled': True, + 'identityProviders': [ + { + "addReadTokenRoleOnCreate": False, + "alias": "oidc-idp", + "authenticateByDefault": False, + "config": { + "authorizationUrl": "https://idp.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "secret", + "issuer": "https://idp.example.com", + "syncMode": "FORCE", + "tokenUrl": "https://idp.example.com/token", + "userInfoUrl": "https://idp.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP", + "enabled": True, + "firstBrokerLoginFlowAlias": "first broker login", + "internalId": "7ab437d5-f2bb-4ecc-91a8-315349454da6", + "linkOnly": False, + "providerId": "oidc", + "storeToken": False, + "trustEmail": False, + } + ] + } + ] + return_value_idp_updated = [None] + return_value_mapper_updated = [None] + return_value_mapper_created = [None] + changed = False + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_identity_provider=return_value_idp_get, get_identity_provider_mappers=return_value_mappers_get, + update_identity_provider=return_value_idp_updated, update_identity_provider_mapper=return_value_mapper_updated, + create_identity_provider_mapper=return_value_mapper_created, get_realm_by_id=return_value_realm_get) \ + as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider, + mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper, + mock_delete_identity_provider_mapper, mock_get_realm_by_id): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_identity_provider.mock_calls), 1) + self.assertEqual(len(mock_get_identity_provider_mappers.mock_calls), 2) + self.assertEqual(len(mock_get_realm_by_id.mock_calls), 1) + self.assertEqual(len(mock_update_identity_provider.mock_calls), 0) + self.assertEqual(len(mock_update_identity_provider_mapper.mock_calls), 0) + self.assertEqual(len(mock_create_identity_provider_mapper.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + def test_delete_when_absent(self): """Remove an absent identity provider""" @@ -497,7 +746,7 @@ class TestKeycloakIdentityProvider(ModuleTestCase): with patch_keycloak_api(get_identity_provider=return_value_idp_get) \ as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider, mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper, - 
mock_delete_identity_provider_mapper): + mock_delete_identity_provider_mapper, mock_get_realm_by_id): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -560,6 +809,38 @@ class TestKeycloakIdentityProvider(ModuleTestCase): "name": "email" }] ] + return_value_realm_get = [ + { + 'id': 'realm-name', + 'realm': 'realm-name', + 'enabled': True, + 'identityProviders': [ + { + "alias": "oidc", + "displayName": "", + "internalId": "2bca4192-e816-4beb-bcba-190164eb55b8", + "providerId": "oidc", + "enabled": True, + "updateProfileFirstLoginMode": "on", + "trustEmail": False, + "storeToken": False, + "addReadTokenRoleOnCreate": False, + "authenticateByDefault": False, + "linkOnly": False, + "config": { + "validateSignature": "false", + "pkceEnabled": "false", + "tokenUrl": "https://localhost:8000", + "clientId": "asdf", + "authorizationUrl": "https://localhost:8000", + "clientAuthMethod": "client_secret_post", + "clientSecret": "real_secret", + "guiOrder": "0" + } + }, + ] + }, + ] return_value_idp_deleted = [None] changed = True @@ -569,15 +850,16 @@ class TestKeycloakIdentityProvider(ModuleTestCase): with mock_good_connection(): with patch_keycloak_api(get_identity_provider=return_value_idp_get, get_identity_provider_mappers=return_value_mappers_get, - delete_identity_provider=return_value_idp_deleted) \ + delete_identity_provider=return_value_idp_deleted, get_realm_by_id=return_value_realm_get) \ as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider, mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper, - mock_delete_identity_provider_mapper): + mock_delete_identity_provider_mapper, mock_get_realm_by_id): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() self.assertEqual(len(mock_get_identity_provider.mock_calls), 1) self.assertEqual(len(mock_get_identity_provider_mappers.mock_calls), 1) + self.assertEqual(len(mock_get_realm_by_id.mock_calls), 1) self.assertEqual(len(mock_delete_identity_provider.mock_calls), 1) # Verify that the module's changed status matches what is expected diff --git a/tests/unit/plugins/modules/test_keycloak_realm_keys.py b/tests/unit/plugins/modules/test_keycloak_realm_keys.py new file mode 100644 index 0000000000..628fa54f31 --- /dev/null +++ b/tests/unit/plugins/modules/test_keycloak_realm_keys.py @@ -0,0 +1,380 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2021, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from contextlib import contextmanager + +from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args + +from ansible_collections.community.general.plugins.modules import keycloak_realm_key + +from itertools import count + +from ansible.module_utils.six import StringIO + + +@contextmanager +def patch_keycloak_api(get_components=None, get_component=None, create_component=None, update_component=None, delete_component=None): + """Mock context manager for patching the methods in KeycloakAPI + """ + + obj = keycloak_realm_key.KeycloakAPI + with 
patch.object(obj, 'get_components', side_effect=get_components) \ + as mock_get_components: + with patch.object(obj, 'get_component', side_effect=get_component) \ + as mock_get_component: + with patch.object(obj, 'create_component', side_effect=create_component) \ + as mock_create_component: + with patch.object(obj, 'update_component', side_effect=update_component) \ + as mock_update_component: + with patch.object(obj, 'delete_component', side_effect=delete_component) \ + as mock_delete_component: + yield mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component + + +def get_response(object_with_future_response, method, get_id_call_count): + if callable(object_with_future_response): + return object_with_future_response() + if isinstance(object_with_future_response, dict): + return get_response( + object_with_future_response[method], method, get_id_call_count) + if isinstance(object_with_future_response, list): + call_number = next(get_id_call_count) + return get_response( + object_with_future_response[call_number], method, get_id_call_count) + return object_with_future_response + + +def build_mocked_request(get_id_user_count, response_dict): + def _mocked_requests(*args, **kwargs): + url = args[0] + method = kwargs['method'] + future_response = response_dict.get(url, None) + return get_response(future_response, method, get_id_user_count) + return _mocked_requests + + +def create_wrapper(text_as_string): + """Allow to mock many times a call to one address. + Without this function, the StringIO is empty for the second call. + """ + def _create_wrapper(): + return StringIO(text_as_string) + return _create_wrapper + + +def mock_good_connection(): + token_response = { + 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), } + return patch( + 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url', + side_effect=build_mocked_request(count(), token_response), + autospec=True + ) + + +class TestKeycloakRealmKeys(ModuleTestCase): + def setUp(self): + super(TestKeycloakRealmKeys, self).setUp() + self.module = keycloak_realm_key + + def test_create_when_absent(self): + """Add a new realm key""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'parent_id': 'realm-name', + 'name': 'testkey', + 'state': 'present', + 'provider_id': 'rsa', + 'config': { + 'priority': 0, + 'enabled': True, + 'private_key': 'privatekey', + 'algorithm': 'RS256', + 'certificate': 'foo', + }, + } + return_value_component_create = [ + { + "id": "ebb7d999-60cc-4dfe-ab79-48f7bbd9d4d9", + "name": "testkey", + "providerId": "rsa", + "parentId": "90c8fef9-15f8-4d5b-8b22-44e2e1cdcd09", + "config": { + "privateKey": [ + "**********" + ], + "certificate": [ + "foo" + ], + "active": [ + "true" + ], + "priority": [ + "122" + ], + "enabled": [ + "true" + ], + "algorithm": [ + "RS256" + ] + } + } + ] + # get before_comp, get default_mapper, get after_mapper + return_value_components_get = [ + [], [], [] + ] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_components_get, create_component=return_value_component_create) \ + as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with 
self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_components.mock_calls), 1) + self.assertEqual(len(mock_get_component.mock_calls), 0) + self.assertEqual(len(mock_create_component.mock_calls), 1) + self.assertEqual(len(mock_update_component.mock_calls), 0) + self.assertEqual(len(mock_delete_component.mock_calls), 0) + + # must not contain parent_id + mock_create_component.assert_called_once_with({ + 'name': 'testkey', + 'providerId': 'rsa', + 'providerType': 'org.keycloak.keys.KeyProvider', + 'config': { + 'priority': ['0'], + 'enabled': ['true'], + 'privateKey': ['privatekey'], + 'algorithm': ['RS256'], + 'certificate': ['foo'], + 'active': ['true'], + }, + }, 'realm-name') + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_create_when_present(self): + """Update existing realm key""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'parent_id': 'realm-name', + 'name': 'testkey', + 'state': 'present', + 'provider_id': 'rsa', + 'config': { + 'priority': 0, + 'enabled': True, + 'private_key': 'privatekey', + 'algorithm': 'RS256', + 'certificate': 'foo', + }, + } + return_value_components_get = [ + [ + + { + "id": "c1a957aa-3df0-4f70-9418-44202bf4ae1f", + "name": "testkey", + "providerId": "rsa", + "providerType": "org.keycloak.keys.KeyProvider", + "parentId": "90c8fef9-15f8-4d5b-8b22-44e2e1cdcd09", + "config": { + "privateKey": [ + "**********" + ], + "certificate": [ + "foo" + ], + "active": [ + "true" + ], + "priority": [ + "122" + ], + "enabled": [ + "true" + ], + "algorithm": [ + "RS256" + ] + } + }, + ], + [], + [] + ] + return_value_component_update = [ + None + ] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_components_get, + update_component=return_value_component_update) \ + as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_components.mock_calls), 1) + self.assertEqual(len(mock_get_component.mock_calls), 0) + self.assertEqual(len(mock_create_component.mock_calls), 0) + self.assertEqual(len(mock_update_component.mock_calls), 1) + self.assertEqual(len(mock_delete_component.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_delete_when_absent(self): + """Remove an absent realm key""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'parent_id': 'realm-name', + 'name': 'testkey', + 'state': 'absent', + 'provider_id': 'rsa', + 'config': { + 'priority': 0, + 'enabled': True, + 'private_key': 'privatekey', + 'algorithm': 'RS256', + 'certificate': 'foo', + }, + } + return_value_components_get = [ + [] + ] + changed = False + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_components_get) \ + as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as 
exec_info: + self.module.main() + + self.assertEqual(len(mock_get_components.mock_calls), 1) + self.assertEqual(len(mock_get_component.mock_calls), 0) + self.assertEqual(len(mock_create_component.mock_calls), 0) + self.assertEqual(len(mock_update_component.mock_calls), 0) + self.assertEqual(len(mock_delete_component.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_delete_when_present(self): + """Remove an existing realm key""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'parent_id': 'realm-name', + 'name': 'testkey', + 'state': 'absent', + 'provider_id': 'rsa', + 'config': { + 'priority': 0, + 'enabled': True, + 'private_key': 'privatekey', + 'algorithm': 'RS256', + 'certificate': 'foo', + }, + } + + return_value_components_get = [ + [ + + { + "id": "c1a957aa-3df0-4f70-9418-44202bf4ae1f", + "name": "testkey", + "providerId": "rsa", + "providerType": "org.keycloak.keys.KeyProvider", + "parentId": "90c8fef9-15f8-4d5b-8b22-44e2e1cdcd09", + "config": { + "privateKey": [ + "**********" + ], + "certificate": [ + "foo" + ], + "active": [ + "true" + ], + "priority": [ + "122" + ], + "enabled": [ + "true" + ], + "algorithm": [ + "RS256" + ] + } + }, + ], + [], + [] + ] + return_value_component_delete = [ + None + ] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_components_get, delete_component=return_value_component_delete) \ + as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_components.mock_calls), 1) + self.assertEqual(len(mock_get_component.mock_calls), 0) + self.assertEqual(len(mock_create_component.mock_calls), 0) + self.assertEqual(len(mock_update_component.mock_calls), 0) + self.assertEqual(len(mock_delete_component.mock_calls), 1) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/unit/plugins/modules/test_keycloak_realm_keys_metadata_info.py b/tests/unit/plugins/modules/test_keycloak_realm_keys_metadata_info.py new file mode 100644 index 0000000000..14d36f6aab --- /dev/null +++ b/tests/unit/plugins/modules/test_keycloak_realm_keys_metadata_info.py @@ -0,0 +1,183 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2021, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from contextlib import contextmanager +from itertools import count + +from ansible.module_utils.six import StringIO +from ansible_collections.community.general.plugins.modules import \ + keycloak_realm_keys_metadata_info +from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.general.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, ModuleTestCase, set_module_args) + + +@contextmanager +def 
patch_keycloak_api(side_effect): + """Mock context manager for patching the methods in PwPolicyIPAClient that contact the IPA server + + Patches the `login` and `_post_json` methods + + Keyword arguments are passed to the mock object that patches `_post_json` + + No arguments are passed to the mock object that patches `login` because no tests require it + + Example:: + + with patch_ipa(return_value={}) as (mock_login, mock_post): + ... + """ + + obj = keycloak_realm_keys_metadata_info.KeycloakAPI + with patch.object(obj, "get_realm_keys_metadata_by_id", side_effect=side_effect) as obj_mocked: + yield obj_mocked + + +def get_response(object_with_future_response, method, get_id_call_count): + if callable(object_with_future_response): + return object_with_future_response() + if isinstance(object_with_future_response, dict): + return get_response( + object_with_future_response[method], method, get_id_call_count + ) + if isinstance(object_with_future_response, list): + call_number = next(get_id_call_count) + return get_response( + object_with_future_response[call_number], method, get_id_call_count + ) + return object_with_future_response + + +def build_mocked_request(get_id_user_count, response_dict): + def _mocked_requests(*args, **kwargs): + url = args[0] + method = kwargs["method"] + future_response = response_dict.get(url, None) + return get_response(future_response, method, get_id_user_count) + + return _mocked_requests + + +def create_wrapper(text_as_string): + """Allow to mock many times a call to one address. + Without this function, the StringIO is empty for the second call. + """ + + def _create_wrapper(): + return StringIO(text_as_string) + + return _create_wrapper + + +def mock_good_connection(): + token_response = { + "http://keycloak.url/auth/realms/master/protocol/openid-connect/token": create_wrapper( + '{"access_token": "alongtoken"}' + ), + } + return patch( + "ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url", + side_effect=build_mocked_request(count(), token_response), + autospec=True, + ) + + +class TestKeycloakRealmRole(ModuleTestCase): + def setUp(self): + super(TestKeycloakRealmRole, self).setUp() + self.module = keycloak_realm_keys_metadata_info + + def test_get_public_info(self): + """Get realm public info""" + + module_args = { + "auth_keycloak_url": "http://keycloak.url/auth", + "token": "{{ access_token }}", + "realm": "my-realm", + } + return_value = [ + { + "active": { + "AES": "aba3778d-d69d-4240-a578-a30720dbd3ca", + "HS512": "6e4fe29d-a7e4-472b-a348-298d8ae45dcc", + "RS256": "jaON84xLYg2fsKiV4p3wZag_S8MTjAp-dkpb1kRqzEs", + "RSA-OAEP": "3i_GikMqBBxtqhWXwpucxMvwl55jYlhiNIvxDTgNAEk", + }, + "keys": [ + { + "algorithm": "HS512", + "kid": "6e4fe29d-a7e4-472b-a348-298d8ae45dcc", + "providerId": "225dbe0b-3fc4-4e0d-8479-90a0cbc8adf7", + "providerPriority": 100, + "status": "ACTIVE", + "type": "OCT", + "use": "SIG", + }, + { + "algorithm": "RS256", + "certificate": "MIIC…", + "kid": "jaON84xLYg2fsKiV4p3wZag_S8MTjAp-dkpb1kRqzEs", + "providerId": "98c1ebeb-c690-4c5c-8b32-81bebe264cda", + "providerPriority": 100, + "publicKey": "MIIB…", + "status": "ACTIVE", + "type": "RSA", + "use": "SIG", + "validTo": 2034748624000, + }, + { + "algorithm": "AES", + "kid": "aba3778d-d69d-4240-a578-a30720dbd3ca", + "providerId": "99c70057-9b8d-4177-a83c-de2d081139e8", + "providerPriority": 100, + "status": "ACTIVE", + "type": "OCT", + "use": "ENC", + }, + { + "algorithm": "RSA-OAEP", + "certificate": "MIIC…", + "kid": 
"3i_GikMqBBxtqhWXwpucxMvwl55jYlhiNIvxDTgNAEk", + "providerId": "ab3de3fb-a32d-4be8-8324-64aa48d14c36", + "providerPriority": 100, + "publicKey": "MIIB…", + "status": "ACTIVE", + "type": "RSA", + "use": "ENC", + "validTo": 2034748625000, + }, + ], + } + ] + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(side_effect=return_value) as ( + mock_get_realm_keys_metadata_by_id + ): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + result = exec_info.exception.args[0] + self.assertIs(result["changed"], False) + self.assertEqual( + result["msg"], "Get realm keys metadata successful for ID my-realm" + ) + self.assertEqual(result["keys_metadata"], return_value[0]) + + self.assertEqual(len(mock_get_realm_keys_metadata_by_id.mock_calls), 1) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/unit/plugins/modules/test_keycloak_user_federation.py b/tests/unit/plugins/modules/test_keycloak_user_federation.py index 523ef9f210..81fd65e108 100644 --- a/tests/unit/plugins/modules/test_keycloak_user_federation.py +++ b/tests/unit/plugins/modules/test_keycloak_user_federation.py @@ -144,8 +144,9 @@ class TestKeycloakUserFederation(ModuleTestCase): } } ] + # get before_comp, get default_mapper, get after_mapper return_value_components_get = [ - [], [] + [], [], [] ] changed = True @@ -159,7 +160,7 @@ class TestKeycloakUserFederation(ModuleTestCase): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() - self.assertEqual(len(mock_get_components.mock_calls), 1) + self.assertEqual(len(mock_get_components.mock_calls), 3) self.assertEqual(len(mock_get_component.mock_calls), 0) self.assertEqual(len(mock_create_component.mock_calls), 1) self.assertEqual(len(mock_update_component.mock_calls), 0) @@ -228,6 +229,7 @@ class TestKeycloakUserFederation(ModuleTestCase): } } ], + [], [] ] return_value_component_get = [ @@ -281,7 +283,7 @@ class TestKeycloakUserFederation(ModuleTestCase): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() - self.assertEqual(len(mock_get_components.mock_calls), 2) + self.assertEqual(len(mock_get_components.mock_calls), 3) self.assertEqual(len(mock_get_component.mock_calls), 1) self.assertEqual(len(mock_create_component.mock_calls), 0) self.assertEqual(len(mock_update_component.mock_calls), 1) @@ -344,7 +346,47 @@ class TestKeycloakUserFederation(ModuleTestCase): ] } return_value_components_get = [ - [], [] + [], + # exemplary default mapper created by keylocak + [ + { + "config": { + "always.read.value.from.ldap": "false", + "is.mandatory.in.ldap": "false", + "ldap.attribute": "mail", + "read.only": "true", + "user.model.attribute": "email" + }, + "id": "77e1763f-c51a-4286-bade-75577d64803c", + "name": "email", + "parentId": "e5f48aa3-b56b-4983-a8ad-2c7b8b5e77cb", + "providerId": "user-attribute-ldap-mapper", + "providerType": "org.keycloak.storage.ldap.mappers.LDAPStorageMapper" + }, + ], + [ + { + "id": "2dfadafd-8b34-495f-a98b-153e71a22311", + "name": "full name", + "providerId": "full-name-ldap-mapper", + "providerType": "org.keycloak.storage.ldap.mappers.LDAPStorageMapper", + "parentId": "eb691537-b73c-4cd8-b481-6031c26499d8", + "config": { + "ldap.full.name.attribute": [ + "cn" + ], + "read.only": [ + "true" + ], + "write.only": [ + "false" + ] + } + } + ] + ] + return_value_component_delete = [ + None ] return_value_component_create = [ { @@ -462,11 +504,11 @@ class TestKeycloakUserFederation(ModuleTestCase): with 
self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() - self.assertEqual(len(mock_get_components.mock_calls), 2) + self.assertEqual(len(mock_get_components.mock_calls), 3) self.assertEqual(len(mock_get_component.mock_calls), 0) self.assertEqual(len(mock_create_component.mock_calls), 2) self.assertEqual(len(mock_update_component.mock_calls), 0) - self.assertEqual(len(mock_delete_component.mock_calls), 0) + self.assertEqual(len(mock_delete_component.mock_calls), 1) # Verify that the module's changed status matches what is expected self.assertIs(exec_info.exception.args[0]['changed'], changed) diff --git a/tests/unit/plugins/modules/test_keycloak_userprofile.py b/tests/unit/plugins/modules/test_keycloak_userprofile.py new file mode 100644 index 0000000000..3ae01bbb8b --- /dev/null +++ b/tests/unit/plugins/modules/test_keycloak_userprofile.py @@ -0,0 +1,868 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2024, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from contextlib import contextmanager + +from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args + +from ansible_collections.community.general.plugins.modules import keycloak_userprofile + +from itertools import count + +from json import dumps + +from ansible.module_utils.six import StringIO + + +@contextmanager +def patch_keycloak_api(get_components=None, get_component=None, create_component=None, update_component=None, delete_component=None): + """Mock context manager for patching the methods in KeycloakAPI + """ + + obj = keycloak_userprofile.KeycloakAPI + with patch.object(obj, 'get_components', side_effect=get_components) as mock_get_components: + with patch.object(obj, 'get_component', side_effect=get_component) as mock_get_component: + with patch.object(obj, 'create_component', side_effect=create_component) as mock_create_component: + with patch.object(obj, 'update_component', side_effect=update_component) as mock_update_component: + with patch.object(obj, 'delete_component', side_effect=delete_component) as mock_delete_component: + yield mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component + + +def get_response(object_with_future_response, method, get_id_call_count): + if callable(object_with_future_response): + return object_with_future_response() + if isinstance(object_with_future_response, dict): + return get_response(object_with_future_response[method], method, get_id_call_count) + if isinstance(object_with_future_response, list): + call_number = next(get_id_call_count) + return get_response(object_with_future_response[call_number], method, get_id_call_count) + return object_with_future_response + + +def build_mocked_request(get_id_user_count, response_dict): + def _mocked_requests(*args, **kwargs): + url = args[0] + method = kwargs['method'] + future_response = response_dict.get(url, None) + return get_response(future_response, method, get_id_user_count) + return _mocked_requests + + +def create_wrapper(text_as_string): + """Allow to mock many times a call to one address. 
+ Without this function, the StringIO is empty for the second call. + """ + def _create_wrapper(): + return StringIO(text_as_string) + return _create_wrapper + + +def mock_good_connection(): + token_response = { + 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), + } + return patch( + 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url', + side_effect=build_mocked_request(count(), token_response), + autospec=True + ) + + +class TestKeycloakUserprofile(ModuleTestCase): + def setUp(self): + super(TestKeycloakUserprofile, self).setUp() + self.module = keycloak_userprofile + + def test_create_when_absent(self): + """Add a new userprofile""" + + module_args = { + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_realm": "master", + "auth_username": "admin", + "auth_password": "admin", + "parent_id": "realm-name", + "state": "present", + "provider_id": "declarative-user-profile", + "config": { + "kc_user_profile_config": [ + { + "attributes": [ + { + "annotations": {}, + "displayName": "${username}", + "multivalued": False, + "name": "username", + "permissions": { + "edit": [ + "admin", + "user" + ], + "view": [ + "admin", + "user" + ] + }, + "required": None, + "validations": { + "length": { + "max": 255, + "min": 3 + }, + "up_username_not_idn_homograph": {}, + "username_prohibited_characters": {} + } + }, + { + "annotations": {}, + "displayName": "${email}", + "multivalued": False, + "name": "email", + "permissions": { + "edit": [ + "admin", + "user" + ], + "view": [ + "admin", + "user" + ] + }, + "required": { + "roles": [ + "user" + ] + }, + "validations": { + "email": {}, + "length": { + "max": 255 + } + } + }, + { + "annotations": {}, + "displayName": "${firstName}", + "multivalued": False, + "name": "firstName", + "permissions": { + "edit": [ + "admin", + "user" + ], + "view": [ + "admin", + "user" + ] + }, + "required": { + "roles": [ + "user" + ] + }, + "validations": { + "length": { + "max": 255 + }, + "person_name_prohibited_characters": {} + } + }, + { + "annotations": {}, + "displayName": "${lastName}", + "multivalued": False, + "name": "lastName", + "permissions": { + "edit": [ + "admin", + "user" + ], + "view": [ + "admin", + "user" + ] + }, + "required": { + "roles": [ + "user" + ] + }, + "validations": { + "length": { + "max": 255 + }, + "person_name_prohibited_characters": {} + } + } + ], + "groups": [ + { + "displayDescription": "Attributes, which refer to user metadata", + "displayHeader": "User metadata", + "name": "user-metadata" + } + ], + } + ] + } + } + return_value_component_create = [ + { + "id": "4ba43451-6bb4-4b50-969f-e890539f15e3", + "parentId": "realm-name", + "providerId": "declarative-user-profile", + "providerType": "org.keycloak.userprofile.UserProfileProvider", + "config": { + "kc.user.profile.config": [ + { + "attributes": [ + { + "name": "username", + "displayName": "${username}", + "validations": { + "length": { + "min": 3, + "max": 255 + }, + "username-prohibited-characters": {}, + "up-username-not-idn-homograph": {} + }, + "permissions": { + "view": [ + "admin", + "user" + ], + "edit": [ + "admin", + "user" + ] + }, + "multivalued": False, + "annotations": {}, + "required": None + }, + { + "name": "email", + "displayName": "${email}", + "validations": { + "email": {}, + "length": { + "max": 255 + } + }, + "required": { + "roles": [ + "user" + ] + }, + "permissions": { + "view": [ + "admin", + "user" + ], + "edit": [ + "admin", + 
"user" + ] + }, + "multivalued": False, + "annotations": {} + }, + { + "name": "firstName", + "displayName": "${firstName}", + "validations": { + "length": { + "max": 255 + }, + "person-name-prohibited-characters": {} + }, + "required": { + "roles": [ + "user" + ] + }, + "permissions": { + "view": [ + "admin", + "user" + ], + "edit": [ + "admin", + "user" + ] + }, + "multivalued": False, + "annotations": {} + }, + { + "name": "lastName", + "displayName": "${lastName}", + "validations": { + "length": { + "max": 255 + }, + "person-name-prohibited-characters": {} + }, + "required": { + + + "roles": [ + "user" + ] + }, + "permissions": { + "view": [ + "admin", + "user" + ], + "edit": [ + "admin", + "user" + ] + }, + "multivalued": False, + "annotations": {} + } + ], + "groups": [ + { + "name": "user-metadata", + "displayHeader": "User metadata", + "displayDescription": "Attributes, which refer to user metadata", + } + ], + } + ] + } + } + ] + return_value_get_components_get = [ + [], [] + ] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_get_components_get, create_component=return_value_component_create) as ( + mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_components.mock_calls), 1) + self.assertEqual(len(mock_get_component.mock_calls), 0) + self.assertEqual(len(mock_create_component.mock_calls), 1) + self.assertEqual(len(mock_update_component.mock_calls), 0) + self.assertEqual(len(mock_delete_component.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_create_when_present(self): + """Update existing userprofile""" + + module_args = { + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_realm": "master", + "auth_username": "admin", + "auth_password": "admin", + "parent_id": "realm-name", + "state": "present", + "provider_id": "declarative-user-profile", + "config": { + "kc_user_profile_config": [ + { + "attributes": [ + { + "annotations": {}, + "displayName": "${username}", + "multivalued": False, + "name": "username", + "permissions": { + "edit": [ + "admin", + "user" + ], + "view": [ + "admin", + "user" + ] + }, + "required": None, + "validations": { + "length": { + "max": 255, + "min": 3 + }, + "up_username_not_idn_homograph": {}, + "username_prohibited_characters": {} + } + }, + { + "annotations": {}, + "displayName": "${email}", + "multivalued": False, + "name": "email", + "permissions": { + "edit": [ + "admin", + "user" + ], + "view": [ + "admin", + "user" + ] + }, + "required": { + "roles": [ + "user" + ] + }, + "validations": { + "email": {}, + "length": { + "max": 255 + } + } + }, + { + "annotations": {}, + "displayName": "${firstName}", + "multivalued": False, + "name": "firstName", + "permissions": { + "edit": [ + "admin", + "user" + ], + "view": [ + "admin", + "user" + ] + }, + "required": { + "roles": [ + "user" + ] + }, + "validations": { + "length": { + "max": 255 + }, + "person_name_prohibited_characters": {} + } + }, + { + "annotations": {}, + "displayName": "${lastName}", + "multivalued": False, + "name": "lastName", + "permissions": { + "edit": [ + "admin", + "user" + ], + "view": [ + "admin", + "user" + ] + }, + "required": { + "roles": [ + "user" + ] + }, + "validations": { 
+ "length": { + "max": 255 + }, + "person_name_prohibited_characters": {} + } + } + ], + "groups": [ + { + "displayDescription": "Attributes, which refer to user metadata", + "displayHeader": "User metadata", + "name": "user-metadata" + } + ], + } + ] + } + } + return_value_get_components_get = [ + [ + { + "id": "4ba43451-6bb4-4b50-969f-e890539f15e3", + "parentId": "realm-1", + "providerId": "declarative-user-profile", + "providerType": "org.keycloak.userprofile.UserProfileProvider", + "config": { + "kc.user.profile.config": [ + dumps({ + "attributes": [ + { + "name": "username", + "displayName": "${username}", + "validations": { + "length": { + "min": 3, + "max": 255 + }, + "username-prohibited-characters": {}, + "up-username-not-idn-homograph": {} + }, + "permissions": { + "view": [ + "admin", + "user" + ], + "edit": [ + "admin", + "user" + ] + }, + "multivalued": False, + "annotations": {}, + "required": None + }, + { + "name": "email", + "displayName": "${email}", + "validations": { + "email": {}, + "length": { + "max": 255 + } + }, + "required": { + "roles": [ + "user" + ] + }, + "permissions": { + "view": [ + "admin", + "user" + ], + "edit": [ + "admin", + "user" + ] + }, + "multivalued": False, + "annotations": {} + }, + { + "name": "firstName", + "displayName": "${firstName}", + "validations": { + "length": { + "max": 255 + }, + "person-name-prohibited-characters": {} + }, + "required": { + "roles": [ + "user" + ] + }, + "permissions": { + "view": [ + "admin", + "user" + ], + "edit": [ + "admin", + "user" + ] + }, + "multivalued": False, + "annotations": {} + }, + { + "name": "lastName", + "displayName": "${lastName}", + "validations": { + "length": { + "max": 255 + }, + "person-name-prohibited-characters": {} + }, + "required": { + "roles": [ + "user" + ] + }, + "permissions": { + "view": [ + "admin", + "user" + ], + "edit": [ + "admin", + "user" + ] + }, + "multivalued": False, + "annotations": {} + } + ], + "groups": [ + { + "name": "user-metadata", + "displayHeader": "User metadata", + "displayDescription": "Attributes, which refer to user metadata", + } + ], + }) + ] + } + } + ], + [] + ] + return_value_component_update = [ + None + ] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_get_components_get, + update_component=return_value_component_update) as ( + mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_components.mock_calls), 1) + self.assertEqual(len(mock_get_component.mock_calls), 0) + self.assertEqual(len(mock_create_component.mock_calls), 0) + self.assertEqual(len(mock_update_component.mock_calls), 1) + self.assertEqual(len(mock_delete_component.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_delete_when_absent(self): + """Remove an absent userprofile""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'parent_id': 'realm-name', + 'provider_id': 'declarative-user-profile', + 'state': 'absent', + } + return_value_get_components_get = [ + [] + ] + changed = False + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with 
patch_keycloak_api(get_components=return_value_get_components_get) as ( + mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_components.mock_calls), 1) + self.assertEqual(len(mock_get_component.mock_calls), 0) + self.assertEqual(len(mock_create_component.mock_calls), 0) + self.assertEqual(len(mock_update_component.mock_calls), 0) + self.assertEqual(len(mock_delete_component.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_delete_when_present(self): + """Remove an existing userprofile""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'parent_id': 'realm-name', + 'provider_id': 'declarative-user-profile', + 'state': 'absent', + } + return_value_get_components_get = [ + [ + { + "id": "4ba43451-6bb4-4b50-969f-e890539f15e3", + "parentId": "realm-1", + "providerId": "declarative-user-profile", + "providerType": "org.keycloak.userprofile.UserProfileProvider", + "config": { + "kc.user.profile.config": [ + dumps({ + "attributes": [ + { + "name": "username", + "displayName": "${username}", + "validations": { + "length": { + "min": 3, + "max": 255 + }, + "username-prohibited-characters": {}, + "up-username-not-idn-homograph": {} + }, + "permissions": { + "view": [ + "admin", + "user" + ], + "edit": [ + "admin", + "user" + ] + }, + "multivalued": False, + "annotations": {}, + "required": None + }, + { + "name": "email", + "displayName": "${email}", + "validations": { + "email": {}, + "length": { + "max": 255 + } + }, + "required": { + "roles": [ + "user" + ] + }, + "permissions": { + "view": [ + "admin", + "user" + ], + "edit": [ + "admin", + "user" + ] + }, + "multivalued": False, + "annotations": {} + }, + { + "name": "firstName", + "displayName": "${firstName}", + "validations": { + "length": { + "max": 255 + }, + "person-name-prohibited-characters": {} + }, + "required": { + "roles": [ + "user" + ] + }, + "permissions": { + "view": [ + "admin", + "user" + ], + "edit": [ + "admin", + "user" + ] + }, + "multivalued": False, + "annotations": {} + }, + { + "name": "lastName", + "displayName": "${lastName}", + "validations": { + "length": { + "max": 255 + }, + "person-name-prohibited-characters": {} + }, + "required": { + "roles": [ + "user" + ] + }, + "permissions": { + "view": [ + "admin", + "user" + ], + "edit": [ + "admin", + "user" + ] + }, + "multivalued": False, + "annotations": {} + } + ], + "groups": [ + { + "name": "user-metadata", + "displayHeader": "User metadata", + "displayDescription": "Attributes, which refer to user metadata", + } + ], + }) + ] + } + } + ], + [] + ] + return_value_component_delete = [ + None + ] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_get_components_get, delete_component=return_value_component_delete) as ( + mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_components.mock_calls), 1) + self.assertEqual(len(mock_get_component.mock_calls), 0) + self.assertEqual(len(mock_create_component.mock_calls), 
0) + self.assertEqual(len(mock_update_component.mock_calls), 0) + self.assertEqual(len(mock_delete_component.mock_calls), 1) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/unit/plugins/modules/test_nmcli.py b/tests/unit/plugins/modules/test_nmcli.py index 8c9c007ace..570b04d56f 100644 --- a/tests/unit/plugins/modules/test_nmcli.py +++ b/tests/unit/plugins/modules/test_nmcli.py @@ -4251,6 +4251,7 @@ def test_bond_connection_unchanged(mocked_generic_connection_diff_check, capfd): autoconnect=dict(type='bool', default=True), state=dict(type='str', required=True, choices=['absent', 'present']), conn_name=dict(type='str', required=True), + conn_reload=dict(type='bool', required=False, default=False), master=dict(type='str'), slave_type=dict(type=str, choices=['bond', 'bridge', 'team']), ifname=dict(type='str'), diff --git a/tests/unit/plugins/modules/test_opkg.py b/tests/unit/plugins/modules/test_opkg.py index c42025959e..cfee3e1115 100644 --- a/tests/unit/plugins/modules/test_opkg.py +++ b/tests/unit/plugins/modules/test_opkg.py @@ -8,7 +8,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import opkg -from .helper import Helper +from .helper import Helper, RunCommandMock # pylint: disable=unused-import Helper.from_module(opkg, __name__) diff --git a/tests/unit/plugins/modules/test_opkg.yaml b/tests/unit/plugins/modules/test_opkg.yaml index 6e227dea27..0cef54ac08 100644 --- a/tests/unit/plugins/modules/test_opkg.yaml +++ b/tests/unit/plugins/modules/test_opkg.yaml @@ -10,7 +10,8 @@ state: present output: msg: installed 1 package(s) - run_command_calls: + mocks: + run_command: - command: [/testbin/opkg, list-installed, zlib-dev] environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: false} rc: 0 @@ -39,7 +40,8 @@ state: present output: msg: package(s) already present - run_command_calls: + mocks: + run_command: - command: [/testbin/opkg, list-installed, zlib-dev] environ: *env-def rc: 0 @@ -53,7 +55,8 @@ force: reinstall output: msg: installed 1 package(s) - run_command_calls: + mocks: + run_command: - command: [/testbin/opkg, list-installed, zlib-dev] environ: *env-def rc: 0 @@ -80,7 +83,8 @@ state: present output: msg: installed 1 package(s) - run_command_calls: + mocks: + run_command: - command: [/testbin/opkg, list-installed, zlib-dev] environ: *env-def rc: 0 @@ -109,7 +113,8 @@ update_cache: true output: msg: installed 1 package(s) - run_command_calls: + mocks: + run_command: - command: [/testbin/opkg, update] environ: *env-def rc: 0 diff --git a/tests/unit/plugins/modules/test_puppet.py b/tests/unit/plugins/modules/test_puppet.py index 57f88ada1c..efdb042a5a 100644 --- a/tests/unit/plugins/modules/test_puppet.py +++ b/tests/unit/plugins/modules/test_puppet.py @@ -14,7 +14,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import puppet -from .helper import Helper +from .helper import Helper, RunCommandMock # pylint: disable=unused-import Helper.from_module(puppet, __name__) diff --git a/tests/unit/plugins/modules/test_puppet.yaml b/tests/unit/plugins/modules/test_puppet.yaml index 7909403cfb..668571273c 100644 --- a/tests/unit/plugins/modules/test_puppet.yaml +++ b/tests/unit/plugins/modules/test_puppet.yaml @@ -8,27 +8,28 @@ input: {} output: changed: false - run_command_calls: + mocks: + run_command: - command: [/testbin/puppet, config, print, 
agent_disabled_lockfile] environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: false} rc: 0 out: "blah, anything" err: "" - command: - - /testbin/timeout - - -s - - "9" - - 30m - - /testbin/puppet - - agent - - --onetime - - --no-daemonize - - --no-usecacheonfailure - - --no-splay - - --detailed-exitcodes - - --verbose - - --color - - "0" + - /testbin/timeout + - -s + - "9" + - 30m + - /testbin/puppet + - agent + - --onetime + - --no-daemonize + - --no-usecacheonfailure + - --no-splay + - --detailed-exitcodes + - --verbose + - --color + - "0" environ: *env-def rc: 0 out: "" @@ -38,28 +39,29 @@ certname: potatobox output: changed: false - run_command_calls: + mocks: + run_command: - command: [/testbin/puppet, config, print, agent_disabled_lockfile] environ: *env-def rc: 0 out: "blah, anything" err: "" - command: - - /testbin/timeout - - -s - - "9" - - 30m - - /testbin/puppet - - agent - - --onetime - - --no-daemonize - - --no-usecacheonfailure - - --no-splay - - --detailed-exitcodes - - --verbose - - --color - - "0" - - --certname=potatobox + - /testbin/timeout + - -s + - "9" + - 30m + - /testbin/puppet + - agent + - --onetime + - --no-daemonize + - --no-usecacheonfailure + - --no-splay + - --detailed-exitcodes + - --verbose + - --color + - "0" + - --certname=potatobox environ: *env-def rc: 0 out: "" @@ -69,29 +71,30 @@ tags: [a, b, c] output: changed: false - run_command_calls: + mocks: + run_command: - command: [/testbin/puppet, config, print, agent_disabled_lockfile] environ: *env-def rc: 0 out: "blah, anything" err: "" - command: - - /testbin/timeout - - -s - - "9" - - 30m - - /testbin/puppet - - agent - - --onetime - - --no-daemonize - - --no-usecacheonfailure - - --no-splay - - --detailed-exitcodes - - --verbose - - --color - - "0" - - --tags - - a,b,c + - /testbin/timeout + - -s + - "9" + - 30m + - /testbin/puppet + - agent + - --onetime + - --no-daemonize + - --no-usecacheonfailure + - --no-splay + - --detailed-exitcodes + - --verbose + - --color + - "0" + - --tags + - a,b,c environ: *env-def rc: 0 out: "" @@ -101,29 +104,30 @@ skip_tags: [d, e, f] output: changed: false - run_command_calls: + mocks: + run_command: - command: [/testbin/puppet, config, print, agent_disabled_lockfile] environ: *env-def rc: 0 out: "blah, anything" err: "" - command: - - /testbin/timeout - - -s - - "9" - - 30m - - /testbin/puppet - - agent - - --onetime - - --no-daemonize - - --no-usecacheonfailure - - --no-splay - - --detailed-exitcodes - - --verbose - - --color - - "0" - - --skip_tags - - d,e,f + - /testbin/timeout + - -s + - "9" + - 30m + - /testbin/puppet + - agent + - --onetime + - --no-daemonize + - --no-usecacheonfailure + - --no-splay + - --detailed-exitcodes + - --verbose + - --color + - "0" + - --skip_tags + - d,e,f environ: *env-def rc: 0 out: "" @@ -133,28 +137,29 @@ noop: false output: changed: false - run_command_calls: + mocks: + run_command: - command: [/testbin/puppet, config, print, agent_disabled_lockfile] environ: *env-def rc: 0 out: "blah, anything" err: "" - command: - - /testbin/timeout - - -s - - "9" - - 30m - - /testbin/puppet - - agent - - --onetime - - --no-daemonize - - --no-usecacheonfailure - - --no-splay - - --detailed-exitcodes - - --verbose - - --color - - "0" - - --no-noop + - /testbin/timeout + - -s + - "9" + - 30m + - /testbin/puppet + - agent + - --onetime + - --no-daemonize + - --no-usecacheonfailure + - --no-splay + - --detailed-exitcodes + - --verbose + - --color + - "0" + - --no-noop environ: *env-def rc: 0 out: "" @@ -164,28 +169,29 @@ noop: 
true output: changed: false - run_command_calls: + mocks: + run_command: - command: [/testbin/puppet, config, print, agent_disabled_lockfile] environ: *env-def rc: 0 out: "blah, anything" err: "" - command: - - /testbin/timeout - - -s - - "9" - - 30m - - /testbin/puppet - - agent - - --onetime - - --no-daemonize - - --no-usecacheonfailure - - --no-splay - - --detailed-exitcodes - - --verbose - - --color - - "0" - - --noop + - /testbin/timeout + - -s + - "9" + - 30m + - /testbin/puppet + - agent + - --onetime + - --no-daemonize + - --no-usecacheonfailure + - --no-splay + - --detailed-exitcodes + - --verbose + - --color + - "0" + - --noop environ: *env-def rc: 0 out: "" @@ -195,29 +201,30 @@ waitforlock: 30 output: changed: false - run_command_calls: + mocks: + run_command: - command: [/testbin/puppet, config, print, agent_disabled_lockfile] environ: *env-def rc: 0 out: "blah, anything" err: "" - command: - - /testbin/timeout - - -s - - "9" - - 30m - - /testbin/puppet - - agent - - --onetime - - --no-daemonize - - --no-usecacheonfailure - - --no-splay - - --detailed-exitcodes - - --verbose - - --color - - "0" - - --waitforlock - - "30" + - /testbin/timeout + - -s + - "9" + - 30m + - /testbin/puppet + - agent + - --onetime + - --no-daemonize + - --no-usecacheonfailure + - --no-splay + - --detailed-exitcodes + - --verbose + - --color + - "0" + - --waitforlock + - "30" environ: *env-def rc: 0 out: "" diff --git a/tests/unit/plugins/modules/test_redhat_subscription.py b/tests/unit/plugins/modules/test_redhat_subscription.py index 9473d0d46f..eec8e5cb7b 100644 --- a/tests/unit/plugins/modules/test_redhat_subscription.py +++ b/tests/unit/plugins/modules/test_redhat_subscription.py @@ -199,11 +199,6 @@ TEST_CASES = [ {'check_rc': False}, (0, 'system identity: b26df632-25ed-4452-8f89-0308bfd167cb', '') ), - ( - ['/testbin/subscription-manager', 'remove', '--all'], - {'check_rc': True}, - (0, '', '') - ), ( ['/testbin/subscription-manager', 'unregister'], {'check_rc': True}, diff --git a/tests/unit/plugins/modules/test_redis_info.py b/tests/unit/plugins/modules/test_redis_info.py index cdc78680e5..831b8f4052 100644 --- a/tests/unit/plugins/modules/test_redis_info.py +++ b/tests/unit/plugins/modules/test_redis_info.py @@ -55,6 +55,8 @@ class TestRedisInfoModule(ModuleTestCase): 'password': None, 'ssl': False, 'ssl_ca_certs': None, + 'ssl_certfile': None, + 'ssl_keyfile': None, 'ssl_cert_reqs': 'required'},)) self.assertEqual(result.exception.args[0]['info']['redis_version'], '999.999.999') @@ -74,6 +76,8 @@ class TestRedisInfoModule(ModuleTestCase): 'password': 'PASS', 'ssl': False, 'ssl_ca_certs': None, + 'ssl_certfile': None, + 'ssl_keyfile': None, 'ssl_cert_reqs': 'required'},)) self.assertEqual(result.exception.args[0]['info']['redis_version'], '999.999.999') @@ -87,6 +91,8 @@ class TestRedisInfoModule(ModuleTestCase): 'login_password': 'PASS', 'tls': True, 'ca_certs': '/etc/ssl/ca.pem', + 'client_cert_file': '/etc/ssl/client.pem', + 'client_key_file': '/etc/ssl/client.key', 'validate_certs': False }) self.module.main() @@ -96,6 +102,8 @@ class TestRedisInfoModule(ModuleTestCase): 'password': 'PASS', 'ssl': True, 'ssl_ca_certs': '/etc/ssl/ca.pem', + 'ssl_certfile': '/etc/ssl/client.pem', + 'ssl_keyfile': '/etc/ssl/client.key', 'ssl_cert_reqs': None},)) self.assertEqual(result.exception.args[0]['info']['redis_version'], '999.999.999') diff --git a/tests/unit/plugins/modules/test_snap.py b/tests/unit/plugins/modules/test_snap.py index 480f637b6d..d70094551a 100644 --- 
a/tests/unit/plugins/modules/test_snap.py +++ b/tests/unit/plugins/modules/test_snap.py @@ -6,8 +6,10 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from .helper import Helper, ModuleTestCase, RunCmdCall +import sys + from ansible_collections.community.general.plugins.modules import snap +from .helper import Helper, RunCommandMock # pylint: disable=unused-import issue_6803_status_out = """Name Version Rev Tracking Publisher Notes @@ -375,100 +377,102 @@ issue_6803_kubectl_out = ( ) TEST_CASES = [ - ModuleTestCase( + dict( id="simple case", input={"name": ["hello-world"]}, output=dict(changed=True, snaps_installed=["hello-world"]), flags={}, - run_command_calls=[ - RunCmdCall( - command=['/testbin/snap', 'info', 'hello-world'], - environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, - rc=0, - out='name: hello-world\n', - err="", - ), - RunCmdCall( - command=['/testbin/snap', 'list'], - environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, - rc=0, - out="", - err="", - ), - RunCmdCall( - command=['/testbin/snap', 'install', 'hello-world'], - environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, - rc=0, - out="hello-world (12345/stable) v12345 from Canonical** installed\n", - err="", - ), - RunCmdCall( - command=['/testbin/snap', 'list'], - environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, - rc=0, - out=( - "Name Version Rev Tracking Publisher Notes" - "core20 20220826 1623 latest/stable canonical** base" - "lxd 5.6-794016a 23680 latest/stable/… canonical** -" - "hello-world 5.6-794016a 23680 latest/stable/… canonical** -" - "snapd 2.57.4 17336 latest/stable canonical** snapd" - ""), - err="", - ), - ] + mocks=dict( + run_command=[ + dict( + command=['/testbin/snap', 'info', 'hello-world'], + environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, + rc=0, + out='name: hello-world\n', + err="", + ), + dict( + command=['/testbin/snap', 'list'], + environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, + rc=0, + out="", + err="", + ), + dict( + command=['/testbin/snap', 'install', 'hello-world'], + environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, + rc=0, + out="hello-world (12345/stable) v12345 from Canonical** installed\n", + err="", + ), + dict( + command=['/testbin/snap', 'list'], + environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, + rc=0, + out=( + "Name Version Rev Tracking Publisher Notes" + "core20 20220826 1623 latest/stable canonical** base" + "lxd 5.6-794016a 23680 latest/stable/… canonical** -" + "hello-world 5.6-794016a 23680 latest/stable/… canonical** -" + "snapd 2.57.4 17336 latest/stable canonical** snapd" + ""), + err="", + ), + ], + ), ), - ModuleTestCase( + dict( id="issue_6803", input={"name": ["microk8s", "kubectl"], "classic": True}, output=dict(changed=True, snaps_installed=["microk8s", "kubectl"]), flags={}, - run_command_calls=[ - RunCmdCall( - command=['/testbin/snap', 'info', 'microk8s', 'kubectl'], - environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, - rc=0, - out='name: microk8s\n---\nname: kubectl\n', - err="", - ), - RunCmdCall( - command=['/testbin/snap', 'list'], - environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, - rc=0, - out=issue_6803_status_out, - err="", - ), - RunCmdCall( - command=['/testbin/snap', 'install', '--classic', 
'microk8s'], - environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, - rc=0, - out=issue_6803_microk8s_out, - err="", - ), - RunCmdCall( - command=['/testbin/snap', 'install', '--classic', 'kubectl'], - environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, - rc=0, - out=issue_6803_kubectl_out, - err="", - ), - RunCmdCall( - command=['/testbin/snap', 'list'], - environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, - rc=0, - out=( - "Name Version Rev Tracking Publisher Notes" - "core20 20220826 1623 latest/stable canonical** base" - "lxd 5.6-794016a 23680 latest/stable/… canonical** -" - "microk8s 5.6-794016a 23680 latest/stable/… canonical** -" - "kubectl 5.6-794016a 23680 latest/stable/… canonical** -" - "snapd 2.57.4 17336 latest/stable canonical** snapd" - ""), - err="", - ), - ] + mocks=dict( + run_command=[ + dict( + command=['/testbin/snap', 'info', 'microk8s', 'kubectl'], + environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, + rc=0, + out='name: microk8s\n---\nname: kubectl\n', + err="", + ), + dict( + command=['/testbin/snap', 'list'], + environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, + rc=0, + out=issue_6803_status_out, + err="", + ), + dict( + command=['/testbin/snap', 'install', '--classic', 'microk8s'], + environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, + rc=0, + out=issue_6803_microk8s_out, + err="", + ), + dict( + command=['/testbin/snap', 'install', '--classic', 'kubectl'], + environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, + rc=0, + out=issue_6803_kubectl_out, + err="", + ), + dict( + command=['/testbin/snap', 'list'], + environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, + rc=0, + out=( + "Name Version Rev Tracking Publisher Notes" + "core20 20220826 1623 latest/stable canonical** base" + "lxd 5.6-794016a 23680 latest/stable/… canonical** -" + "microk8s 5.6-794016a 23680 latest/stable/… canonical** -" + "kubectl 5.6-794016a 23680 latest/stable/… canonical** -" + "snapd 2.57.4 17336 latest/stable canonical** snapd" + ""), + err="", + ), + ], + ), ), ] -helper = Helper.from_list(snap.main, TEST_CASES) -patch_bin = helper.cmd_fixture -test_module = helper.test_module +Helper.from_list(sys.modules[__name__], snap, TEST_CASES) diff --git a/tests/unit/plugins/modules/test_wdc_redfish_command.py b/tests/unit/plugins/modules/test_wdc_redfish_command.py index 332b976f70..0775ac73dd 100644 --- a/tests/unit/plugins/modules/test_wdc_redfish_command.py +++ b/tests/unit/plugins/modules/test_wdc_redfish_command.py @@ -289,7 +289,7 @@ def mock_get_firmware_inventory_version_1_2_3(*args, **kwargs): } -ERROR_MESSAGE_UNABLE_TO_EXTRACT_BUNDLE_VERSION = "Unable to extract bundle version or multi-tenant status from update image tarfile" +ERROR_MESSAGE_UNABLE_TO_EXTRACT_BUNDLE_VERSION = "Unable to extract bundle version or multi-tenant status or generation from update image file" ACTION_WAS_SUCCESSFUL_MESSAGE = "Action was successful" diff --git a/tests/unit/plugins/modules/test_xfconf.py b/tests/unit/plugins/modules/test_xfconf.py index fbc2dae5f2..f902797ee3 100644 --- a/tests/unit/plugins/modules/test_xfconf.py +++ b/tests/unit/plugins/modules/test_xfconf.py @@ -14,7 +14,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import xfconf -from .helper import Helper +from .helper import Helper, RunCommandMock # pylint: 
disable=unused-import Helper.from_module(xfconf, __name__) diff --git a/tests/unit/plugins/modules/test_xfconf.yaml b/tests/unit/plugins/modules/test_xfconf.yaml index 908154df26..481b090e94 100644 --- a/tests/unit/plugins/modules/test_xfconf.yaml +++ b/tests/unit/plugins/modules/test_xfconf.yaml @@ -21,7 +21,8 @@ previous_value: '100' type: int value: '90' - run_command_calls: + mocks: + run_command: - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/inactive_opacity] environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: false} rc: 0 @@ -44,7 +45,8 @@ previous_value: '90' type: int value: '90' - run_command_calls: + mocks: + run_command: - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/inactive_opacity] environ: *env-def rc: 0 @@ -61,13 +63,14 @@ property: /general/SaveOnExit state: present value_type: bool - value: False + value: false output: changed: true previous_value: 'true' type: bool value: 'False' - run_command_calls: + mocks: + run_command: - command: [/testbin/xfconf-query, --channel, xfce4-session, --property, /general/SaveOnExit] environ: *env-def rc: 0 @@ -90,32 +93,33 @@ previous_value: [Main, Work, Tmp] type: [string, string, string] value: [A, B, C] - run_command_calls: + mocks: + run_command: - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/workspace_names] environ: *env-def rc: 0 out: "Value is an array with 3 items:\n\nMain\nWork\nTmp\n" err: "" - command: - - /testbin/xfconf-query - - --channel - - xfwm4 - - --property - - /general/workspace_names - - --create - - --force-array - - --type - - string - - --set - - A - - --type - - string - - --set - - B - - --type - - string - - --set - - C + - /testbin/xfconf-query + - --channel + - xfwm4 + - --property + - /general/workspace_names + - --create + - --force-array + - --type + - string + - --set + - A + - --type + - string + - --set + - B + - --type + - string + - --set + - C environ: *env-def rc: 0 out: "" @@ -132,32 +136,33 @@ previous_value: [A, B, C] type: [string, string, string] value: [A, B, C] - run_command_calls: + mocks: + run_command: - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/workspace_names] environ: *env-def rc: 0 out: "Value is an array with 3 items:\n\nA\nB\nC\n" err: "" - command: - - /testbin/xfconf-query - - --channel - - xfwm4 - - --property - - /general/workspace_names - - --create - - --force-array - - --type - - string - - --set - - A - - --type - - string - - --set - - B - - --type - - string - - --set - - C + - /testbin/xfconf-query + - --channel + - xfwm4 + - --property + - /general/workspace_names + - --create + - --force-array + - --type + - string + - --set + - A + - --type + - string + - --set + - B + - --type + - string + - --set + - C environ: *env-def rc: 0 out: "" @@ -170,9 +175,10 @@ output: changed: true previous_value: [A, B, C] - type: null - value: null - run_command_calls: + type: + value: + mocks: + run_command: - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/workspace_names] environ: *env-def rc: 0 diff --git a/tests/unit/plugins/modules/test_xfconf_info.py b/tests/unit/plugins/modules/test_xfconf_info.py index 67c63dda09..308f075490 100644 --- a/tests/unit/plugins/modules/test_xfconf_info.py +++ b/tests/unit/plugins/modules/test_xfconf_info.py @@ -7,7 +7,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import xfconf_info -from .helper import Helper +from .helper import Helper, RunCommandMock # 
pylint: disable=unused-import Helper.from_module(xfconf_info, __name__) diff --git a/tests/unit/plugins/modules/test_xfconf_info.yaml b/tests/unit/plugins/modules/test_xfconf_info.yaml index 519a87fdbd..26f77ce474 100644 --- a/tests/unit/plugins/modules/test_xfconf_info.yaml +++ b/tests/unit/plugins/modules/test_xfconf_info.yaml @@ -11,7 +11,8 @@ output: value: '100' is_array: false - run_command_calls: + mocks: + run_command: - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/inactive_opacity] environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} rc: 0 @@ -22,7 +23,8 @@ channel: xfwm4 property: /general/i_dont_exist output: {} - run_command_calls: + mocks: + run_command: - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/i_dont_exist] environ: *env-def rc: 1 @@ -34,7 +36,8 @@ output: failed: true msg: "missing parameter(s) required by 'property': channel" - run_command_calls: [] + mocks: + run_command: [] - id: test_property_get_array input: channel: xfwm4 @@ -42,7 +45,8 @@ output: is_array: true value_array: [Main, Work, Tmp] - run_command_calls: + mocks: + run_command: - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/workspace_names] environ: *env-def rc: 0 @@ -52,7 +56,8 @@ input: {} output: channels: [a, b, c] - run_command_calls: + mocks: + run_command: - command: [/testbin/xfconf-query, --list] environ: *env-def rc: 0 @@ -63,13 +68,14 @@ channel: xfwm4 output: properties: - - /general/wrap_cycle - - /general/wrap_layout - - /general/wrap_resistance - - /general/wrap_windows - - /general/wrap_workspaces - - /general/zoom_desktop - run_command_calls: + - /general/wrap_cycle + - /general/wrap_layout + - /general/wrap_resistance + - /general/wrap_windows + - /general/wrap_workspaces + - /general/zoom_desktop + mocks: + run_command: - command: [/testbin/xfconf-query, --list, --channel, xfwm4] environ: *env-def rc: 0
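
The new Keycloak unit tests in this patch all rely on the same mocking idea: each KeycloakAPI method is patched with a side_effect list, so successive calls return successive canned responses, and the test then asserts on the number of recorded mock calls. The standalone sketch below illustrates that pattern under stated assumptions; every name in it (FakeKeycloakAPI, patch_fake_api, ensure_userprofile) is a hypothetical stand-in for illustration and is not part of the collection or its test helper.

# Minimal sketch of the side_effect-based mocking pattern used by the new
# Keycloak unit tests above. All names here (FakeKeycloakAPI, patch_fake_api,
# ensure_userprofile) are hypothetical stand-ins, not collection code.
from contextlib import contextmanager
from unittest.mock import patch


class FakeKeycloakAPI(object):
    """Hypothetical stand-in for the real KeycloakAPI client."""

    def get_components(self, filter_params, parent_id):
        raise NotImplementedError  # always patched in tests

    def create_component(self, representation, parent_id):
        raise NotImplementedError  # always patched in tests


@contextmanager
def patch_fake_api(get_components=None, create_component=None):
    """Patch both API methods; a side_effect list yields one item per call."""
    with patch.object(FakeKeycloakAPI, 'get_components', side_effect=get_components) as mock_get:
        with patch.object(FakeKeycloakAPI, 'create_component', side_effect=create_component) as mock_create:
            yield mock_get, mock_create


def ensure_userprofile(api):
    """Hypothetical module logic: create the component only if none exists."""
    existing = api.get_components('type=org.keycloak.userprofile.UserProfileProvider', 'realm-name')
    if not existing:
        return api.create_component({'providerId': 'declarative-user-profile'}, 'realm-name')
    return existing[0]


if __name__ == '__main__':
    # "Absent" case: get_components returns an empty list once, so exactly one
    # create_component call is expected -- mirroring the assertions on
    # mock_get_components.mock_calls / mock_create_component.mock_calls above.
    with patch_fake_api(get_components=[[]], create_component=[{'id': 'new-id'}]) as (mock_get, mock_create):
        result = ensure_userprofile(FakeKeycloakAPI())
    assert result == {'id': 'new-id'}
    assert len(mock_get.mock_calls) == 1
    assert len(mock_create.mock_calls) == 1

The same idea carries over to the reworked snap, opkg, puppet, and xfconf tests in this patch, where the former run_command_calls lists now sit under a mocks/run_command key so that run_command fixtures can coexist with other kinds of mocks in the shared Helper.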