diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index b9d428f2b0..455d0659b4 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -29,22 +29,20 @@ schedules: always: true branches: include: + - stable-11 - stable-10 - - stable-9 - cron: 0 11 * * 0 displayName: Weekly (old stable branches) always: true branches: include: - - stable-8 + - stable-9 variables: - name: checkoutPath value: ansible_collections/community/general - name: coverageBranches value: main - - name: pipelinesCoverage - value: coverage - name: entryPoint value: tests/utils/shippable/shippable.sh - name: fetchDepth @@ -53,7 +51,7 @@ variables: resources: containers: - container: default - image: quay.io/ansible/azure-pipelines-test-container:6.0.0 + image: quay.io/ansible/azure-pipelines-test-container:7.0.0 pool: Standard @@ -72,7 +70,19 @@ stages: - test: 2 - test: 3 - test: 4 - - test: extra + - stage: Sanity_2_19 + displayName: Sanity 2.19 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + nameFormat: Test {0} + testFormat: 2.19/sanity/{0} + targets: + - test: 1 + - test: 2 + - test: 3 + - test: 4 - stage: Sanity_2_18 displayName: Sanity 2.18 dependsOn: [] @@ -99,19 +109,6 @@ stages: - test: 2 - test: 3 - test: 4 - - stage: Sanity_2_16 - displayName: Sanity 2.16 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Test {0} - testFormat: 2.16/sanity/{0} - targets: - - test: 1 - - test: 2 - - test: 3 - - test: 4 ### Units - stage: Units_devel displayName: Units devel @@ -122,12 +119,24 @@ stages: nameFormat: Python {0} testFormat: devel/units/{0}/1 targets: - - test: 3.8 - test: 3.9 - test: '3.10' - test: '3.11' - test: '3.12' - test: '3.13' + - test: '3.14' + - stage: Units_2_19 + displayName: Units 2.19 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + nameFormat: Python {0} + testFormat: 2.19/units/{0}/1 + targets: + - test: 3.8 + - test: "3.11" + - test: "3.13" - stage: Units_2_18 displayName: Units 2.18 dependsOn: [] @@ -138,6 +147,7 @@ stages: testFormat: 2.18/units/{0}/1 targets: - test: 3.8 + - test: "3.11" - test: "3.13" - stage: Units_2_17 displayName: Units 2.17 @@ -149,19 +159,8 @@ stages: testFormat: 2.17/units/{0}/1 targets: - test: 3.7 + - test: "3.10" - test: "3.12" - - stage: Units_2_16 - displayName: Units 2.16 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: 2.16/units/{0}/1 - targets: - - test: 2.7 - - test: 3.6 - - test: "3.11" ## Remote - stage: Remote_devel_extra_vms @@ -172,10 +171,10 @@ stages: parameters: testFormat: devel/{0} targets: - - name: Alpine 3.20 - test: alpine/3.20 - # - name: Fedora 40 - # test: fedora/40 + - name: Alpine 3.22 + test: alpine/3.22 + # - name: Fedora 42 + # test: fedora/42 - name: Ubuntu 22.04 test: ubuntu/22.04 - name: Ubuntu 24.04 @@ -190,14 +189,34 @@ stages: parameters: testFormat: devel/{0} targets: - - name: macOS 14.3 - test: macos/14.3 - - name: RHEL 9.4 - test: rhel/9.4 - - name: FreeBSD 14.1 - test: freebsd/14.1 - - name: FreeBSD 13.4 - test: freebsd/13.4 + - name: macOS 15.3 + test: macos/15.3 + - name: RHEL 10.0 + test: rhel/10.0 + - name: RHEL 9.6 + test: rhel/9.6 + - name: FreeBSD 14.3 + test: freebsd/14.3 + - name: FreeBSD 13.5 + test: freebsd/13.5 + groups: + - 1 + - 2 + - 3 + - stage: Remote_2_19 + displayName: Remote 2.19 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + testFormat: 2.19/{0} + 
targets: + - name: RHEL 9.5 + test: rhel/9.5 + - name: RHEL 10.0 + test: rhel/10.0 + - name: FreeBSD 14.2 + test: freebsd/14.2 groups: - 1 - 2 @@ -210,8 +229,12 @@ stages: parameters: testFormat: 2.18/{0} targets: + - name: macOS 14.3 + test: macos/14.3 - name: RHEL 9.4 test: rhel/9.4 + - name: FreeBSD 14.1 + test: freebsd/14.1 groups: - 1 - 2 @@ -224,34 +247,10 @@ stages: parameters: testFormat: 2.17/{0} targets: - - name: FreeBSD 13.3 - test: freebsd/13.3 + - name: FreeBSD 13.5 + test: freebsd/13.5 - name: RHEL 9.3 test: rhel/9.3 - - name: FreeBSD 14.0 - test: freebsd/14.0 - groups: - - 1 - - 2 - - 3 - - stage: Remote_2_16 - displayName: Remote 2.16 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - testFormat: 2.16/{0} - targets: - - name: macOS 13.2 - test: macos/13.2 - - name: RHEL 9.2 - test: rhel/9.2 - - name: RHEL 8.8 - test: rhel/8.8 - - name: RHEL 7.9 - test: rhel/7.9 - # - name: FreeBSD 13.2 - # test: freebsd/13.2 groups: - 1 - 2 @@ -266,10 +265,10 @@ stages: parameters: testFormat: devel/linux/{0} targets: - - name: Fedora 40 - test: fedora40 - - name: Alpine 3.20 - test: alpine320 + - name: Fedora 42 + test: fedora42 + - name: Alpine 3.22 + test: alpine322 - name: Ubuntu 22.04 test: ubuntu2204 - name: Ubuntu 24.04 @@ -278,6 +277,22 @@ stages: - 1 - 2 - 3 + - stage: Docker_2_19 + displayName: Docker 2.19 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + testFormat: 2.19/linux/{0} + targets: + - name: Fedora 41 + test: fedora41 + - name: Alpine 3.21 + test: alpine321 + groups: + - 1 + - 2 + - 3 - stage: Docker_2_18 displayName: Docker 2.18 dependsOn: [] @@ -286,6 +301,10 @@ stages: parameters: testFormat: 2.18/linux/{0} targets: + - name: Fedora 40 + test: fedora40 + - name: Alpine 3.20 + test: alpine320 - name: Ubuntu 24.04 test: ubuntu2404 groups: @@ -310,26 +329,6 @@ stages: - 1 - 2 - 3 - - stage: Docker_2_16 - displayName: Docker 2.16 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - testFormat: 2.16/linux/{0} - targets: - - name: Fedora 38 - test: fedora38 - - name: openSUSE 15 - test: opensuse15 - - name: Alpine 3 - test: alpine3 - - name: CentOS 7 - test: centos7 - groups: - - 1 - - 2 - - 3 ### Community Docker - stage: Docker_community_devel @@ -362,8 +361,19 @@ stages: # nameFormat: Python {0} # testFormat: devel/generic/{0}/1 # targets: -# - test: '3.8' -# - test: '3.11' +# - test: '3.9' +# - test: '3.12' +# - test: '3.14' +# - stage: Generic_2_19 +# displayName: Generic 2.19 +# dependsOn: [] +# jobs: +# - template: templates/matrix.yml +# parameters: +# nameFormat: Python {0} +# testFormat: 2.19/generic/{0}/1 +# targets: +# - test: '3.9' # - test: '3.13' # - stage: Generic_2_18 # displayName: Generic 2.18 @@ -387,44 +397,32 @@ stages: # targets: # - test: '3.7' # - test: '3.12' -# - stage: Generic_2_16 -# displayName: Generic 2.16 -# dependsOn: [] -# jobs: -# - template: templates/matrix.yml -# parameters: -# nameFormat: Python {0} -# testFormat: 2.16/generic/{0}/1 -# targets: -# - test: '2.7' -# - test: '3.6' -# - test: '3.11' - stage: Summary condition: succeededOrFailed() dependsOn: - Sanity_devel + - Sanity_2_19 - Sanity_2_18 - Sanity_2_17 - - Sanity_2_16 - Units_devel + - Units_2_19 - Units_2_18 - Units_2_17 - - Units_2_16 - Remote_devel_extra_vms - Remote_devel + - Remote_2_19 - Remote_2_18 - Remote_2_17 - - Remote_2_16 - Docker_devel + - Docker_2_19 - Docker_2_18 - Docker_2_17 - - Docker_2_16 - Docker_community_devel # Right now all generic tests are disabled. 
Uncomment when at least one of them is re-enabled. # - Generic_devel +# - Generic_2_19 # - Generic_2_18 # - Generic_2_17 -# - Generic_2_16 jobs: - template: templates/coverage.yml diff --git a/.azure-pipelines/templates/coverage.yml b/.azure-pipelines/templates/coverage.yml index 3c8841aa26..1bf17e053a 100644 --- a/.azure-pipelines/templates/coverage.yml +++ b/.azure-pipelines/templates/coverage.yml @@ -28,16 +28,6 @@ jobs: - bash: .azure-pipelines/scripts/report-coverage.sh displayName: Generate Coverage Report condition: gt(variables.coverageFileCount, 0) - - task: PublishCodeCoverageResults@1 - inputs: - codeCoverageTool: Cobertura - # Azure Pipelines only accepts a single coverage data file. - # That means only Python or PowerShell coverage can be uploaded, but not both. - # Set the "pipelinesCoverage" variable to determine which type is uploaded. - # Use "coverage" for Python and "coverage-powershell" for PowerShell. - summaryFileLocation: "$(outputPath)/reports/$(pipelinesCoverage).xml" - displayName: Publish to Azure Pipelines - condition: gt(variables.coverageFileCount, 0) - bash: .azure-pipelines/scripts/publish-codecov.py "$(outputPath)" displayName: Publish to codecov.io condition: gt(variables.coverageFileCount, 0) diff --git a/.azure-pipelines/templates/matrix.yml b/.azure-pipelines/templates/matrix.yml index 4876375855..49f5d8595a 100644 --- a/.azure-pipelines/templates/matrix.yml +++ b/.azure-pipelines/templates/matrix.yml @@ -50,11 +50,11 @@ jobs: parameters: jobs: - ${{ if eq(length(parameters.groups), 0) }}: - - ${{ each target in parameters.targets }}: - - name: ${{ format(parameters.nameFormat, coalesce(target.name, target.test)) }} - test: ${{ format(parameters.testFormat, coalesce(target.test, target.name)) }} - - ${{ if not(eq(length(parameters.groups), 0)) }}: - - ${{ each group in parameters.groups }}: - ${{ each target in parameters.targets }}: - - name: ${{ format(format(parameters.nameGroupFormat, parameters.nameFormat), coalesce(target.name, target.test), group) }} - test: ${{ format(format(parameters.testGroupFormat, parameters.testFormat), coalesce(target.test, target.name), group) }} + - name: ${{ format(parameters.nameFormat, coalesce(target.name, target.test)) }} + test: ${{ format(parameters.testFormat, coalesce(target.test, target.name)) }} + - ${{ if not(eq(length(parameters.groups), 0)) }}: + - ${{ each group in parameters.groups }}: + - ${{ each target in parameters.targets }}: + - name: ${{ format(format(parameters.nameGroupFormat, parameters.nameFormat), coalesce(target.name, target.test), group) }} + test: ${{ format(format(parameters.testGroupFormat, parameters.testFormat), coalesce(target.test, target.name), group) }} diff --git a/.azure-pipelines/templates/test.yml b/.azure-pipelines/templates/test.yml index 700cf629d7..b263379c06 100644 --- a/.azure-pipelines/templates/test.yml +++ b/.azure-pipelines/templates/test.yml @@ -14,37 +14,37 @@ parameters: jobs: - ${{ each job in parameters.jobs }}: - - job: test_${{ replace(replace(replace(job.test, '/', '_'), '.', '_'), '-', '_') }} - displayName: ${{ job.name }} - container: default - workspace: - clean: all - steps: - - checkout: self - fetchDepth: $(fetchDepth) - path: $(checkoutPath) - - bash: .azure-pipelines/scripts/run-tests.sh "$(entryPoint)" "${{ job.test }}" "$(coverageBranches)" - displayName: Run Tests - - bash: .azure-pipelines/scripts/process-results.sh - condition: succeededOrFailed() - displayName: Process Results - - bash: .azure-pipelines/scripts/aggregate-coverage.sh 
"$(Agent.TempDirectory)" - condition: eq(variables.haveCoverageData, 'true') - displayName: Aggregate Coverage Data - - task: PublishTestResults@2 - condition: eq(variables.haveTestResults, 'true') - inputs: - testResultsFiles: "$(outputPath)/junit/*.xml" - displayName: Publish Test Results - - task: PublishPipelineArtifact@1 - condition: eq(variables.haveBotResults, 'true') - displayName: Publish Bot Results - inputs: - targetPath: "$(outputPath)/bot/" - artifactName: "Bot $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)" - - task: PublishPipelineArtifact@1 - condition: eq(variables.haveCoverageData, 'true') - displayName: Publish Coverage Data - inputs: - targetPath: "$(Agent.TempDirectory)/coverage/" - artifactName: "Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)" + - job: test_${{ replace(replace(replace(job.test, '/', '_'), '.', '_'), '-', '_') }} + displayName: ${{ job.name }} + container: default + workspace: + clean: all + steps: + - checkout: self + fetchDepth: $(fetchDepth) + path: $(checkoutPath) + - bash: .azure-pipelines/scripts/run-tests.sh "$(entryPoint)" "${{ job.test }}" "$(coverageBranches)" + displayName: Run Tests + - bash: .azure-pipelines/scripts/process-results.sh + condition: succeededOrFailed() + displayName: Process Results + - bash: .azure-pipelines/scripts/aggregate-coverage.sh "$(Agent.TempDirectory)" + condition: eq(variables.haveCoverageData, 'true') + displayName: Aggregate Coverage Data + - task: PublishTestResults@2 + condition: eq(variables.haveTestResults, 'true') + inputs: + testResultsFiles: "$(outputPath)/junit/*.xml" + displayName: Publish Test Results + - task: PublishPipelineArtifact@1 + condition: eq(variables.haveBotResults, 'true') + displayName: Publish Bot Results + inputs: + targetPath: "$(outputPath)/bot/" + artifactName: "Bot $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)" + - task: PublishPipelineArtifact@1 + condition: eq(variables.haveCoverageData, 'true') + displayName: Publish Coverage Data + inputs: + targetPath: "$(Agent.TempDirectory)/coverage/" + artifactName: "Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)" diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 0000000000..cd4bdfee65 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,9 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# YAML reformatting +d032de3b16eed11ea3a31cd3d96d78f7c46a2ee0 +e8f965fbf8154ea177c6622da149f2ae8533bd3c +e938ca5f20651abc160ee6aba10014013d04dcc1 +eaa5e07b2866e05b6c7b5628ca92e9cb1142d008 diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 5390d81b27..fac3fae8f8 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -77,6 +77,8 @@ files: $callbacks/opentelemetry.py: keywords: opentelemetry observability maintainers: v1v + $callbacks/print_task.py: + maintainers: demonpig $callbacks/say.py: keywords: brew cask darwin homebrew macosx macports osx labels: macos say @@ -90,6 +92,8 @@ files: maintainers: ryancurrah $callbacks/syslog_json.py: maintainers: imjoseangel + $callbacks/tasks_only.py: + maintainers: felixfontein $callbacks/timestamp.py: maintainers: kurokobo $callbacks/unixy.py: @@ -116,6 +120,8 @@ files: $connections/saltstack.py: labels: saltstack maintainers: mscherer + $connections/wsl.py: + maintainers: rgl 
$connections/zone.py: maintainers: $team_ansible_core $doc_fragments/: @@ -159,6 +165,14 @@ files: maintainers: Ajpantuso $filters/jc.py: maintainers: kellyjonbrazil + $filters/json_diff.yml: + maintainers: numo68 + $filters/json_patch.py: + maintainers: numo68 + $filters/json_patch.yml: + maintainers: numo68 + $filters/json_patch_recipe.yml: + maintainers: numo68 $filters/json_query.py: {} $filters/keep_keys.py: maintainers: vbotka @@ -195,6 +209,8 @@ files: maintainers: resmo $filters/to_months.yml: maintainers: resmo + $filters/to_prettytable.py: + maintainers: tgadiev $filters/to_seconds.yml: maintainers: resmo $filters/to_time_unit.yml: @@ -230,13 +246,9 @@ files: keywords: opennebula dynamic inventory script labels: cloud opennebula maintainers: feldsam - $inventories/proxmox.py: - maintainers: $team_virt ilijamt krauthosting $inventories/scaleway.py: labels: cloud scaleway maintainers: $team_scaleway - $inventories/stackpath_compute.py: - maintainers: shayrybak $inventories/virtualbox.py: {} $inventories/xen_orchestra.py: maintainers: ddelnano shinuza @@ -280,9 +292,6 @@ files: $lookups/lastpass.py: {} $lookups/lmdb_kv.py: maintainers: jpmens - $lookups/manifold.py: - labels: manifold - maintainers: galanoff $lookups/merge_variables.py: maintainers: rlenferink m-a-r-k-e alpex8 $lookups/onepass: @@ -294,6 +303,8 @@ files: $lookups/onepassword_raw.py: ignore: scottsb maintainers: azenk + $lookups/onepassword_ssh_key.py: + maintainers: mohammedbabelly20 $lookups/passwordstore.py: {} $lookups/random_pet.py: maintainers: Akasurde @@ -363,9 +374,13 @@ files: $module_utils/oracle/oci_utils.py: labels: cloud maintainers: $team_oracle + $module_utils/pacemaker.py: + maintainers: munchtoast $module_utils/pipx.py: labels: pipx maintainers: russoz + $module_utils/pkg_req.py: + maintainers: russoz $module_utils/python_runner.py: maintainers: russoz $module_utils/puppet.py: @@ -387,6 +402,8 @@ files: maintainers: russoz $module_utils/ssh.py: maintainers: russoz + $module_utils/systemd.py: + maintainers: NomakCooper $module_utils/storage/hpe3par/hpe3par.py: maintainers: farhan7500 gautamphegde $module_utils/utm_utils.py: @@ -398,6 +415,8 @@ files: $module_utils/wdc_redfish_utils.py: labels: wdc_redfish_utils maintainers: $team_wdc + $module_utils/xdg_mime.py: + maintainers: mhalano $module_utils/xenserver.py: labels: xenserver maintainers: bvitnik @@ -456,7 +475,7 @@ files: $modules/bearychat.py: maintainers: tonyseek $modules/bigpanda.py: - maintainers: hkariti + ignore: hkariti $modules/bitbucket_: maintainers: catcombo $modules/bootc_manage.py: @@ -481,8 +500,6 @@ files: maintainers: NickatEpic $modules/cisco_webex.py: maintainers: drew-russell - $modules/clc_: - maintainers: clc-runner $modules/cloud_init_data_facts.py: maintainers: resmo $modules/cloudflare_dns.py: @@ -641,8 +658,6 @@ files: maintainers: marns93 $modules/hg.py: maintainers: yeukhon - $modules/hipchat.py: - maintainers: pb8226 shirou $modules/homebrew.py: ignore: ryansb keywords: brew cask darwin homebrew macosx macports osx @@ -768,6 +783,8 @@ files: maintainers: brettmilford unnecessary-username juanmcasanova $modules/jenkins_build_info.py: maintainers: juanmcasanova + $modules/jenkins_credential.py: + maintainers: YoussefKhalidAli $modules/jenkins_job.py: maintainers: sermilrod $modules/jenkins_job_info.py: @@ -884,6 +901,8 @@ files: maintainers: nerzhul $modules/lvg.py: maintainers: abulimov + $modules/lvm_pv.py: + maintainers: klention $modules/lvg_rename.py: maintainers: lszomor $modules/lvol.py: @@ -1038,7 +1057,9 @@ 
files: $modules/ovh_monthly_billing.py: maintainers: fraff $modules/pacemaker_cluster.py: - maintainers: matbu + maintainers: matbu munchtoast + $modules/pacemaker_resource.py: + maintainers: munchtoast $modules/packet_: maintainers: nurfet-becirevic t0mk $modules/packet_device.py: @@ -1114,34 +1135,6 @@ files: maintainers: $team_bsd berenddeboer $modules/pritunl_: maintainers: Lowess - $modules/profitbricks: - maintainers: baldwinSPC - $modules/proxmox: - keywords: kvm libvirt proxmox qemu - labels: proxmox virt - maintainers: $team_virt UnderGreen krauthosting - ignore: tleguern - $modules/proxmox.py: - ignore: skvidal - maintainers: UnderGreen krauthosting - $modules/proxmox_disk.py: - maintainers: castorsky krauthosting - $modules/proxmox_kvm.py: - ignore: skvidal - maintainers: helldorado krauthosting - $modules/proxmox_backup.py: - maintainers: IamLunchbox - $modules/proxmox_nic.py: - maintainers: Kogelvis krauthosting - $modules/proxmox_node_info.py: - maintainers: jwbernin krauthosting - $modules/proxmox_storage_contents_info.py: - maintainers: l00ptr krauthosting - $modules/proxmox_tasks_info: - maintainers: paginabianca krauthosting - $modules/proxmox_template.py: - ignore: skvidal - maintainers: UnderGreen krauthosting $modules/pubnub_blocks.py: maintainers: parfeon pubnub $modules/pulp_repo.py: @@ -1212,9 +1205,9 @@ files: $modules/scaleway_compute_private_network.py: maintainers: pastral $modules/scaleway_container.py: - maintainers: Lunik + maintainers: Lunik $modules/scaleway_container_info.py: - maintainers: Lunik + maintainers: Lunik $modules/scaleway_container_namespace.py: maintainers: Lunik $modules/scaleway_container_namespace_info.py: @@ -1347,6 +1340,8 @@ files: maintainers: konstruktoid $modules/systemd_creds_encrypt.py: maintainers: konstruktoid + $modules/systemd_info.py: + maintainers: NomakCooper $modules/sysupgrade.py: maintainers: precurse $modules/taiga_issue.py: @@ -1378,16 +1373,19 @@ files: keywords: sophos utm maintainers: $team_e_spirit $modules/utm_ca_host_key_cert.py: - maintainers: stearz + ignore: stearz + maintainers: $team_e_spirit $modules/utm_ca_host_key_cert_info.py: - maintainers: stearz + ignore: stearz + maintainers: $team_e_spirit $modules/utm_network_interface_address.py: maintainers: steamx $modules/utm_network_interface_address_info.py: maintainers: steamx $modules/utm_proxy_auth_profile.py: keywords: sophos utm - maintainers: $team_e_spirit stearz + ignore: stearz + maintainers: $team_e_spirit $modules/utm_proxy_exception.py: keywords: sophos utm maintainers: $team_e_spirit RickS-C137 @@ -1417,6 +1415,8 @@ files: maintainers: dinoocch the-maldridge $modules/xcc_: maintainers: panyy3 renxulei + $modules/xdg_mime.py: + maintainers: mhalano $modules/xenserver_: maintainers: bvitnik $modules/xenserver_facts.py: @@ -1449,6 +1449,8 @@ files: maintainers: natefoo $modules/znode.py: maintainers: treyperry + $modules/zpool.py: + maintainers: tomhesse $modules/zpool_facts: keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool labels: solaris @@ -1515,6 +1517,22 @@ files: maintainers: russoz docs/docsite/rst/guide_deps.rst: maintainers: russoz + docs/docsite/rst/guide_iocage.rst: + maintainers: russoz felixfontein + docs/docsite/rst/guide_iocage_inventory.rst: + maintainers: vbotka + docs/docsite/rst/guide_iocage_inventory_aliases.rst: + maintainers: vbotka + docs/docsite/rst/guide_iocage_inventory_basics.rst: + maintainers: vbotka + docs/docsite/rst/guide_iocage_inventory_dhcp.rst: + maintainers: 
vbotka + docs/docsite/rst/guide_iocage_inventory_hooks.rst: + maintainers: vbotka + docs/docsite/rst/guide_iocage_inventory_properties.rst: + maintainers: vbotka + docs/docsite/rst/guide_iocage_inventory_tags.rst: + maintainers: vbotka docs/docsite/rst/guide_modulehelper.rst: maintainers: russoz docs/docsite/rst/guide_online.rst: @@ -1523,6 +1541,8 @@ files: maintainers: baldwinSPC nurfet-becirevic t0mk teebes docs/docsite/rst/guide_scaleway.rst: maintainers: $team_scaleway + docs/docsite/rst/guide_uthelper.rst: + maintainers: russoz docs/docsite/rst/guide_vardict.rst: maintainers: russoz docs/docsite/rst/test_guide.rst: diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index f64de2abe3..4b1c1bfb95 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -7,147 +7,147 @@ name: Bug report description: Create a report to help us improve body: -- type: markdown - attributes: - value: | - ⚠ - Verify first that your issue is not [already reported on GitHub][issue search]. - Also test if the latest release and devel branch are affected too. - *Complete **all** sections as described, this form is processed automatically.* + - type: markdown + attributes: + value: | + ⚠ + Verify first that your issue is not [already reported on GitHub][issue search]. + Also test if the latest release and devel branch are affected too. + *Complete **all** sections as described, this form is processed automatically.* - [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues + [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues -- type: textarea - attributes: - label: Summary - description: Explain the problem briefly below. - placeholder: >- - When I try to do X with the collection from the main branch on GitHub, Y - breaks in a way Z under the env E. Here are all the details I know - about this problem... - validations: - required: true - -- type: dropdown - attributes: - label: Issue Type - # FIXME: Once GitHub allows defining the default choice, update this - options: - - Bug Report - validations: - required: true - -- type: textarea - attributes: - # For smaller collections we could use a multi-select and hardcode the list - # May generate this list via GitHub action and walking files under https://github.com/ansible-collections/community.general/tree/main/plugins - # Select from list, filter as you type (`mysql` would only show the 3 mysql components) - # OR freeform - doesn't seem to be supported in adaptivecards - label: Component Name - description: >- - Write the short name of the module, plugin, task or feature below, - *use your best guess if unsure*. Do not include `community.general.`! - placeholder: dnf, apt, yum, pip, user etc. - validations: - required: true - -- type: textarea - attributes: - label: Ansible Version - description: >- - Paste verbatim output from `ansible --version` between - tripple backticks. - value: | - ```console (paste below) - $ ansible --version - - ``` - validations: - required: true - -- type: textarea - attributes: - label: Community.general Version - description: >- - Paste verbatim output from "ansible-galaxy collection list community.general" - between tripple backticks. 
- value: | - ```console (paste below) - $ ansible-galaxy collection list community.general - - ``` - validations: - required: true - -- type: textarea - attributes: - label: Configuration - description: >- - If this issue has an example piece of YAML that can help to reproduce this problem, please provide it. - This can be a piece of YAML from, e.g., an automation, script, scene or configuration. - Paste verbatim output from `ansible-config dump --only-changed` between quotes - value: | - ```console (paste below) - $ ansible-config dump --only-changed - - ``` - - -- type: textarea - attributes: - label: OS / Environment - description: >- - Provide all relevant information below, e.g. target OS versions, - network device firmware, etc. - placeholder: RHEL 8, CentOS Stream etc. - validations: - required: false - - -- type: textarea - attributes: - label: Steps to Reproduce - description: | - Describe exactly how to reproduce the problem, using a minimal test-case. It would *really* help us understand your problem if you could also passed any playbooks, configs and commands you used. - - **HINT:** You can paste https://gist.github.com links for larger files. - value: | - - ```yaml (paste below) - - ``` - validations: - required: true - -- type: textarea - attributes: - label: Expected Results - description: >- - Describe what you expected to happen when running the steps above. - placeholder: >- - I expected X to happen because I assumed Y. - that it did not. - validations: - required: true - -- type: textarea - attributes: - label: Actual Results - description: | - Describe what actually happened. If possible run with extra verbosity (`-vvvv`). - - Paste verbatim command output between quotes. - value: | - ```console (paste below) - - ``` -- type: checkboxes - attributes: - label: Code of Conduct - description: | - Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. - options: - - label: I agree to follow the Ansible Code of Conduct + - type: textarea + attributes: + label: Summary + description: Explain the problem briefly below. + placeholder: >- + When I try to do X with the collection from the main branch on GitHub, Y + breaks in a way Z under the env E. Here are all the details I know + about this problem... + validations: required: true + + - type: dropdown + attributes: + label: Issue Type + # FIXME: Once GitHub allows defining the default choice, update this + options: + - Bug Report + validations: + required: true + + - type: textarea + attributes: + # For smaller collections we could use a multi-select and hardcode the list + # May generate this list via GitHub action and walking files under https://github.com/ansible-collections/community.general/tree/main/plugins + # Select from list, filter as you type (`mysql` would only show the 3 mysql components) + # OR freeform - doesn't seem to be supported in adaptivecards + label: Component Name + description: >- + Write the short name of the module, plugin, task or feature below, + *use your best guess if unsure*. Do not include `community.general.`! + placeholder: dnf, apt, yum, pip, user etc. + validations: + required: true + + - type: textarea + attributes: + label: Ansible Version + description: >- + Paste verbatim output from `ansible --version` between + triple backticks.
+ value: | + ```console (paste below) + $ ansible --version + + ``` + validations: + required: true + + - type: textarea + attributes: + label: Community.general Version + description: >- + Paste verbatim output from "ansible-galaxy collection list community.general" + between triple backticks. + value: | + ```console (paste below) + $ ansible-galaxy collection list community.general + + ``` + validations: + required: true + + - type: textarea + attributes: + label: Configuration + description: >- + If this issue has an example piece of YAML that can help to reproduce this problem, please provide it. + This can be a piece of YAML from, e.g., an automation, script, scene or configuration. + Paste verbatim output from `ansible-config dump --only-changed` between quotes + value: | + ```console (paste below) + $ ansible-config dump --only-changed + + ``` + + + - type: textarea + attributes: + label: OS / Environment + description: >- + Provide all relevant information below, e.g. target OS versions, + network device firmware, etc. + placeholder: RHEL 8, CentOS Stream etc. + validations: + required: false + + + - type: textarea + attributes: + label: Steps to Reproduce + description: | + Describe exactly how to reproduce the problem, using a minimal test-case. It would *really* help us understand your problem if you could also paste any playbooks, configs and commands you used. + + **HINT:** You can paste https://gist.github.com links for larger files. + value: | + + ```yaml (paste below) + + ``` + validations: + required: true + + - type: textarea + attributes: + label: Expected Results + description: >- + Describe what you expected to happen when running the steps above. + placeholder: >- + I expected X to happen because I assumed Y, + but it did not. + validations: + required: true + + - type: textarea + attributes: + label: Actual Results + description: | + Describe what actually happened. If possible run with extra verbosity (`-vvvv`). + + Paste verbatim command output between quotes. + value: | + ```console (paste below) + + ``` + - type: checkboxes + attributes: + label: Code of Conduct + description: | + Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. + options: + - label: I agree to follow the Ansible Code of Conduct + required: true ... diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 0cc2db058c..476eed516e 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -6,26 +6,26 @@ # Ref: https://help.github.com/en/github/building-a-strong-community/configuring-issue-templates-for-your-repository#configuring-the-template-chooser blank_issues_enabled: false # default: true contact_links: -- name: Security bug report - url: https://docs.ansible.com/ansible-core/devel/community/reporting_bugs_and_features.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections - about: | - Please learn how to report security vulnerabilities here. + - name: Security bug report + url: https://docs.ansible.com/ansible-core/devel/community/reporting_bugs_and_features.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections + about: | + Please learn how to report security vulnerabilities here. - For all security related bugs, email security@ansible.com - instead of using this issue tracker and you will receive - a prompt response.
+ For all security related bugs, email security@ansible.com + instead of using this issue tracker and you will receive + a prompt response. - For more information, see - https://docs.ansible.com/ansible/latest/community/reporting_bugs_and_features.html -- name: Ansible Code of Conduct - url: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections - about: Be nice to other members of the community. -- name: Talks to the community - url: https://docs.ansible.com/ansible/latest/community/communication.html?utm_medium=github&utm_source=issue_template_chooser#mailing-list-information - about: Please ask and answer usage questions here -- name: Working groups - url: https://github.com/ansible/community/wiki - about: Interested in improving a specific area? Become a part of a working group! -- name: For Enterprise - url: https://www.ansible.com/products/engine?utm_medium=github&utm_source=issue_template_chooser_ansible_collections - about: Red Hat offers support for the Ansible Automation Platform + For more information, see + https://docs.ansible.com/ansible/latest/community/reporting_bugs_and_features.html + - name: Ansible Code of Conduct + url: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections + about: Be nice to other members of the community. + - name: Talks to the community + url: https://docs.ansible.com/ansible/latest/community/communication.html?utm_medium=github&utm_source=issue_template_chooser#mailing-list-information + about: Please ask and answer usage questions here + - name: Working groups + url: https://github.com/ansible/community/wiki + about: Interested in improving a specific area? Become a part of a working group! + - name: For Enterprise + url: https://www.ansible.com/products/engine?utm_medium=github&utm_source=issue_template_chooser_ansible_collections + about: Red Hat offers support for the Ansible Automation Platform diff --git a/.github/ISSUE_TEMPLATE/documentation_report.yml b/.github/ISSUE_TEMPLATE/documentation_report.yml index 6ec49fcb37..2ad4bce44a 100644 --- a/.github/ISSUE_TEMPLATE/documentation_report.yml +++ b/.github/ISSUE_TEMPLATE/documentation_report.yml @@ -8,122 +8,122 @@ description: Ask us about docs # NOTE: issue body is enabled to allow screenshots body: -- type: markdown - attributes: - value: | - ⚠ - Verify first that your issue is not [already reported on GitHub][issue search]. - Also test if the latest release and devel branch are affected too. - *Complete **all** sections as described, this form is processed automatically.* + - type: markdown + attributes: + value: | + ⚠ + Verify first that your issue is not [already reported on GitHub][issue search]. + Also test if the latest release and devel branch are affected too. + *Complete **all** sections as described, this form is processed automatically.* - [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues + [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues -- type: textarea - attributes: - label: Summary - description: | - Explain the problem briefly below, add suggestions to wording or structure. + - type: textarea + attributes: + label: Summary + description: | + Explain the problem briefly below, add suggestions to wording or structure. 
- **HINT:** Did you know the documentation has an `Edit on GitHub` link on every page? - placeholder: >- - I was reading the Collection documentation of version X and I'm having - problems understanding Y. It would be very helpful if that got - rephrased as Z. - validations: - required: true - -- type: dropdown - attributes: - label: Issue Type - # FIXME: Once GitHub allows defining the default choice, update this - options: - - Documentation Report - validations: - required: true - -- type: input - attributes: - label: Component Name - description: >- - Write the short name of the file, module, plugin, task or feature below, - *use your best guess if unsure*. Do not include `community.general.`! - placeholder: mysql_user - validations: - required: true - -- type: textarea - attributes: - label: Ansible Version - description: >- - Paste verbatim output from `ansible --version` between - tripple backticks. - value: | - ```console (paste below) - $ ansible --version - - ``` - validations: - required: false - -- type: textarea - attributes: - label: Community.general Version - description: >- - Paste verbatim output from "ansible-galaxy collection list community.general" - between tripple backticks. - value: | - ```console (paste below) - $ ansible-galaxy collection list community.general - - ``` - validations: - required: true - -- type: textarea - attributes: - label: Configuration - description: >- - Paste verbatim output from `ansible-config dump --only-changed` between quotes. - value: | - ```console (paste below) - $ ansible-config dump --only-changed - - ``` - validations: - required: false - -- type: textarea - attributes: - label: OS / Environment - description: >- - Provide all relevant information below, e.g. OS version, - browser, etc. - placeholder: Fedora 33, Firefox etc. - validations: - required: false - -- type: textarea - attributes: - label: Additional Information - description: | - Describe how this improves the documentation, e.g. before/after situation or screenshots. - - **Tip:** It's not possible to upload the screenshot via this field directly but you can use the last textarea in this form to attach them. - - **HINT:** You can paste https://gist.github.com links for larger files. - placeholder: >- - When the improvement is applied, it makes it more straightforward - to understand X. - validations: - required: false - -- type: checkboxes - attributes: - label: Code of Conduct - description: | - Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. - options: - - label: I agree to follow the Ansible Code of Conduct + **HINT:** Did you know the documentation has an `Edit on GitHub` link on every page? + placeholder: >- + I was reading the Collection documentation of version X and I'm having + problems understanding Y. It would be very helpful if that got + rephrased as Z. + validations: required: true + + - type: dropdown + attributes: + label: Issue Type + # FIXME: Once GitHub allows defining the default choice, update this + options: + - Documentation Report + validations: + required: true + + - type: input + attributes: + label: Component Name + description: >- + Write the short name of the file, module, plugin, task or feature below, + *use your best guess if unsure*. Do not include `community.general.`! 
+ placeholder: mysql_user + validations: + required: true + + - type: textarea + attributes: + label: Ansible Version + description: >- + Paste verbatim output from `ansible --version` between + triple backticks. + value: | + ```console (paste below) + $ ansible --version + + ``` + validations: + required: false + + - type: textarea + attributes: + label: Community.general Version + description: >- + Paste verbatim output from "ansible-galaxy collection list community.general" + between triple backticks. + value: | + ```console (paste below) + $ ansible-galaxy collection list community.general + + ``` + validations: + required: true + + - type: textarea + attributes: + label: Configuration + description: >- + Paste verbatim output from `ansible-config dump --only-changed` between quotes. + value: | + ```console (paste below) + $ ansible-config dump --only-changed + + ``` + validations: + required: false + + - type: textarea + attributes: + label: OS / Environment + description: >- + Provide all relevant information below, e.g. OS version, + browser, etc. + placeholder: Fedora 33, Firefox etc. + validations: + required: false + + - type: textarea + attributes: + label: Additional Information + description: | + Describe how this improves the documentation, e.g. before/after situation or screenshots. + + **Tip:** It's not possible to upload the screenshot via this field directly but you can use the last textarea in this form to attach them. + + **HINT:** You can paste https://gist.github.com links for larger files. + placeholder: >- + When the improvement is applied, it makes it more straightforward + to understand X. + validations: + required: false + + - type: checkboxes + attributes: + label: Code of Conduct + description: | + Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. + options: + - label: I agree to follow the Ansible Code of Conduct + required: true ... diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index f34564283c..dc62f94c5c 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -7,67 +7,67 @@ name: Feature request description: Suggest an idea for this project body: -- type: markdown - attributes: - value: | - ⚠ - Verify first that your issue is not [already reported on GitHub][issue search]. - Also test if the latest release and devel branch are affected too. - *Complete **all** sections as described, this form is processed automatically.* + - type: markdown + attributes: + value: | + ⚠ + Verify first that your issue is not [already reported on GitHub][issue search]. + Also test if the latest release and devel branch are affected too. + *Complete **all** sections as described, this form is processed automatically.* - [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues + [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues -- type: textarea - attributes: - label: Summary - description: Describe the new feature/improvement briefly below. - placeholder: >- - I am trying to do X with the collection from the main branch on GitHub and - I think that implementing a feature Y would be very helpful for me and - every other user of community.general because of Z.
- validations: - required: true - -- type: dropdown - attributes: - label: Issue Type - # FIXME: Once GitHub allows defining the default choice, update this - options: - - Feature Idea - validations: - required: true - -- type: input - attributes: - label: Component Name - description: >- - Write the short name of the module or plugin, or which other part(s) of the collection this feature affects. - *use your best guess if unsure*. Do not include `community.general.`! - placeholder: dnf, apt, yum, pip, user etc. - validations: - required: true - -- type: textarea - attributes: - label: Additional Information - description: | - Describe how the feature would be used, why it is needed and what it would solve. - - **HINT:** You can paste https://gist.github.com links for larger files. - value: | - - ```yaml (paste below) - - ``` - validations: - required: false -- type: checkboxes - attributes: - label: Code of Conduct - description: | - Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. - options: - - label: I agree to follow the Ansible Code of Conduct + - type: textarea + attributes: + label: Summary + description: Describe the new feature/improvement briefly below. + placeholder: >- + I am trying to do X with the collection from the main branch on GitHub and + I think that implementing a feature Y would be very helpful for me and + every other user of community.general because of Z. + validations: required: true + + - type: dropdown + attributes: + label: Issue Type + # FIXME: Once GitHub allows defining the default choice, update this + options: + - Feature Idea + validations: + required: true + + - type: input + attributes: + label: Component Name + description: >- + Write the short name of the module or plugin, or which other part(s) of the collection this feature affects. + *use your best guess if unsure*. Do not include `community.general.`! + placeholder: dnf, apt, yum, pip, user etc. + validations: + required: true + + - type: textarea + attributes: + label: Additional Information + description: | + Describe how the feature would be used, why it is needed and what it would solve. + + **HINT:** You can paste https://gist.github.com links for larger files. + value: | + + ```yaml (paste below) + + ``` + validations: + required: false + - type: checkboxes + attributes: + label: Code of Conduct + description: | + Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. + options: + - label: I agree to follow the Ansible Code of Conduct + required: true ... diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 2f4ff900d8..f71b322d2a 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -9,3 +9,7 @@ updates: directory: "/" schedule: interval: "weekly" + groups: + ci: + patterns: + - "*" diff --git a/.github/workflows/ansible-test.yml b/.github/workflows/ansible-test.yml index ca06791a38..89b36e6163 100644 --- a/.github/workflows/ansible-test.yml +++ b/.github/workflows/ansible-test.yml @@ -7,7 +7,7 @@ # https://github.com/marketplace/actions/ansible-test name: EOL CI -on: +"on": # Run EOL CI against all pushes (direct commits, also merged PRs), Pull Requests push: branches: @@ -29,12 +29,7 @@ jobs: strategy: matrix: ansible: - - '2.15' - # Ansible-test on various stable branches does not yet work well with cgroups v2. 
- # Since ubuntu-latest now uses Ubuntu 22.04, we need to fall back to the ubuntu-20.04 - # image for these stable branches. The list of branches where this is necessary will - # shrink over time, check out https://github.com/ansible-collections/news-for-maintainers/issues/28 - # for the latest list. + - '2.16' runs-on: ubuntu-latest steps: - name: Perform sanity testing @@ -45,13 +40,10 @@ jobs: coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }} pull-request-change-detection: 'true' testing-type: sanity + pre-test-cmd: >- + git clone --depth=1 --single-branch https://github.com/ansible-collections/community.internal_test_tools.git ../../community/internal_test_tools units: - # Ansible-test on various stable branches does not yet work well with cgroups v2. - # Since ubuntu-latest now uses Ubuntu 22.04, we need to fall back to the ubuntu-20.04 - # image for these stable branches. The list of branches where this is necessary will - # shrink over time, check out https://github.com/ansible-collections/news-for-maintainers/issues/28 - # for the latest list. runs-on: ubuntu-latest name: EOL Units (Ⓐ${{ matrix.ansible }}+py${{ matrix.python }}) strategy: @@ -65,12 +57,12 @@ jobs: exclude: - ansible: '' include: - - ansible: '2.15' + - ansible: '2.16' python: '2.7' - - ansible: '2.15' - python: '3.5' - - ansible: '2.15' - python: '3.10' + - ansible: '2.16' + python: '3.6' + - ansible: '2.16' + python: '3.11' steps: - name: >- @@ -90,11 +82,6 @@ jobs: testing-type: units integration: - # Ansible-test on various stable branches does not yet work well with cgroups v2. - # Since ubuntu-latest now uses Ubuntu 22.04, we need to fall back to the ubuntu-20.04 - # image for these stable branches. The list of branches where this is necessary will - # shrink over time, check out https://github.com/ansible-collections/news-for-maintainers/issues/28 - # for the latest list. runs-on: ubuntu-latest name: EOL I (Ⓐ${{ matrix.ansible }}+${{ matrix.docker }}+py${{ matrix.python }}:${{ matrix.target }}) strategy: @@ -111,43 +98,56 @@ jobs: exclude: - ansible: '' include: - # 2.15 - - ansible: '2.15' - docker: alpine3 + # 2.16 + # CentOS 7 does not work in GHA, that's why it's not listed here. + - ansible: '2.16' + docker: fedora38 python: '' target: azp/posix/1/ - - ansible: '2.15' - docker: alpine3 + - ansible: '2.16' + docker: fedora38 python: '' target: azp/posix/2/ - - ansible: '2.15' - docker: alpine3 + - ansible: '2.16' + docker: fedora38 python: '' target: azp/posix/3/ - - ansible: '2.15' - docker: fedora37 + - ansible: '2.16' + docker: opensuse15 python: '' target: azp/posix/1/ - - ansible: '2.15' - docker: fedora37 + - ansible: '2.16' + docker: opensuse15 python: '' target: azp/posix/2/ - - ansible: '2.15' - docker: fedora37 + - ansible: '2.16' + docker: opensuse15 + python: '' + target: azp/posix/3/ + - ansible: '2.16' + docker: alpine3 + python: '' + target: azp/posix/1/ + - ansible: '2.16' + docker: alpine3 + python: '' + target: azp/posix/2/ + - ansible: '2.16' + docker: alpine3 python: '' target: azp/posix/3/ # Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled. 
- # - ansible: '2.13' + # - ansible: '2.16' # docker: default - # python: '3.9' + # python: '2.7' # target: azp/generic/1/ - # - ansible: '2.14' + # - ansible: '2.16' # docker: default - # python: '3.10' + # python: '3.6' # target: azp/generic/1/ - # - ansible: '2.15' + # - ansible: '2.16' # docker: default - # python: '3.9' + # python: '3.11' # target: azp/generic/1/ steps: @@ -164,12 +164,15 @@ jobs: integration-continue-on-error: 'false' integration-diff: 'false' integration-retry-on-error: 'true' + # TODO: remove "--branch stable-2" from community.crypto install once we're only using ansible-core 2.17 or newer! pre-test-cmd: >- mkdir -p ../../ansible ; git clone --depth=1 --single-branch https://github.com/ansible-collections/ansible.posix.git ../../ansible/posix ; - git clone --depth=1 --single-branch https://github.com/ansible-collections/community.crypto.git ../../community/crypto + git clone --depth=1 --single-branch --branch stable-2 https://github.com/ansible-collections/community.crypto.git ../../community/crypto + ; + git clone --depth=1 --single-branch https://github.com/ansible-collections/community.docker.git ../../community/docker ; git clone --depth=1 --single-branch https://github.com/ansible-collections/community.internal_test_tools.git ../../community/internal_test_tools pull-request-change-detection: 'true' diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index e8572fafb6..ec344315bb 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -5,7 +5,7 @@ name: "Code scanning - action" -on: +"on": schedule: - cron: '26 19 * * 1' workflow_dispatch: @@ -23,16 +23,16 @@ jobs: runs-on: ubuntu-latest steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - persist-credentials: false + - name: Checkout repository + uses: actions/checkout@v4 + with: + persist-credentials: false - # Initializes the CodeQL tools for scanning. - - name: Initialize CodeQL - uses: github/codeql-action/init@v3 - with: - languages: python + # Initializes the CodeQL tools for scanning. 
+ - name: Initialize CodeQL + uses: github/codeql-action/init@v3 + with: + languages: python - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v3 + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v3 diff --git a/.github/workflows/import-galaxy.yml b/.github/workflows/import-galaxy.yml deleted file mode 100644 index 0c0ee402a8..0000000000 --- a/.github/workflows/import-galaxy.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -name: import-galaxy -'on': - # Run CI against all pushes (direct commits, also merged PRs) to main, and all Pull Requests - push: - branches: - - main - - stable-* - pull_request: - -jobs: - import-galaxy: - permissions: - contents: read - name: Test to import built collection artifact with Galaxy importer - uses: ansible-community/github-action-test-galaxy-import/.github/workflows/test-galaxy-import.yml@main diff --git a/.github/workflows/nox.yml b/.github/workflows/nox.yml new file mode 100644 index 0000000000..bed8dff985 --- /dev/null +++ b/.github/workflows/nox.yml @@ -0,0 +1,28 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +name: nox +'on': + push: + branches: + - main + - stable-* + pull_request: + # Run CI once per day (at 08:00 UTC) + schedule: + - cron: '0 8 * * *' + workflow_dispatch: + +jobs: + nox: + runs-on: ubuntu-latest + name: "Run extra sanity tests" + steps: + - name: Check out collection + uses: actions/checkout@v4 + with: + persist-credentials: false + - name: Run nox + uses: ansible-community/antsibull-nox@main diff --git a/.github/workflows/reuse.yml b/.github/workflows/reuse.yml deleted file mode 100644 index 3c5e986e57..0000000000 --- a/.github/workflows/reuse.yml +++ /dev/null @@ -1,35 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -name: Verify REUSE - -on: - push: - branches: - - main - - stable-* - pull_request: - types: [opened, synchronize, reopened] - branches: - - main - - stable-* - # Run CI once per day (at 07:30 UTC) - schedule: - - cron: '30 7 * * *' - -jobs: - check: - permissions: - contents: read - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v4 - with: - persist-credentials: false - ref: ${{ github.event.pull_request.head.sha || '' }} - - - name: REUSE Compliance Check - uses: fsfe/reuse-action@v5 diff --git a/.gitignore b/.gitignore index cf1f74e41c..5c6e9c86c6 100644 --- a/.gitignore +++ b/.gitignore @@ -383,6 +383,16 @@ cython_debug/ # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
#.idea/ +### Python Patch ### +# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration +poetry.toml + +# ruff +.ruff_cache/ + +# LSP config files +pyrightconfig.json + ### Vim ### # Swap [._]*.s[a-v][a-z] @@ -482,6 +492,10 @@ tags # https://plugins.jetbrains.com/plugin/12206-codestream .idea/codestream.xml +# Azure Toolkit for IntelliJ plugin +# https://plugins.jetbrains.com/plugin/8053-azure-toolkit-for-intellij +.idea/**/azureSettings.xml + ### Windows ### # Windows thumbnail cache files Thumbs.db diff --git a/.reuse/dep5 b/.reuse/dep5 deleted file mode 100644 index 0c3745ebf8..0000000000 --- a/.reuse/dep5 +++ /dev/null @@ -1,5 +0,0 @@ -Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ - -Files: changelogs/fragments/* -Copyright: Ansible Project -License: GPL-3.0-or-later diff --git a/.yamllint b/.yamllint new file mode 100644 index 0000000000..c10d86ab19 --- /dev/null +++ b/.yamllint @@ -0,0 +1,52 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +extends: default + +ignore: | + /changelogs/ + +rules: + line-length: + max: 1000 + level: error + document-start: disable + document-end: disable + truthy: + level: error + allowed-values: + - 'true' + - 'false' + indentation: + spaces: 2 + indent-sequences: true + key-duplicates: enable + trailing-spaces: enable + new-line-at-end-of-file: disable + hyphens: + max-spaces-after: 1 + empty-lines: + max: 2 + max-start: 0 + max-end: 0 + commas: + max-spaces-before: 0 + min-spaces-after: 1 + max-spaces-after: 1 + colons: + max-spaces-before: 0 + max-spaces-after: 1 + brackets: + min-spaces-inside: 0 + max-spaces-inside: 0 + braces: + min-spaces-inside: 0 + max-spaces-inside: 1 + octal-values: + forbid-implicit-octal: true + forbid-explicit-octal: true + comments: + min-spaces-from-content: 1 + comments-indentation: false diff --git a/CHANGELOG.md b/CHANGELOG.md index cfd526da94..b35c52441b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,660 +1,5 @@ -# Community General Release Notes +# Placeholder changelog -**Topics** - -- v10\.2\.0 - - Release Summary - - Minor Changes - - Deprecated Features - - Security Fixes - - Bugfixes - - New Plugins - - Inventory - - New Modules -- v10\.1\.0 - - Release Summary - - Minor Changes - - Deprecated Features - - Bugfixes - - New Plugins - - Filter - - New Modules -- v10\.0\.1 - - Release Summary - - Bugfixes -- v10\.0\.0 - - Release Summary - - Minor Changes - - Breaking Changes / Porting Guide - - Deprecated Features - - Removed Features \(previously deprecated\) - - Bugfixes - - Known Issues - - New Plugins - - Filter - - Test - - New Modules -This changelog describes changes after version 9\.0\.0\. - - -## v10\.2\.0 - - -### Release Summary - -Regular bugfix and feature release\. - - -### Minor Changes - -* bitwarden lookup plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\. -* cgroup\_memory\_recap callback plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\. 
-* chef\_databag lookup plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\. -* chroot connection plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\. -* chroot connection plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9322](https\://github\.com/ansible\-collections/community\.general/pull/9322)\)\. -* cobbler inventory plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\. -* cobbler inventory plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9323](https\://github\.com/ansible\-collections/community\.general/pull/9323)\)\. -* collection\_version lookup plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\. -* consul\_kv lookup plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\. -* context\_demo callback plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\. -* counter\_enabled callback plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\. -* credstash lookup plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\. -* cyberarkpassword lookup plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\. -* cyberarkpassword lookup plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\. -* dense callback plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\. -* dependent lookup plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\. -* dig lookup plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\. 
-* dig lookup plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\. -* diy callback plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\. -* dnstxt lookup plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\. -* dnstxt lookup plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\. -* doas become plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9319](https\://github\.com/ansible\-collections/community\.general/pull/9319)\)\. -* dsv lookup plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\. -* dzdo become plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9319](https\://github\.com/ansible\-collections/community\.general/pull/9319)\)\. -* elastic callback plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\. -* etcd lookup plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\. -* etcd3 lookup plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\. -* etcd3 lookup plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\. -* filetree lookup plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\. -* from\_csv filter plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\. -* from\_ini filter plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\. -* funcd connection plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9322](https\://github\.com/ansible\-collections/community\.general/pull/9322)\)\. 
-* github\_app\_access\_token lookup plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\. -* gitlab\_instance\_variable \- add support for raw variables suboption \([https\://github\.com/ansible\-collections/community\.general/pull/9425](https\://github\.com/ansible\-collections/community\.general/pull/9425)\)\. -* gitlab\_runners inventory plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\. -* gitlab\_runners inventory plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9323](https\://github\.com/ansible\-collections/community\.general/pull/9323)\)\. -* hiera lookup plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\. -* icinga2 inventory plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9323](https\://github\.com/ansible\-collections/community\.general/pull/9323)\)\. -* incus connection plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9322](https\://github\.com/ansible\-collections/community\.general/pull/9322)\)\. -* iocage connection plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9322](https\://github\.com/ansible\-collections/community\.general/pull/9322)\)\. -* iocage inventory plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\. -* iptables\_state action plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9318](https\://github\.com/ansible\-collections/community\.general/pull/9318)\)\. -* jabber callback plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\. -* jail connection plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9322](https\://github\.com/ansible\-collections/community\.general/pull/9322)\)\. -* keycloak \- add an action group for Keycloak modules to allow module\_defaults to be set for Keycloak tasks \([https\://github\.com/ansible\-collections/community\.general/pull/9284](https\://github\.com/ansible\-collections/community\.general/pull/9284)\)\. -* keyring lookup plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\. -* ksu become plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9319](https\://github\.com/ansible\-collections/community\.general/pull/9319)\)\. 
-* lastpass lookup plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\. -* linode inventory plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9323](https\://github\.com/ansible\-collections/community\.general/pull/9323)\)\. -* lmdb\_kv lookup plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\. -* lmdb\_kv lookup plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\. -* locale\_gen \- invert the logic to determine ubuntu\_mode\, making it look first for /etc/locale\.gen \(set ubuntu\_mode to False\) and only then looking for /var/lib/locales/supported\.d/ \(set ubuntu\_mode to True\) \([https\://github\.com/ansible\-collections/community\.general/pull/9238](https\://github\.com/ansible\-collections/community\.general/pull/9238)\, [https\://github\.com/ansible\-collections/community\.general/issues/9131](https\://github\.com/ansible\-collections/community\.general/issues/9131)\, [https\://github\.com/ansible\-collections/community\.general/issues/8487](https\://github\.com/ansible\-collections/community\.general/issues/8487)\)\. -* locale\_gen \- new return value mechanism to better express the semantics of the ubuntu\_mode\, with the possible values being either glibc \(ubuntu\_mode\=False\) or ubuntu\_legacy \(ubuntu\_mode\=True\) \([https\://github\.com/ansible\-collections/community\.general/pull/9238](https\://github\.com/ansible\-collections/community\.general/pull/9238)\)\. -* log\_plays callback plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\. -* loganalytics callback plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\. -* logdna callback plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\. -* logentries callback plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\. -* logentries callback plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\. -* lxc connection plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9322](https\://github\.com/ansible\-collections/community\.general/pull/9322)\)\. -* lxd connection plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9322](https\://github\.com/ansible\-collections/community\.general/pull/9322)\)\. 
-* lxd inventory plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\. -* lxd inventory plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9323](https\://github\.com/ansible\-collections/community\.general/pull/9323)\)\. -* machinectl become plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9319](https\://github\.com/ansible\-collections/community\.general/pull/9319)\)\. -* mail callback plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\. -* manageiq\_alert\_profiles \- improve handling of parameter requirements \([https\://github\.com/ansible\-collections/community\.general/pull/9449](https\://github\.com/ansible\-collections/community\.general/pull/9449)\)\. -* manifold lookup plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\. -* manifold lookup plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\. -* memcached cache plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9320](https\://github\.com/ansible\-collections/community\.general/pull/9320)\)\. -* merge\_variables lookup plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\. -* nmap inventory plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\. -* nmap inventory plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9323](https\://github\.com/ansible\-collections/community\.general/pull/9323)\)\. -* nrdp callback plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\. -* onepassword lookup plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\. -* onepassword lookup plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\. -* onepassword\_doc lookup plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\. 
-* online inventory plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9323](https\://github\.com/ansible\-collections/community\.general/pull/9323)\)\. -* opennebula inventory plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\. -* opennebula inventory plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9323](https\://github\.com/ansible\-collections/community\.general/pull/9323)\)\. -* opentelemetry callback plugin \- remove code handling Python versions prior to 3\.7 \([https\://github\.com/ansible\-collections/community\.general/pull/9482](https\://github\.com/ansible\-collections/community\.general/pull/9482)\)\. -* opentelemetry callback plugin \- remove code handling Python versions prior to 3\.7 \([https\://github\.com/ansible\-collections/community\.general/pull/9503](https\://github\.com/ansible\-collections/community\.general/pull/9503)\)\. -* opentelemetry callback plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\. -* pacemaker\_cluster \- remove unused code \([https\://github\.com/ansible\-collections/community\.general/pull/9471](https\://github\.com/ansible\-collections/community\.general/pull/9471)\)\. -* pacemaker\_cluster \- using safer mechanism to run external command \([https\://github\.com/ansible\-collections/community\.general/pull/9471](https\://github\.com/ansible\-collections/community\.general/pull/9471)\)\. -* passwordstore lookup plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\. -* pbrun become plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9319](https\://github\.com/ansible\-collections/community\.general/pull/9319)\)\. -* pfexec become plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9319](https\://github\.com/ansible\-collections/community\.general/pull/9319)\)\. -* pmrun become plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9319](https\://github\.com/ansible\-collections/community\.general/pull/9319)\)\. -* proxmox inventory plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\. -* proxmox inventory plugin \- strip whitespace from user\, token\_id\, and token\_secret \([https\://github\.com/ansible\-collections/community\.general/issues/9227](https\://github\.com/ansible\-collections/community\.general/issues/9227)\, [https\://github\.com/ansible\-collections/community\.general/pull/9228/](https\://github\.com/ansible\-collections/community\.general/pull/9228/)\)\. 
-* proxmox inventory plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9323](https\://github\.com/ansible\-collections/community\.general/pull/9323)\)\. -* proxmox module utils \- add method api\_task\_complete that can wait for task completion and return error message \([https\://github\.com/ansible\-collections/community\.general/pull/9256](https\://github\.com/ansible\-collections/community\.general/pull/9256)\)\. -* proxmox\_backup \- refactor permission checking to improve code readability and maintainability \([https\://github\.com/ansible\-collections/community\.general/pull/9239](https\://github\.com/ansible\-collections/community\.general/pull/9239)\)\. -* qubes connection plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9322](https\://github\.com/ansible\-collections/community\.general/pull/9322)\)\. -* random\_pet lookup plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\. -* redis cache plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\. -* redis cache plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9320](https\://github\.com/ansible\-collections/community\.general/pull/9320)\)\. -* redis lookup plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\. -* revbitspss lookup plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\. -* saltstack connection plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9322](https\://github\.com/ansible\-collections/community\.general/pull/9322)\)\. -* say callback plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\. -* scaleway inventory plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\. -* scaleway inventory plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9323](https\://github\.com/ansible\-collections/community\.general/pull/9323)\)\. -* selective callback plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\. -* sesu become plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9319](https\://github\.com/ansible\-collections/community\.general/pull/9319)\)\. 
-* shelvefile lookup plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\. -* shutdown action plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\. -* shutdown action plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9318](https\://github\.com/ansible\-collections/community\.general/pull/9318)\)\. -* slack callback plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\. -* slack callback plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\. -* splunk callback plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\. -* stackpath\_compute inventory plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9323](https\://github\.com/ansible\-collections/community\.general/pull/9323)\)\. -* sudosu become plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9319](https\://github\.com/ansible\-collections/community\.general/pull/9319)\)\. -* timestamp callback plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\. -* to\_ini filter plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\. -* tss lookup plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\. -* tss lookup plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\. -* unixy callback plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\. -* virtualbox inventory plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\. -* virtualbox inventory plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9323](https\://github\.com/ansible\-collections/community\.general/pull/9323)\)\. 
-* xbps \- add root and repository options to enable bootstrapping new void installations \([https\://github\.com/ansible\-collections/community\.general/pull/9174](https\://github\.com/ansible\-collections/community\.general/pull/9174)\)\. -* xen\_orchestra inventory plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9323](https\://github\.com/ansible\-collections/community\.general/pull/9323)\)\. -* xfconf \- add return value version \([https\://github\.com/ansible\-collections/community\.general/pull/9226](https\://github\.com/ansible\-collections/community\.general/pull/9226)\)\. -* xfconf\_info \- add return value version \([https\://github\.com/ansible\-collections/community\.general/pull/9226](https\://github\.com/ansible\-collections/community\.general/pull/9226)\)\. -* yaml callback plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\. -* zone connection plugin \- use f\-strings instead of interpolations or format \([https\://github\.com/ansible\-collections/community\.general/pull/9322](https\://github\.com/ansible\-collections/community\.general/pull/9322)\)\. -* zypper \- add quiet option \([https\://github\.com/ansible\-collections/community\.general/pull/9270](https\://github\.com/ansible\-collections/community\.general/pull/9270)\)\. -* zypper \- add simple\_errors option \([https\://github\.com/ansible\-collections/community\.general/pull/9270](https\://github\.com/ansible\-collections/community\.general/pull/9270)\)\. - - -### Deprecated Features - -* atomic\_container \- module is deprecated and will be removed in community\.general 13\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/9487](https\://github\.com/ansible\-collections/community\.general/pull/9487)\)\. -* atomic\_host \- module is deprecated and will be removed in community\.general 13\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/9487](https\://github\.com/ansible\-collections/community\.general/pull/9487)\)\. -* atomic\_image \- module is deprecated and will be removed in community\.general 13\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/9487](https\://github\.com/ansible\-collections/community\.general/pull/9487)\)\. -* facter \- module is deprecated and will be removed in community\.general 12\.0\.0\, use community\.general\.facter\_facts instead \([https\://github\.com/ansible\-collections/community\.general/pull/9451](https\://github\.com/ansible\-collections/community\.general/pull/9451)\)\. -* locale\_gen \- ubuntu\_mode\=True\, or mechanism\=ubuntu\_legacy is deprecated and will be removed in community\.general 13\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/9238](https\://github\.com/ansible\-collections/community\.general/pull/9238)\)\. -* pure module utils \- the module utils is deprecated and will be removed from community\.general 12\.0\.0\. The modules using this were removed in community\.general 3\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/9432](https\://github\.com/ansible\-collections/community\.general/pull/9432)\)\. -* purestorage doc fragments \- the doc fragment is deprecated and will be removed from community\.general 12\.0\.0\. 
The modules using this were removed in community\.general 3\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/9432](https\://github\.com/ansible\-collections/community\.general/pull/9432)\)\.
-* sensu\_check \- module is deprecated and will be removed in community\.general 13\.0\.0\, use collection sensu\.sensu\_go instead \([https\://github\.com/ansible\-collections/community\.general/pull/9483](https\://github\.com/ansible\-collections/community\.general/pull/9483)\)\.
-* sensu\_client \- module is deprecated and will be removed in community\.general 13\.0\.0\, use collection sensu\.sensu\_go instead \([https\://github\.com/ansible\-collections/community\.general/pull/9483](https\://github\.com/ansible\-collections/community\.general/pull/9483)\)\.
-* sensu\_handler \- module is deprecated and will be removed in community\.general 13\.0\.0\, use collection sensu\.sensu\_go instead \([https\://github\.com/ansible\-collections/community\.general/pull/9483](https\://github\.com/ansible\-collections/community\.general/pull/9483)\)\.
-* sensu\_silence \- module is deprecated and will be removed in community\.general 13\.0\.0\, use collection sensu\.sensu\_go instead \([https\://github\.com/ansible\-collections/community\.general/pull/9483](https\://github\.com/ansible\-collections/community\.general/pull/9483)\)\.
-* sensu\_subscription \- module is deprecated and will be removed in community\.general 13\.0\.0\, use collection sensu\.sensu\_go instead \([https\://github\.com/ansible\-collections/community\.general/pull/9483](https\://github\.com/ansible\-collections/community\.general/pull/9483)\)\.
-* slack \- the default value auto of the prepend\_hash option is deprecated and will change to never in community\.general 12\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/9443](https\://github\.com/ansible\-collections/community\.general/pull/9443)\)\.
-* yaml callback plugin \- deprecate plugin in favor of result\_format\=yaml in plugin ansible\.builtin\.default \([https\://github\.com/ansible\-collections/community\.general/pull/9456](https\://github\.com/ansible\-collections/community\.general/pull/9456)\)\.
-
-
-### Security Fixes
-
-* keycloak\_authentication \- API calls did not properly set the priority during update resulting in incorrectly sorted authentication flows\. This apparently only affects Keycloak 25 or newer \([https\://github\.com/ansible\-collections/community\.general/pull/9263](https\://github\.com/ansible\-collections/community\.general/pull/9263)\)\.
-
-
-### Bugfixes
-
-* dig lookup plugin \- correctly handle NoNameserver exception \([https\://github\.com/ansible\-collections/community\.general/pull/9363](https\://github\.com/ansible\-collections/community\.general/pull/9363)\, [https\://github\.com/ansible\-collections/community\.general/issues/9362](https\://github\.com/ansible\-collections/community\.general/issues/9362)\)\.
-* homebrew \- fix incorrect handling of aliased homebrew modules when the alias is requested \([https\://github\.com/ansible\-collections/community\.general/pull/9255](https\://github\.com/ansible\-collections/community\.general/pull/9255)\, [https\://github\.com/ansible\-collections/community\.general/issues/9240](https\://github\.com/ansible\-collections/community\.general/issues/9240)\)\.
-* htpasswd \- report changes when file permissions are adjusted \([https\://github\.com/ansible\-collections/community\.general/issues/9485](https\://github\.com/ansible\-collections/community\.general/issues/9485)\, [https\://github\.com/ansible\-collections/community\.general/pull/9490](https\://github\.com/ansible\-collections/community\.general/pull/9490)\)\. -* proxmox\_backup \- fix incorrect key lookup in vmid permission check \([https\://github\.com/ansible\-collections/community\.general/pull/9223](https\://github\.com/ansible\-collections/community\.general/pull/9223)\)\. -* proxmox\_disk \- fix async method and make resize\_disk method handle errors correctly \([https\://github\.com/ansible\-collections/community\.general/pull/9256](https\://github\.com/ansible\-collections/community\.general/pull/9256)\)\. -* proxmox\_template \- fix the wrong path called on proxmox\_template\.task\_status \([https\://github\.com/ansible\-collections/community\.general/issues/9276](https\://github\.com/ansible\-collections/community\.general/issues/9276)\, [https\://github\.com/ansible\-collections/community\.general/pull/9277](https\://github\.com/ansible\-collections/community\.general/pull/9277)\)\. -* qubes connection plugin \- fix the printing of debug information \([https\://github\.com/ansible\-collections/community\.general/pull/9334](https\://github\.com/ansible\-collections/community\.general/pull/9334)\)\. -* redfish\_utils module utils \- Fix VerifyBiosAttributes command on multi system resource nodes \([https\://github\.com/ansible\-collections/community\.general/pull/9234](https\://github\.com/ansible\-collections/community\.general/pull/9234)\)\. - - -### New Plugins - - -#### Inventory - -* community\.general\.iocage \- iocage inventory source\. - - -### New Modules - -* community\.general\.android\_sdk \- Manages Android SDK packages\. -* community\.general\.ldap\_inc \- Use the Modify\-Increment LDAP V3 feature to increment an attribute value\. -* community\.general\.systemd\_creds\_decrypt \- C\(systemd\)\'s C\(systemd\-creds decrypt\) plugin\. -* community\.general\.systemd\_creds\_encrypt \- C\(systemd\)\'s C\(systemd\-creds encrypt\) plugin\. - - -## v10\.1\.0 - - -### Release Summary - -Regular bugfix and feature release\. - - -### Minor Changes - -* alternatives \- add family parameter that allows to utilize the \-\-family option available in RedHat version of update\-alternatives \([https\://github\.com/ansible\-collections/community\.general/issues/5060](https\://github\.com/ansible\-collections/community\.general/issues/5060)\, [https\://github\.com/ansible\-collections/community\.general/pull/9096](https\://github\.com/ansible\-collections/community\.general/pull/9096)\)\. -* cloudflare\_dns \- add support for comment and tags \([https\://github\.com/ansible\-collections/community\.general/pull/9132](https\://github\.com/ansible\-collections/community\.general/pull/9132)\)\. -* deps module utils \- add deps\.clear\(\) to clear out previously declared dependencies \([https\://github\.com/ansible\-collections/community\.general/pull/9179](https\://github\.com/ansible\-collections/community\.general/pull/9179)\)\. -* homebrew \- greatly speed up module when multiple packages are passed in the name option \([https\://github\.com/ansible\-collections/community\.general/pull/9181](https\://github\.com/ansible\-collections/community\.general/pull/9181)\)\. 
-* homebrew \- remove duplicated package name validation \([https\://github\.com/ansible\-collections/community\.general/pull/9076](https\://github\.com/ansible\-collections/community\.general/pull/9076)\)\.
-* iso\_extract \- adds password parameter that is passed to 7z \([https\://github\.com/ansible\-collections/community\.general/pull/9159](https\://github\.com/ansible\-collections/community\.general/pull/9159)\)\.
-* launchd \- add plist option for services such as sshd\, where the plist filename doesn\'t match the service name \([https\://github\.com/ansible\-collections/community\.general/pull/9102](https\://github\.com/ansible\-collections/community\.general/pull/9102)\)\.
-* nmcli \- add sriov parameter that enables support for SR\-IOV settings \([https\://github\.com/ansible\-collections/community\.general/pull/9168](https\://github\.com/ansible\-collections/community\.general/pull/9168)\)\.
-* pipx \- add return value version \([https\://github\.com/ansible\-collections/community\.general/pull/9180](https\://github\.com/ansible\-collections/community\.general/pull/9180)\)\.
-* pipx\_info \- add return value version \([https\://github\.com/ansible\-collections/community\.general/pull/9180](https\://github\.com/ansible\-collections/community\.general/pull/9180)\)\.
-* proxmox\_template \- add server side artifact fetching support \([https\://github\.com/ansible\-collections/community\.general/pull/9113](https\://github\.com/ansible\-collections/community\.general/pull/9113)\)\.
-* redfish\_command \- add update\_custom\_oem\_header\, update\_custom\_oem\_params\, and update\_custom\_oem\_mime\_type options \([https\://github\.com/ansible\-collections/community\.general/pull/9123](https\://github\.com/ansible\-collections/community\.general/pull/9123)\)\.
-* redfish\_utils module utils \- remove redundant code \([https\://github\.com/ansible\-collections/community\.general/pull/9190](https\://github\.com/ansible\-collections/community\.general/pull/9190)\)\.
-* rpm\_ostree\_pkg \- added the option apply\_live \([https\://github\.com/ansible\-collections/community\.general/pull/9167](https\://github\.com/ansible\-collections/community\.general/pull/9167)\)\.
-* rpm\_ostree\_pkg \- added the return value needs\_reboot \([https\://github\.com/ansible\-collections/community\.general/pull/9167](https\://github\.com/ansible\-collections/community\.general/pull/9167)\)\.
-* scaleway\_lb \- minor simplification in the code \([https\://github\.com/ansible\-collections/community\.general/pull/9189](https\://github\.com/ansible\-collections/community\.general/pull/9189)\)\.
-* ssh\_config \- add dynamicforward option \([https\://github\.com/ansible\-collections/community\.general/pull/9192](https\://github\.com/ansible\-collections/community\.general/pull/9192)\)\. See the sketch following the Deprecated Features list below\.
-
-
-### Deprecated Features
-
-* opkg \- deprecate value \"\" for parameter force \([https\://github\.com/ansible\-collections/community\.general/pull/9172](https\://github\.com/ansible\-collections/community\.general/pull/9172)\)\.
-* redfish\_utils module utils \- deprecate method RedfishUtils\.\_init\_session\(\) \([https\://github\.com/ansible\-collections/community\.general/pull/9190](https\://github\.com/ansible\-collections/community\.general/pull/9190)\)\.
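Aside on the ssh_config entry above: a minimal sketch of what a task using the new dynamicforward option might look like. The config file path, host alias, and port are hypothetical, and the value is assumed to follow OpenSSH's DynamicForward syntax of [bind_address:]port.

```yaml
---
# Hypothetical playbook snippet; all names and values are illustrative only.
- name: Open a SOCKS proxy on local port 10080 when connecting to the jump host
  community.general.ssh_config:
    ssh_config_file: /home/alice/.ssh/config  # hypothetical per-user config file
    host: jump.example.com                    # hypothetical host alias
    dynamicforward: '10080'                   # assumed [bind_address:]port value
    state: present
```

If the option behaves like its OpenSSH counterpart, the resulting Host block gains a DynamicForward 10080 line, the same effect as passing -D 10080 on the ssh command line.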
- - -### Bugfixes - -* dnf\_config\_manager \- fix hanging when prompting to import GPG keys \([https\://github\.com/ansible\-collections/community\.general/pull/9124](https\://github\.com/ansible\-collections/community\.general/pull/9124)\, [https\://github\.com/ansible\-collections/community\.general/issues/8830](https\://github\.com/ansible\-collections/community\.general/issues/8830)\)\. -* dnf\_config\_manager \- forces locale to C before module starts\. If the locale was set to non\-English\, the output of the dnf config\-manager could not be parsed \([https\://github\.com/ansible\-collections/community\.general/pull/9157](https\://github\.com/ansible\-collections/community\.general/pull/9157)\, [https\://github\.com/ansible\-collections/community\.general/issues/9046](https\://github\.com/ansible\-collections/community\.general/issues/9046)\)\. -* flatpak \- force the locale language to C when running the flatpak command \([https\://github\.com/ansible\-collections/community\.general/pull/9187](https\://github\.com/ansible\-collections/community\.general/pull/9187)\, [https\://github\.com/ansible\-collections/community\.general/issues/8883](https\://github\.com/ansible\-collections/community\.general/issues/8883)\)\. -* gio\_mime \- fix command line when determining version of gio \([https\://github\.com/ansible\-collections/community\.general/pull/9171](https\://github\.com/ansible\-collections/community\.general/pull/9171)\, [https\://github\.com/ansible\-collections/community\.general/issues/9158](https\://github\.com/ansible\-collections/community\.general/issues/9158)\)\. -* github\_key \- in check mode\, a faulty call to \`datetime\.strftime\(\.\.\.\)\` was being made which generated an exception \([https\://github\.com/ansible\-collections/community\.general/issues/9185](https\://github\.com/ansible\-collections/community\.general/issues/9185)\)\. -* homebrew\_cask \- allow \+ symbol in Homebrew cask name validation regex \([https\://github\.com/ansible\-collections/community\.general/pull/9128](https\://github\.com/ansible\-collections/community\.general/pull/9128)\)\. -* keycloak\_clientscope\_type \- sort the default and optional clientscope lists to improve the diff \([https\://github\.com/ansible\-collections/community\.general/pull/9202](https\://github\.com/ansible\-collections/community\.general/pull/9202)\)\. -* slack \- fail if Slack API response is not OK with error message \([https\://github\.com/ansible\-collections/community\.general/pull/9198](https\://github\.com/ansible\-collections/community\.general/pull/9198)\)\. - - -### New Plugins - - -#### Filter - -* community\.general\.accumulate \- Produce a list of accumulated sums of the input list contents\. - - -### New Modules - -* community\.general\.decompress \- Decompresses compressed files\. -* community\.general\.proxmox\_backup \- Start a VM backup in Proxmox VE cluster\. - - -## v10\.0\.1 - - -### Release Summary - -Bugfix release for inclusion in Ansible 11\.0\.0rc1\. - - -### Bugfixes - -* keycloak\_client \- fix diff by removing code that turns the attributes dict which contains additional settings into a list \([https\://github\.com/ansible\-collections/community\.general/pull/9077](https\://github\.com/ansible\-collections/community\.general/pull/9077)\)\. 
-* keycloak\_clientscope \- fix diff and end\_state by removing the code that turns the attributes dict\, which contains additional config items\, into a list \([https\://github\.com/ansible\-collections/community\.general/pull/9082](https\://github\.com/ansible\-collections/community\.general/pull/9082)\)\. -* redfish\_utils module utils \- remove undocumented default applytime \([https\://github\.com/ansible\-collections/community\.general/pull/9114](https\://github\.com/ansible\-collections/community\.general/pull/9114)\)\. - - -## v10\.0\.0 - - -### Release Summary - -This is release 10\.0\.0 of community\.general\, released on 2024\-11\-04\. - - -### Minor Changes - -* CmdRunner module util \- argument formats can be specified as plain functions without calling cmd\_runner\_fmt\.as\_func\(\) \([https\://github\.com/ansible\-collections/community\.general/pull/8479](https\://github\.com/ansible\-collections/community\.general/pull/8479)\)\. -* CmdRunner module utils \- the parameter force\_lang now supports the special value auto which will automatically try and determine the best parsable locale in the system \([https\://github\.com/ansible\-collections/community\.general/pull/8517](https\://github\.com/ansible\-collections/community\.general/pull/8517)\)\. -* MH module utils \- add parameter when to cause\_changes decorator \([https\://github\.com/ansible\-collections/community\.general/pull/8766](https\://github\.com/ansible\-collections/community\.general/pull/8766)\)\. -* MH module utils \- minor refactor in decorators \([https\://github\.com/ansible\-collections/community\.general/pull/8766](https\://github\.com/ansible\-collections/community\.general/pull/8766)\)\. -* alternatives \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\. -* ansible\_galaxy\_install \- add return value version \([https\://github\.com/ansible\-collections/community\.general/pull/9060](https\://github\.com/ansible\-collections/community\.general/pull/9060)\)\. -* ansible\_galaxy\_install \- add upgrade feature \([https\://github\.com/ansible\-collections/community\.general/pull/8431](https\://github\.com/ansible\-collections/community\.general/pull/8431)\, [https\://github\.com/ansible\-collections/community\.general/issues/8351](https\://github\.com/ansible\-collections/community\.general/issues/8351)\)\. -* ansible\_galaxy\_install \- minor refactor in the module \([https\://github\.com/ansible\-collections/community\.general/pull/8413](https\://github\.com/ansible\-collections/community\.general/pull/8413)\)\. -* apache2\_mod\_proxy \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\. -* apache2\_mod\_proxy \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\. -* cargo \- add option directory\, which allows source directory to be specified \([https\://github\.com/ansible\-collections/community\.general/pull/8480](https\://github\.com/ansible\-collections/community\.general/pull/8480)\)\. 
-* cgroup\_memory\_recap\, hipchat\, jabber\, log\_plays\, loganalytics\, logentries\, logstash\, slack\, splunk\, sumologic\, syslog\_json callback plugins \- make sure that all options are typed \([https\://github\.com/ansible\-collections/community\.general/pull/8628](https\://github\.com/ansible\-collections/community\.general/pull/8628)\)\.
-* chef\_databag\, consul\_kv\, cyberarkpassword\, dsv\, etcd\, filetree\, hiera\, onepassword\, onepassword\_doc\, onepassword\_raw\, passwordstore\, redis\, shelvefile\, tss lookup plugins \- make sure that all options are typed \([https\://github\.com/ansible\-collections/community\.general/pull/8626](https\://github\.com/ansible\-collections/community\.general/pull/8626)\)\.
-* chroot\, funcd\, incus\, iocage\, jail\, lxc\, lxd\, qubes\, zone connection plugins \- make sure that all options are typed \([https\://github\.com/ansible\-collections/community\.general/pull/8627](https\://github\.com/ansible\-collections/community\.general/pull/8627)\)\.
-* cmd\_runner module utils \- add decorator cmd\_runner\_fmt\.stack \([https\://github\.com/ansible\-collections/community\.general/pull/8415](https\://github\.com/ansible\-collections/community\.general/pull/8415)\)\.
-* cmd\_runner module utils \- refactor argument formatting code to its own Python module \([https\://github\.com/ansible\-collections/community\.general/pull/8964](https\://github\.com/ansible\-collections/community\.general/pull/8964)\)\.
-* cmd\_runner\_fmt module utils \- simplify implementation of cmd\_runner\_fmt\.as\_bool\_not\(\) \([https\://github\.com/ansible\-collections/community\.general/pull/8512](https\://github\.com/ansible\-collections/community\.general/pull/8512)\)\.
-* cobbler\, linode\, lxd\, nmap\, online\, scaleway\, stackpath\_compute\, virtualbox inventory plugins \- make sure that all options are typed \([https\://github\.com/ansible\-collections/community\.general/pull/8625](https\://github\.com/ansible\-collections/community\.general/pull/8625)\)\.
-* consul\_acl \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* consul\_kv \- add argument for the datacenter option on Consul API \([https\://github\.com/ansible\-collections/community\.general/pull/9026](https\://github\.com/ansible\-collections/community\.general/pull/9026)\)\.
-* copr \- Added includepkgs and excludepkgs parameters to limit the list of packages fetched or excluded from the repository \([https\://github\.com/ansible\-collections/community\.general/pull/8779](https\://github\.com/ansible\-collections/community\.general/pull/8779)\)\.
-* cpanm \- add return value cpanm\_version \([https\://github\.com/ansible\-collections/community\.general/pull/9061](https\://github\.com/ansible\-collections/community\.general/pull/9061)\)\.
-* credstash lookup plugin \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* csv module utils \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* deco MH module utils \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* dig lookup plugin \- add port option to specify DNS server port \([https\://github\.com/ansible\-collections/community\.general/pull/8966](https\://github\.com/ansible\-collections/community\.general/pull/8966)\)\.
-* django module utils \- always retrieve version \([https\://github\.com/ansible\-collections/community\.general/pull/9063](https\://github\.com/ansible\-collections/community\.general/pull/9063)\)\.
-* django\_check \- add return value version \([https\://github\.com/ansible\-collections/community\.general/pull/9063](https\://github\.com/ansible\-collections/community\.general/pull/9063)\)\.
-* django\_command \- add return value version \([https\://github\.com/ansible\-collections/community\.general/pull/9063](https\://github\.com/ansible\-collections/community\.general/pull/9063)\)\.
-* django\_createcachetable \- add return value version \([https\://github\.com/ansible\-collections/community\.general/pull/9063](https\://github\.com/ansible\-collections/community\.general/pull/9063)\)\.
-* doas\, dzdo\, ksu\, machinectl\, pbrun\, pfexec\, pmrun\, sesu\, sudosu become plugins \- make sure that all options are typed \([https\://github\.com/ansible\-collections/community\.general/pull/8623](https\://github\.com/ansible\-collections/community\.general/pull/8623)\)\.
-* etcd3 \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* flatpak \- improve the parsing of Flatpak application IDs based on official guidelines \([https\://github\.com/ansible\-collections/community\.general/pull/8909](https\://github\.com/ansible\-collections/community\.general/pull/8909)\)\.
-* gconftool2 \- make use of ModuleHelper features to simplify code \([https\://github\.com/ansible\-collections/community\.general/pull/8711](https\://github\.com/ansible\-collections/community\.general/pull/8711)\)\.
-* gconftool2 \- add return value version \([https\://github\.com/ansible\-collections/community\.general/pull/9064](https\://github\.com/ansible\-collections/community\.general/pull/9064)\)\.
-* gconftool2 module utils \- add argument formatter version \([https\://github\.com/ansible\-collections/community\.general/pull/9064](https\://github\.com/ansible\-collections/community\.general/pull/9064)\)\.
-* gconftool2\_info \- add return value version \([https\://github\.com/ansible\-collections/community\.general/pull/9064](https\://github\.com/ansible\-collections/community\.general/pull/9064)\)\.
-* gio\_mime \- add return value version \([https\://github\.com/ansible\-collections/community\.general/pull/9067](https\://github\.com/ansible\-collections/community\.general/pull/9067)\)\.
-* gio\_mime \- adjust code ahead of the old VarDict deprecation \([https\://github\.com/ansible\-collections/community\.general/pull/8855](https\://github\.com/ansible\-collections/community\.general/pull/8855)\)\.
-* gio\_mime \- mute the old VarDict deprecation \([https\://github\.com/ansible\-collections/community\.general/pull/8776](https\://github\.com/ansible\-collections/community\.general/pull/8776)\)\.
-* gio\_mime module utils \- add argument formatter version \([https\://github\.com/ansible\-collections/community\.general/pull/9067](https\://github\.com/ansible\-collections/community\.general/pull/9067)\)\. -* github\_app\_access\_token lookup plugin \- adds new private\_key parameter \([https\://github\.com/ansible\-collections/community\.general/pull/8989](https\://github\.com/ansible\-collections/community\.general/pull/8989)\)\. -* gitlab\_deploy\_key \- better construct when using dict\.items\(\) \([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\. -* gitlab\_group \- add many new parameters \([https\://github\.com/ansible\-collections/community\.general/pull/8908](https\://github\.com/ansible\-collections/community\.general/pull/8908)\)\. -* gitlab\_group \- better construct when using dict\.items\(\) \([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\. -* gitlab\_group \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\. -* gitlab\_issue \- better construct when using dict\.items\(\) \([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\. -* gitlab\_merge\_request \- better construct when using dict\.items\(\) \([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\. -* gitlab\_project \- add option container\_expiration\_policy to schedule container registry cleanup \([https\://github\.com/ansible\-collections/community\.general/pull/8674](https\://github\.com/ansible\-collections/community\.general/pull/8674)\)\. -* gitlab\_project \- add option issues\_access\_level to enable/disable project issues \([https\://github\.com/ansible\-collections/community\.general/pull/8760](https\://github\.com/ansible\-collections/community\.general/pull/8760)\)\. -* gitlab\_project \- add option model\_registry\_access\_level to disable model registry \([https\://github\.com/ansible\-collections/community\.general/pull/8688](https\://github\.com/ansible\-collections/community\.general/pull/8688)\)\. -* gitlab\_project \- add option pages\_access\_level to disable project pages \([https\://github\.com/ansible\-collections/community\.general/pull/8688](https\://github\.com/ansible\-collections/community\.general/pull/8688)\)\. -* gitlab\_project \- add option repository\_access\_level to disable project repository \([https\://github\.com/ansible\-collections/community\.general/pull/8674](https\://github\.com/ansible\-collections/community\.general/pull/8674)\)\. -* gitlab\_project \- add option service\_desk\_enabled to disable service desk \([https\://github\.com/ansible\-collections/community\.general/pull/8688](https\://github\.com/ansible\-collections/community\.general/pull/8688)\)\. -* gitlab\_project \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\. 
-* gitlab_project - sorted parameters in order to avoid future merge conflicts (https://github.com/ansible-collections/community.general/pull/8759).
-* gitlab_runner - better construct when using dict.items() (https://github.com/ansible-collections/community.general/pull/8876).
-* hashids filter plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-* homebrew - speed up brew install and upgrade (https://github.com/ansible-collections/community.general/pull/9022).
-* hwc_ecs_instance - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-* hwc_evs_disk - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-* hwc_vpc_eip - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-* hwc_vpc_peering_connect - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-* hwc_vpc_port - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-* hwc_vpc_subnet - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-* icinga2_host - replace loop with dict comprehension (https://github.com/ansible-collections/community.general/pull/8876).
-* imc_rest - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-* ipa_dnsrecord - adds SSHFP record type for managing SSH fingerprints in FreeIPA DNS (https://github.com/ansible-collections/community.general/pull/8404); see the example below.
-* ipa_otptoken - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
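A minimal sketch of the new SSHFP support in ipa_dnsrecord referenced above; the host names, credentials, and fingerprint value are placeholders.

```yaml
- name: Publish an SSHFP record for a host in FreeIPA DNS
  community.general.ipa_dnsrecord:
    ipa_host: ipa.example.com
    ipa_user: admin
    ipa_pass: "{{ ipa_password }}"
    zone_name: example.com
    record_name: host01
    record_type: SSHFP
    # Placeholder fingerprint: "<algorithm> <fp-type> <fingerprint>"
    record_value: "1 1 0123456789abcdef0123456789abcdef01234567"
```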
-* jenkins_node - add offline_message parameter for updating a Jenkins node offline cause reason when the state is "disabled" (offline) (https://github.com/ansible-collections/community.general/pull/9084).
-* jira - adjust code ahead of the old VarDict deprecation (https://github.com/ansible-collections/community.general/pull/8856).
-* jira - mute the old VarDict deprecation (https://github.com/ansible-collections/community.general/pull/8776).
-* jira - replace deprecated params when using decorator cause_changes (https://github.com/ansible-collections/community.general/pull/8791).
-* keep_keys filter plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-* keycloak module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-* keycloak_client - add client-x509 choice to client_authenticator_type (https://github.com/ansible-collections/community.general/pull/8973).
-* keycloak_client - assign auth flow by name (https://github.com/ansible-collections/community.general/pull/8428).
-* keycloak_client - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-* keycloak_clientscope - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-* keycloak_identity_provider - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-* keycloak_realm - add boolean toggle to configure organization support for a given Keycloak realm (https://github.com/ansible-collections/community.general/issues/9027, https://github.com/ansible-collections/community.general/pull/8927/).
-* keycloak_user_federation - add module argument allowing users to opt out of the removal of unspecified mappers, for example to keep the Keycloak default mappers (https://github.com/ansible-collections/community.general/pull/8764).
-* keycloak_user_federation - add the user federation config parameter referral to the module arguments (https://github.com/ansible-collections/community.general/pull/8954).
-* keycloak_user_federation - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-* keycloak_user_federation - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-* keycloak_user_federation - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-* linode - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-* locale_gen - add support for multiple locales (https://github.com/ansible-collections/community.general/issues/8677, https://github.com/ansible-collections/community.general/pull/8682); see the example below this block.
-* lxc_container - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-* lxd_container - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-* manageiq_provider - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-* mattermost - adds support for message priority (https://github.com/ansible-collections/community.general/issues/9068, https://github.com/ansible-collections/community.general/pull/9087).
-* memcached, pickle, redis, yaml cache plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8624).
-* memset_dns_reload - replace loop with dict() (https://github.com/ansible-collections/community.general/pull/8876).
-* memset_memstore_info - replace loop with dict() (https://github.com/ansible-collections/community.general/pull/8876).
-* memset_server_info - replace loop with dict() (https://github.com/ansible-collections/community.general/pull/8876).
-* memset_zone - replace loop with dict() (https://github.com/ansible-collections/community.general/pull/8876).
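An example of the new multi-locale support in locale_gen referenced above: the name option now accepts a list, so several locales can be generated in a single task. The locale names are illustrative.

```yaml
- name: Generate several locales in one task
  community.general.locale_gen:
    name:
      - de_DE.UTF-8
      - fr_FR.UTF-8
    state: present
```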
-* memset_zone_domain - replace loop with dict() (https://github.com/ansible-collections/community.general/pull/8876).
-* memset_zone_record - replace loop with dict() (https://github.com/ansible-collections/community.general/pull/8876).
-* nmcli - add conn_enable param to reload connection (https://github.com/ansible-collections/community.general/issues/3752, https://github.com/ansible-collections/community.general/issues/8704, https://github.com/ansible-collections/community.general/pull/8897).
-* nmcli - add state=up and state=down to enable/disable connections (https://github.com/ansible-collections/community.general/issues/3752, https://github.com/ansible-collections/community.general/issues/8704, https://github.com/ansible-collections/community.general/issues/7152, https://github.com/ansible-collections/community.general/pull/8897); see the example below this block.
-* nmcli - better construct when using dict.items() (https://github.com/ansible-collections/community.general/pull/8876).
-* npm - add force parameter to allow --force (https://github.com/ansible-collections/community.general/pull/8885).
-* ocapi_utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-* one_image - add create, template and datastore_id arguments for image creation (https://github.com/ansible-collections/community.general/pull/9075).
-* one_image - add wait_timeout argument for adjustable timeouts (https://github.com/ansible-collections/community.general/pull/9075).
-* one_image - add option persistent to manage image persistence (https://github.com/ansible-collections/community.general/issues/3578, https://github.com/ansible-collections/community.general/pull/8889).
-* one_image - extend the XSD schema so that it returns much more information about the image (https://github.com/ansible-collections/community.general/pull/8889).
-* one_image - refactor code to make it more similar to one_template and one_vnet (https://github.com/ansible-collections/community.general/pull/8889).
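A sketch of the new nmcli state values from the entry above: state=up activates an existing connection profile without redefining it. The connection name is illustrative.

```yaml
- name: Activate an existing connection profile
  community.general.nmcli:
    conn_name: eth0-static
    state: up
```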
-* one_image_info - extend the XSD schema so that it returns much more information about the image (https://github.com/ansible-collections/community.general/pull/8889).
-* one_image_info - refactor code to make it more similar to one_template and one_vnet (https://github.com/ansible-collections/community.general/pull/8889).
-* one_service - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-* one_vm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-* onepassword lookup plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-* open_iscsi - allow login to a portal with multiple targets without specifying any of them (https://github.com/ansible-collections/community.general/pull/8719); see the example below this block.
-* openbsd_pkg - adds diff support to show changes in the installed package list. This does not yet work for check mode (https://github.com/ansible-collections/community.general/pull/8402).
-* opennebula.py - add VM id and VM host to inventory host data (https://github.com/ansible-collections/community.general/pull/8532).
-* opentelemetry callback plugin - fix default value for store_spans_in_file causing traces to be produced to a file named None (https://github.com/ansible-collections/community.general/issues/8566, https://github.com/ansible-collections/community.general/pull/8741).
-* opkg - add return value version (https://github.com/ansible-collections/community.general/pull/9086).
-* passwordstore lookup plugin - add subkey creation/update support (https://github.com/ansible-collections/community.general/pull/8952).
-* passwordstore lookup plugin - add the current user to the lockfile file name to address issues on multi-user systems (https://github.com/ansible-collections/community.general/pull/8689).
-* pids - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
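A minimal sketch of the open_iscsi change noted above: logging in to a portal without naming a target, so that all targets presented by the portal are used. The portal address is a placeholder.

```yaml
- name: Log in to every target presented by a portal
  community.general.open_iscsi:
    portal: 192.0.2.10
    login: true
```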
-* pipx - add parameter suffix to module (https://github.com/ansible-collections/community.general/pull/8675, https://github.com/ansible-collections/community.general/issues/8656).
-* pipx - added new states install_all, uninject, upgrade_shared, pin, and unpin (https://github.com/ansible-collections/community.general/pull/8809).
-* pipx - added parameter global to module (https://github.com/ansible-collections/community.general/pull/8793); see the example below this block.
-* pipx - refactor out parsing of pipx list output to module utils (https://github.com/ansible-collections/community.general/pull/9044).
-* pipx - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-* pipx_info - add new return value pinned (https://github.com/ansible-collections/community.general/pull/9044).
-* pipx_info - added parameter global to module (https://github.com/ansible-collections/community.general/pull/8793).
-* pipx_info - refactor out parsing of pipx list output to module utils (https://github.com/ansible-collections/community.general/pull/9044).
-* pipx_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-* pkg5_publisher - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-* pkgng - add option use_globs (default true) to optionally disable glob patterns (https://github.com/ansible-collections/community.general/issues/8632, https://github.com/ansible-collections/community.general/pull/8633).
-* proxmox - add disk_volume and mount_volumes keys for better readability (https://github.com/ansible-collections/community.general/pull/8542).
-* proxmox - allow specification of the API port when using proxmox_* (https://github.com/ansible-collections/community.general/issues/8440, https://github.com/ansible-collections/community.general/pull/8441).
-* proxmox - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
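A short example of the new pipx global option from the entries above; the application name is illustrative.

```yaml
- name: Install an application system-wide with pipx
  community.general.pipx:
    state: install
    name: black
    global: true
```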
-* proxmox - translate the old disk and mounts keys to the new handling internally (https://github.com/ansible-collections/community.general/pull/8542).
-* proxmox inventory plugin - add new fact for LXC interface details (https://github.com/ansible-collections/community.general/pull/8713).
-* proxmox inventory plugin - clean up authentication code (https://github.com/ansible-collections/community.general/pull/8917).
-* proxmox inventory plugin - fix urllib3 InsecureRequestWarnings not being suppressed when a token is used (https://github.com/ansible-collections/community.general/pull/9099).
-* proxmox_disk - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-* proxmox_kvm - adds the ciupgrade parameter to specify whether cloud-init should upgrade system packages at first boot (https://github.com/ansible-collections/community.general/pull/9066).
-* proxmox_kvm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-* proxmox_kvm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-* proxmox_template - small refactor in logic for determining whether a template exists or not (https://github.com/ansible-collections/community.general/pull/8516).
-* proxmox_vm_info - add network option to retrieve current network information (https://github.com/ansible-collections/community.general/pull/8471).
-* redfish_* modules - adds ciphers option for custom cipher selection (https://github.com/ansible-collections/community.general/pull/8533).
-* redfish_command - add UpdateUserAccountTypes command (https://github.com/ansible-collections/community.general/issues/9058, https://github.com/ansible-collections/community.general/pull/9059).
-* redfish_command - add wait and wait_timeout options to allow a user to block a command until a service is accessible after performing the requested command (https://github.com/ansible-collections/community.general/issues/8051, https://github.com/ansible-collections/community.general/pull/8434); see the example below.
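A sketch of the new redfish_command wait options referenced above: after issuing the reboot, the module blocks until the Redfish service is reachable again or the timeout expires. Host and credential values are placeholders.

```yaml
- name: Reboot the system and wait until the service is reachable again
  community.general.redfish_command:
    category: Systems
    command: PowerReboot
    baseuri: "{{ bmc_host }}"
    username: "{{ bmc_user }}"
    password: "{{ bmc_password }}"
    wait: true
    wait_timeout: 120
```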
-* redfish_command - add handling of the PasswordChangeRequired message from services in the UpdateUserPassword command to directly modify the user's password if the requested user is the one invoking the operation (https://github.com/ansible-collections/community.general/issues/8652, https://github.com/ansible-collections/community.general/pull/8653).
-* redfish_config - remove CapacityBytes from the required parameters of the CreateVolume command (https://github.com/ansible-collections/community.general/pull/8956).
-* redfish_config - add parameter storage_none_volume_deletion to CreateVolume command in order to control the automatic deletion of non-RAID volumes (https://github.com/ansible-collections/community.general/pull/8990).
-* redfish_info - add command CheckAvailability to check if a service is accessible (https://github.com/ansible-collections/community.general/issues/8051, https://github.com/ansible-collections/community.general/pull/8434).
-* redfish_info - adds RedfishURI and StorageId to Disk inventory (https://github.com/ansible-collections/community.general/pull/8937).
-* redfish_utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-* redfish_utils module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-* redfish_utils module utils - schedule a BIOS configuration job at next reboot when the BIOS config is changed (https://github.com/ansible-collections/community.general/pull/9012).
-* redis cache plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-* redis, redis_info - add client_cert and client_key options to specify path to certificate for Redis authentication (https://github.com/ansible-collections/community.general/pull/8654).
-* redis_info - adds support for getting cluster info (https://github.com/ansible-collections/community.general/pull/8464).
-* remove_keys filter plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-* replace_keys filter plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-* scaleway - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-* scaleway module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-* scaleway_compute - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-* scaleway_container - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
-* scaleway_container_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
-* scaleway_container_namespace - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
-* scaleway_container_namespace_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
-* scaleway_container_registry - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
-* scaleway_container_registry_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
-* scaleway_function - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
-* scaleway_function_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
-* scaleway_function_namespace - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
-* scaleway_function_namespace_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
-* scaleway_ip - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-* scaleway_lb - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-* scaleway_security_group - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-* scaleway_security_group - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-* scaleway_user_data - better construct when using dict.items() (https://github.com/ansible-collections/community.general/pull/8876).
-* scaleway_user_data - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-* sensu_silence - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-* snmp_facts - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-* sorcery - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-* sudosu become plugin - added an option (alt_method) to enhance compatibility with more versions of su (https://github.com/ansible-collections/community.general/pull/8214); see the sketch below this block.
-* udm_dns_record - replace loop with dict.update() (https://github.com/ansible-collections/community.general/pull/8876).
-* ufw - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-* unsafe plugin utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-* vardict module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-* vars MH module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
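A sketch of enabling the new sudosu alt_method option referenced above. The ansible_sudosu_alt_method variable name is an assumption for illustration; check the plugin documentation for the exact variable.

```yaml
- hosts: all
  become: true
  become_method: community.general.sudosu
  vars:
    ansible_become_user: deploy
    # Assumed variable name for the new alt_method option (illustrative)
    ansible_sudosu_alt_method: true
  tasks:
    - name: Verify connectivity under the become user
      ansible.builtin.ping:
```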
-* virtualbox inventory plugin - expose a new parameter enable_advanced_group_parsing to change how the VirtualBox dynamic inventory parses VM groups (https://github.com/ansible-collections/community.general/issues/8508, https://github.com/ansible-collections/community.general/pull/8510).
-* vmadm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-* wdc_redfish_command - minor change to handle the upgrade file for Redfish WD platforms (https://github.com/ansible-collections/community.general/pull/8444).
-
-
-### Breaking Changes / Porting Guide
-
-* The collection no longer supports ansible-core 2.13 and ansible-core 2.14. While most (or even all) modules and plugins might still work with these versions, they are no longer tested in CI and breakages regarding them will not be fixed (https://github.com/ansible-collections/community.general/pull/8921).
-* cmd_runner module utils - CLI arguments created directly from module parameters are no longer assigned a default formatter (https://github.com/ansible-collections/community.general/pull/8928).
-* irc - the defaults of use_tls and validate_certs changed from false to true (https://github.com/ansible-collections/community.general/pull/8918).
-* rhsm_repository - the states present and absent have been removed. Use enabled and disabled instead (https://github.com/ansible-collections/community.general/pull/8918).
-
-
-### Deprecated Features
-
-* CmdRunner module util - setting the value of the ignore_none parameter within a CmdRunner context is deprecated, and that feature should be removed in community.general 12.0.0 (https://github.com/ansible-collections/community.general/pull/8479).
-* MH decorator cause_changes module utils - deprecate parameters on_success and on_failure (https://github.com/ansible-collections/community.general/pull/8791).
-* git_config - the list_all option has been deprecated and will be removed in community.general 11.0.0. Use the community.general.git_config_info module instead (https://github.com/ansible-collections/community.general/pull/8453).
-* git_config - using state=present without providing value is deprecated and will be disallowed in community.general 11.0.0. Use the community.general.git_config_info module instead to read a value (https://github.com/ansible-collections/community.general/pull/8453).
-* hipchat - the hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020. The module is therefore deprecated and will be removed from community.general 11.0.0 if nobody provides compelling reasons to still keep it (https://github.com/ansible-collections/community.general/pull/8919).
-* pipx - support for versions of the command line tool pipx older than 1.7.0 is deprecated and will be removed in community.general 11.0.0 (https://github.com/ansible-collections/community.general/pull/8793).
-* pipx_info - support for versions of the command line tool pipx older than 1.7.0 is deprecated and will be removed in community.general 11.0.0 (https://github.com/ansible-collections/community.general/pull/8793).
-
-
-### Removed Features (previously deprecated)
-
-* The consul_acl module has been removed. Use community.general.consul_token and/or community.general.consul_policy instead (https://github.com/ansible-collections/community.general/pull/8921).
-* The hipchat callback plugin has been removed. The hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020 (https://github.com/ansible-collections/community.general/pull/8921).
-* The redhat module utils has been removed (https://github.com/ansible-collections/community.general/pull/8921).
-* The rhn_channel module has been removed (https://github.com/ansible-collections/community.general/pull/8921).
-* The rhn_register module has been removed (https://github.com/ansible-collections/community.general/pull/8921).
-* consul - removed the ack_params_state_absent option. It had no effect anymore (https://github.com/ansible-collections/community.general/pull/8918).
-* ejabberd_user - removed the logging option (https://github.com/ansible-collections/community.general/pull/8918).
-* gitlab modules - remove basic auth feature (https://github.com/ansible-collections/community.general/pull/8405).
-* proxmox_kvm - removed the proxmox_default_behavior option. Explicitly specify the old default values if you were using proxmox_default_behavior=compatibility, otherwise simply remove it (https://github.com/ansible-collections/community.general/pull/8918).
-* redhat_subscription - removed the pool option. Use pool_ids instead, as in the sketch below (https://github.com/ansible-collections/community.general/pull/8918).
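Since the pool option is gone, pools must now be attached explicitly via pool_ids, as in this sketch; the credentials and pool ID are placeholders.

```yaml
- name: Subscribe the system and attach a specific pool
  community.general.redhat_subscription:
    state: present
    username: "{{ rhsm_user }}"
    password: "{{ rhsm_password }}"
    pool_ids:
      - 0123456789abcdef0123456789abcdef
```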
-
-
-### Bugfixes
-
-* bitwarden lookup plugin - fix KeyError in search_field (https://github.com/ansible-collections/community.general/issues/8549, https://github.com/ansible-collections/community.general/pull/8557).
-* bitwarden lookup plugin - support BWS v0.3.0 syntax breaking change (https://github.com/ansible-collections/community.general/pull/9028).
-* cloudflare_dns - fix changing Cloudflare SRV records (https://github.com/ansible-collections/community.general/issues/8679, https://github.com/ansible-collections/community.general/pull/8948).
-* cmd_runner module utils - a call to get_best_parsable_locales() was missing a parameter (https://github.com/ansible-collections/community.general/pull/8929).
-* collection_version lookup plugin - use importlib directly instead of ansible.module_utils.compat.importlib, which is deprecated and has been removed in ansible-core 2.19 (https://github.com/ansible-collections/community.general/pull/9084).
-* cpanm - use new VarDict to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411).
-* dig lookup plugin - fix using only the last nameserver specified (https://github.com/ansible-collections/community.general/pull/8970).
-* django module utils - use new VarDict to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411).
-* django_command - the command option is now split lexically before being passed to the underlying PythonRunner (https://github.com/ansible-collections/community.general/pull/8944).
-* gconftool2_info - use new VarDict to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411).
-* git_config - fix behavior of state=absent if value is present (https://github.com/ansible-collections/community.general/issues/8436, https://github.com/ansible-collections/community.general/pull/8452).
-* gitlab_group_access_token - fix crash in check mode caused by attempted access to a newly created access token (https://github.com/ansible-collections/community.general/pull/8796).
-* gitlab_label - update label's color (https://github.com/ansible-collections/community.general/pull/9010).
-* gitlab_project - fix container_expiration_policy not being applied when creating a new project (https://github.com/ansible-collections/community.general/pull/8790).
-* gitlab_project - fix crash caused by old Gitlab projects not having a container_expiration_policy attribute (https://github.com/ansible-collections/community.general/pull/8790).
-* gitlab_project_access_token - fix crash in check mode caused by attempted access to a newly created access token (https://github.com/ansible-collections/community.general/pull/8796).
-* gitlab_runner - fix paused parameter being ignored (https://github.com/ansible-collections/community.general/pull/8648).
-* homebrew - do not fail when brew prints warnings (https://github.com/ansible-collections/community.general/pull/8406, https://github.com/ansible-collections/community.general/issues/7044).
-* homebrew_cask - fix upgrade_all returning changed when nothing was upgraded (https://github.com/ansible-collections/community.general/issues/8707, https://github.com/ansible-collections/community.general/pull/8708).
-* homectl - the module now tries to use legacycrypt on Python 3.13+ (https://github.com/ansible-collections/community.general/issues/4691, https://github.com/ansible-collections/community.general/pull/8987).
-* hponcfg - use new VarDict to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411).
-* ini_file - pass absolute paths to module.atomic_move() (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925).
-* ipa_host - add force_create, fix enabled and disabled states (https://github.com/ansible-collections/community.general/issues/1094, https://github.com/ansible-collections/community.general/pull/8920).
-* ipa_hostgroup - fix enabled and disabled states (https://github.com/ansible-collections/community.general/issues/8408, https://github.com/ansible-collections/community.general/pull/8900).
-* java_keystore - pass absolute paths to module.atomic_move() (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925).
-* jenkins_node - fixed redirect authorization issues for the enabled, disabled, and absent node states, matching the earlier fix for the present state (https://github.com/ansible-collections/community.general/pull/9084).
-* jenkins_plugin - pass absolute paths to module.atomic_move() (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925).
-* kdeconfig - pass absolute paths to module.atomic_move() (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925).
-* kernel_blacklist - use new VarDict to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411).
-* keycloak_client - fix TypeError when sanitizing the saml.signing.private.key attribute in the module's diff or state output. The sanitize_cr function expected a dict where in some cases a list might occur (https://github.com/ansible-collections/community.general/pull/8403).
-* keycloak_clientscope - remove IDs from clientscope and its protocol mappers on comparison for changed check (https://github.com/ansible-collections/community.general/pull/8545).
-* keycloak_clientscope_type - fix change detection in check mode (https://github.com/ansible-collections/community.general/issues/9092, https://github.com/ansible-collections/community.general/pull/9093).
-* keycloak_group - fix crash during subgroup creation, caused by a missing or empty subGroups property in Keycloak ≥ 23 (https://github.com/ansible-collections/community.general/issues/8788, https://github.com/ansible-collections/community.general/pull/8979).
-* keycloak_realm - add normalizations for attributes and protocol_mappers (https://github.com/ansible-collections/community.general/pull/8496).
-* keycloak_realm - fix change detection in check mode by sorting the lists in the realms beforehand (https://github.com/ansible-collections/community.general/pull/8877).
-* keycloak_realm_key - fix invalid usage of parent_id (https://github.com/ansible-collections/community.general/issues/7850, https://github.com/ansible-collections/community.general/pull/8823).
-* keycloak_user_federation - add module argument allowing users to configure the update mode for the parameter bindCredential (https://github.com/ansible-collections/community.general/pull/8898).
-* keycloak_user_federation - fix KeyError when removing mappers during an update and new mappers are specified in the module args (https://github.com/ansible-collections/community.general/pull/8762).
-* keycloak_user_federation - fix the UnboundLocalError that occurs when an ID is provided for a user federation mapper (https://github.com/ansible-collections/community.general/pull/8831).
-* keycloak_user_federation - get cleartext IDP clientSecret from full realm info to detect changes to it (https://github.com/ansible-collections/community.general/issues/8294, https://github.com/ansible-collections/community.general/pull/8735).
-* keycloak_user_federation - minimize change detection by setting krbPrincipalAttribute to '' in Keycloak responses if missing (https://github.com/ansible-collections/community.general/pull/8785).
-* keycloak_user_federation - remove lastSync parameter from Keycloak responses to minimize diff/changes (https://github.com/ansible-collections/community.general/pull/8812).
-* keycloak_user_federation - remove existing user federation mappers if they are not present in the federation configuration and will not be updated (https://github.com/ansible-collections/community.general/issues/7169, https://github.com/ansible-collections/community.general/pull/8695).
-* keycloak_user_federation - sort desired and after mapper list by name (analogous to the before mapper list) to minimize diff and make change detection more accurate (https://github.com/ansible-collections/community.general/pull/8761).
-* keycloak_userprofile - fix empty response when fetching userprofile component by removing parent=parent_id filter (https://github.com/ansible-collections/community.general/pull/8923).
-* keycloak_userprofile - improve diff by deserializing the fetched kc.user.profile.config and serializing it only when sending it back (https://github.com/ansible-collections/community.general/pull/8940).
-* launchd - correctly report changed status in check mode (https://github.com/ansible-collections/community.general/pull/8406).
-* locale_gen - use new VarDict to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411).
-* lxd_container - fix bug introduced in previous commit (https://github.com/ansible-collections/community.general/pull/8895, https://github.com/ansible-collections/community.general/issues/8888).
-* mksysb - use new VarDict to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411).
-* modprobe - fix check mode not being honored for persistent option (https://github.com/ansible-collections/community.general/issues/9051, https://github.com/ansible-collections/community.general/pull/9052).
-* nsupdate - fix 'index out of range' error when changing NS records by falling back to the authority section of the response (https://github.com/ansible-collections/community.general/issues/8612, https://github.com/ansible-collections/community.general/pull/8614).
-* one_host - fix if statements for cases when ID=0 (https://github.com/ansible-collections/community.general/issues/1199, https://github.com/ansible-collections/community.general/pull/8907).
-* one_image - fix module failing due to a class method typo (https://github.com/ansible-collections/community.general/pull/9056).
-* one\_image\_info \- fix module failing due to a class method typo \([https\://github\.com/ansible\-collections/community\.general/pull/9056](https\://github\.com/ansible\-collections/community\.general/pull/9056)\)\.
-* one\_service \- fix service creation after it was deleted with the unique parameter \([https\://github\.com/ansible\-collections/community\.general/issues/3137](https\://github\.com/ansible\-collections/community\.general/issues/3137)\, [https\://github\.com/ansible\-collections/community\.general/pull/8887](https\://github\.com/ansible\-collections/community\.general/pull/8887)\)\.
-* one\_vnet \- fix module failing due to a variable typo \([https\://github\.com/ansible\-collections/community\.general/pull/9019](https\://github\.com/ansible\-collections/community\.general/pull/9019)\)\.
-* opennebula inventory plugin \- fix invalid reference to IP when inventory runs against NICs with no IPv4 address \([https\://github\.com/ansible\-collections/community\.general/pull/8489](https\://github\.com/ansible\-collections/community\.general/pull/8489)\)\.
-* opentelemetry callback \- do not save the JSON response when using the ansible\.builtin\.uri module \([https\://github\.com/ansible\-collections/community\.general/pull/8430](https\://github\.com/ansible\-collections/community\.general/pull/8430)\)\.
-* opentelemetry callback \- do not save the content response when using the ansible\.builtin\.slurp module \([https\://github\.com/ansible\-collections/community\.general/pull/8430](https\://github\.com/ansible\-collections/community\.general/pull/8430)\)\.
-* pam\_limits \- pass absolute paths to module\.atomic\_move\(\) \([https\://github\.com/ansible/ansible/issues/83950](https\://github\.com/ansible/ansible/issues/83950)\, [https\://github\.com/ansible\-collections/community\.general/pull/8925](https\://github\.com/ansible\-collections/community\.general/pull/8925)\)\.
-* pacman \- do not fail if an empty list of packages has been provided and there is nothing to do \([https\://github\.com/ansible\-collections/community\.general/pull/8514](https\://github\.com/ansible\-collections/community\.general/pull/8514)\)\.
-* pipx \- the module was ignoring the global option when listing existing applications \([https\://github\.com/ansible\-collections/community\.general/pull/9044](https\://github\.com/ansible\-collections/community\.general/pull/9044)\)\.
-* pipx module utils \- add missing command line formatter for argument spec\_metadata \([https\://github\.com/ansible\-collections/community\.general/pull/9044](https\://github\.com/ansible\-collections/community\.general/pull/9044)\)\.
-* pipx\_info \- use new VarDict to prevent deprecation warning \([https\://github\.com/ansible\-collections/community\.general/issues/8410](https\://github\.com/ansible\-collections/community\.general/issues/8410)\, [https\://github\.com/ansible\-collections/community\.general/pull/8411](https\://github\.com/ansible\-collections/community\.general/pull/8411)\)\.
-* proxmox \- fix idempotency on creation of mount volumes using Proxmox\' special \<storage\>\:\<size\> syntax \([https\://github\.com/ansible\-collections/community\.general/issues/8407](https\://github\.com/ansible\-collections/community\.general/issues/8407)\, [https\://github\.com/ansible\-collections/community\.general/pull/8542](https\://github\.com/ansible\-collections/community\.general/pull/8542)\)\.
-* proxmox \- fixed an issue where the new volume handling incorrectly converted null values into \"None\" strings \([https\://github\.com/ansible\-collections/community\.general/pull/8646](https\://github\.com/ansible\-collections/community\.general/pull/8646)\)\.
-* proxmox \- fixed an issue where volume strings were overwritten instead of appended to in the new build\_volume\(\) method \([https\://github\.com/ansible\-collections/community\.general/pull/8646](https\://github\.com/ansible\-collections/community\.general/pull/8646)\)\.
-* proxmox \- removed the forced conversion of non\-string values to strings to be consistent with the module documentation \([https\://github\.com/ansible\-collections/community\.general/pull/8646](https\://github\.com/ansible\-collections/community\.general/pull/8646)\)\.
-* proxmox inventory plugin \- fixed a possible error when concatenating responses from Proxmox\. In case an API call unexpectedly returned an empty result\, the inventory failed with a fatal error\. Added a check for empty responses \([https\://github\.com/ansible\-collections/community\.general/issues/8798](https\://github\.com/ansible\-collections/community\.general/issues/8798)\, [https\://github\.com/ansible\-collections/community\.general/pull/8794](https\://github\.com/ansible\-collections/community\.general/pull/8794)\)\.
-* python\_runner module utils \- parameter path\_prefix was being handled as a string when it should be a list \([https\://github\.com/ansible\-collections/community\.general/pull/8944](https\://github\.com/ansible\-collections/community\.general/pull/8944)\)\.
-* redfish\_utils module utils \- do not fail when language is not exactly \"en\" \([https\://github\.com/ansible\-collections/community\.general/pull/8613](https\://github\.com/ansible\-collections/community\.general/pull/8613)\)\.
-* redfish\_utils module utils \- fix issue with URI parsing to gracefully handle trailing slashes when extracting member identifiers \([https\://github\.com/ansible\-collections/community\.general/issues/9047](https\://github\.com/ansible\-collections/community\.general/issues/9047)\, [https\://github\.com/ansible\-collections/community\.general/pull/9057](https\://github\.com/ansible\-collections/community\.general/pull/9057)\)\.
-* snap \- use new VarDict to prevent deprecation warning \([https\://github\.com/ansible\-collections/community\.general/issues/8410](https\://github\.com/ansible\-collections/community\.general/issues/8410)\, [https\://github\.com/ansible\-collections/community\.general/pull/8411](https\://github\.com/ansible\-collections/community\.general/pull/8411)\)\.
-* snap\_alias \- use new VarDict to prevent deprecation warning \([https\://github\.com/ansible\-collections/community\.general/issues/8410](https\://github\.com/ansible\-collections/community\.general/issues/8410)\, [https\://github\.com/ansible\-collections/community\.general/pull/8411](https\://github\.com/ansible\-collections/community\.general/pull/8411)\)\.
-* udm\_user \- the module now tries to use legacycrypt on Python 3\.13\+ \([https\://github\.com/ansible\-collections/community\.general/issues/4690](https\://github\.com/ansible\-collections/community\.general/issues/4690)\, [https\://github\.com/ansible\-collections/community\.general/pull/8987](https\://github\.com/ansible\-collections/community\.general/pull/8987)\)\.
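One of the bugfixes above concerns check mode handling of ``modprobe``'s persistent option. A minimal sketch of a task exercising that option (the kernel module name ``dummy`` is purely illustrative):

```yaml
- name: Load the dummy module now and keep it configured across reboots
  community.general.modprobe:
    name: dummy            # illustrative kernel module
    state: present
    persistent: present    # the option whose check mode handling was fixed above
```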
-
-
-### Known Issues
-
-* jenkins\_node \- the module is not able to update the offline message when the node is already offline\, because it internally uses the toggleOffline API \([https\://github\.com/ansible\-collections/community\.general/pull/9084](https\://github\.com/ansible\-collections/community\.general/pull/9084)\)\.
-
-
-### New Plugins
-
-
-#### Filter
-
-* community\.general\.keep\_keys \- Keep specific keys from dictionaries in a list\.
-* community\.general\.remove\_keys \- Remove specific keys from dictionaries in a list\.
-* community\.general\.replace\_keys \- Replace specific keys in a list of dictionaries\.
-* community\.general\.reveal\_ansible\_type \- Return input type\.
-
-
-#### Test
-
-* community\.general\.ansible\_type \- Validate input type\.
-
-
-### New Modules
-
-* community\.general\.bootc\_manage \- Bootc Switch and Upgrade\.
-* community\.general\.consul\_agent\_check \- Add\, modify\, and delete checks within a consul cluster\.
-* community\.general\.consul\_agent\_service \- Add\, modify\, and delete services within a consul cluster\.
-* community\.general\.django\_check \- Wrapper for C\(django\-admin check\)\.
-* community\.general\.django\_createcachetable \- Wrapper for C\(django\-admin createcachetable\)\.
-* community\.general\.homebrew\_services \- Services manager for Homebrew\.
-* community\.general\.ipa\_getkeytab \- Manage keytab file in FreeIPA\.
-* community\.general\.jenkins\_node \- Manage Jenkins nodes\.
-* community\.general\.keycloak\_component \- Allows administration of Keycloak components via Keycloak API\.
-* community\.general\.keycloak\_realm\_keys\_metadata\_info \- Allows obtaining Keycloak realm keys metadata via Keycloak API\.
-* community\.general\.keycloak\_userprofile \- Allows managing Keycloak User Profiles\.
-* community\.general\.krb\_ticket \- Kerberos utils for managing tickets\.
-* community\.general\.one\_vnet \- Manages OpenNebula virtual networks\.
-* community\.general\.zypper\_repository\_info \- List Zypper repositories\.
+This file is a placeholder; a version-specific `CHANGELOG-vX.md` will be generated during releases from fragments
+under `changelogs/fragments`. On release branches, once a release has been created, consult the branch's version-specific
+file for changes that have occurred in that branch.
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 3264ba63ff..119e04e170 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -1,630 +1,6 @@
-===============================
-Community General Release Notes
-===============================
+Placeholder changelog
+=====================
-
-.. contents:: Topics
-
-This changelog describes changes after version 9.0.0.
-
-v10.2.0
-=======
-
-Release Summary
----------------
-
-Regular bugfix and feature release.
-
-Minor Changes
--------------
-
-- bitwarden lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- cgroup_memory_recap callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321).
-- chef_databag lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- chroot connection plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
-- chroot connection plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9322). -- cobbler inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). -- cobbler inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323). -- collection_version lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). -- consul_kv lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). -- context_demo callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). -- counter_enabled callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). -- credstash lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). -- cyberarkpassword lookup plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). -- cyberarkpassword lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). -- dense callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). -- dependent lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). -- dig lookup plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). -- dig lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). -- diy callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). -- dnstxt lookup plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). -- dnstxt lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). -- doas become plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9319). -- dsv lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). -- dzdo become plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9319). -- elastic callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). -- etcd lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). -- etcd3 lookup plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). -- etcd3 lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). 
-- filetree lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). -- from_csv filter plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). -- from_ini filter plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). -- funcd connection plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9322). -- github_app_access_token lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). -- gitlab_instance_variable - add support for ``raw`` variables suboption (https://github.com/ansible-collections/community.general/pull/9425). -- gitlab_runners inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). -- gitlab_runners inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323). -- hiera lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). -- icinga2 inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323). -- incus connection plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9322). -- iocage connection plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9322). -- iocage inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). -- iptables_state action plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9318). -- jabber callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). -- jail connection plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9322). -- keycloak - add an action group for Keycloak modules to allow ``module_defaults`` to be set for Keycloak tasks (https://github.com/ansible-collections/community.general/pull/9284). -- keyring lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). -- ksu become plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9319). -- lastpass lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). -- linode inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323). -- lmdb_kv lookup plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). -- lmdb_kv lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). 
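The new Keycloak action group mentioned above lets connection options be set once via ``module_defaults``. A minimal sketch, assuming the group is addressed as ``group/community.general.keycloak`` (the group name is an assumption based on the usual collection action-group naming; URL and credentials are illustrative):

```yaml
- hosts: localhost
  module_defaults:
    group/community.general.keycloak:    # action group added above (assumed name)
      auth_keycloak_url: https://keycloak.example.com
      auth_realm: master
      auth_username: admin
      auth_password: "{{ keycloak_admin_password }}"
  tasks:
    - name: Create a realm without repeating the connection options
      community.general.keycloak_realm:
        id: myrealm
        realm: myrealm
        state: present
```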
-- locale_gen - invert the logic to determine ``ubuntu_mode``, making it look first for ``/etc/locale.gen`` (set ``ubuntu_mode`` to ``False``) and only then looking for ``/var/lib/locales/supported.d/`` (set ``ubuntu_mode`` to ``True``) (https://github.com/ansible-collections/community.general/pull/9238, https://github.com/ansible-collections/community.general/issues/9131, https://github.com/ansible-collections/community.general/issues/8487). -- locale_gen - new return value ``mechanism`` to better express the semantics of the ``ubuntu_mode``, with the possible values being either ``glibc`` (``ubuntu_mode=False``) or ``ubuntu_legacy`` (``ubuntu_mode=True``) (https://github.com/ansible-collections/community.general/pull/9238). -- log_plays callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). -- loganalytics callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). -- logdna callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). -- logentries callback plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). -- logentries callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). -- lxc connection plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9322). -- lxd connection plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9322). -- lxd inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). -- lxd inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323). -- machinectl become plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9319). -- mail callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). -- manageiq_alert_profiles - improve handling of parameter requirements (https://github.com/ansible-collections/community.general/pull/9449). -- manifold lookup plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). -- manifold lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). -- memcached cache plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9320). -- merge_variables lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). -- nmap inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). -- nmap inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323). -- nrdp callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). 
-- onepassword lookup plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
-- onepassword lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- onepassword_doc lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- online inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323).
-- opennebula inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
-- opennebula inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323).
-- opentelemetry callback plugin - remove code handling Python versions prior to 3.7 (https://github.com/ansible-collections/community.general/pull/9482).
-- opentelemetry callback plugin - remove code handling Python versions prior to 3.7 (https://github.com/ansible-collections/community.general/pull/9503).
-- opentelemetry callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321).
-- pacemaker_cluster - remove unused code (https://github.com/ansible-collections/community.general/pull/9471).
-- pacemaker_cluster - use a safer mechanism to run external commands (https://github.com/ansible-collections/community.general/pull/9471).
-- passwordstore lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- pbrun become plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9319).
-- pfexec become plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9319).
-- pmrun become plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9319).
-- proxmox inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
-- proxmox inventory plugin - strip whitespace from ``user``, ``token_id``, and ``token_secret`` (https://github.com/ansible-collections/community.general/issues/9227, https://github.com/ansible-collections/community.general/pull/9228/).
-- proxmox inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323).
-- proxmox module utils - add method ``api_task_complete`` that can wait for task completion and return error message (https://github.com/ansible-collections/community.general/pull/9256).
-- proxmox_backup - refactor permission checking to improve code readability and maintainability (https://github.com/ansible-collections/community.general/pull/9239).
-- qubes connection plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9322).
-- random_pet lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- redis cache plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
-- redis cache plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9320). -- redis lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). -- revbitspss lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). -- saltstack connection plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9322). -- say callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). -- scaleway inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). -- scaleway inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323). -- selective callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). -- sesu become plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9319). -- shelvefile lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). -- shutdown action plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). -- shutdown action plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9318). -- slack callback plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). -- slack callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). -- splunk callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). -- stackpath_compute inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323). -- sudosu become plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9319). -- timestamp callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). -- to_ini filter plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). -- tss lookup plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). -- tss lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). -- unixy callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). -- virtualbox inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). -- virtualbox inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323). 
-- xbps - add ``root`` and ``repository`` options to enable bootstrapping new void installations (https://github.com/ansible-collections/community.general/pull/9174). -- xen_orchestra inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323). -- xfconf - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9226). -- xfconf_info - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9226). -- yaml callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). -- zone connection plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9322). -- zypper - add ``quiet`` option (https://github.com/ansible-collections/community.general/pull/9270). -- zypper - add ``simple_errors`` option (https://github.com/ansible-collections/community.general/pull/9270). - -Deprecated Features -------------------- - -- atomic_container - module is deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/pull/9487). -- atomic_host - module is deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/pull/9487). -- atomic_image - module is deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/pull/9487). -- facter - module is deprecated and will be removed in community.general 12.0.0, use ``community.general.facter_facts`` instead (https://github.com/ansible-collections/community.general/pull/9451). -- locale_gen - ``ubuntu_mode=True``, or ``mechanism=ubuntu_legacy`` is deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/pull/9238). -- pure module utils - the module utils is deprecated and will be removed from community.general 12.0.0. The modules using this were removed in community.general 3.0.0 (https://github.com/ansible-collections/community.general/pull/9432). -- purestorage doc fragments - the doc fragment is deprecated and will be removed from community.general 12.0.0. The modules using this were removed in community.general 3.0.0 (https://github.com/ansible-collections/community.general/pull/9432). -- sensu_check - module is deprecated and will be removed in community.general 13.0.0, use collection ``sensu.sensu_go`` instead (https://github.com/ansible-collections/community.general/pull/9483). -- sensu_client - module is deprecated and will be removed in community.general 13.0.0, use collection ``sensu.sensu_go`` instead (https://github.com/ansible-collections/community.general/pull/9483). -- sensu_handler - module is deprecated and will be removed in community.general 13.0.0, use collection ``sensu.sensu_go`` instead (https://github.com/ansible-collections/community.general/pull/9483). -- sensu_silence - module is deprecated and will be removed in community.general 13.0.0, use collection ``sensu.sensu_go`` instead (https://github.com/ansible-collections/community.general/pull/9483). -- sensu_subscription - module is deprecated and will be removed in community.general 13.0.0, use collection ``sensu.sensu_go`` instead (https://github.com/ansible-collections/community.general/pull/9483). 
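The ``zypper`` options added in the Minor Changes above (``quiet`` and ``simple_errors``) are easiest to see in a task; a minimal sketch, with the package name and boolean values purely illustrative:

```yaml
- name: Install a package with reduced zypper output and simplified error messages
  community.general.zypper:
    name: htop             # illustrative package
    state: present
    quiet: true            # option added above
    simple_errors: true    # option added above
```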
-- slack - the default value ``auto`` of the ``prepend_hash`` option is deprecated and will change to ``never`` in community.general 12.0.0 (https://github.com/ansible-collections/community.general/pull/9443).
-- yaml callback plugin - deprecate plugin in favor of ``result_format=yaml`` in plugin ``ansible.builtin.default`` (https://github.com/ansible-collections/community.general/pull/9456).
-
-Security Fixes
---------------
-
-- keycloak_authentication - API calls did not properly set the ``priority`` during update, resulting in incorrectly sorted authentication flows. This apparently only affects Keycloak 25 or newer (https://github.com/ansible-collections/community.general/pull/9263).
-
-Bugfixes
---------
-
-- dig lookup plugin - correctly handle ``NoNameserver`` exception (https://github.com/ansible-collections/community.general/pull/9363, https://github.com/ansible-collections/community.general/issues/9362).
-- homebrew - fix incorrect handling of aliased homebrew modules when the alias is requested (https://github.com/ansible-collections/community.general/pull/9255, https://github.com/ansible-collections/community.general/issues/9240).
-- htpasswd - report changes when file permissions are adjusted (https://github.com/ansible-collections/community.general/issues/9485, https://github.com/ansible-collections/community.general/pull/9490).
-- proxmox_backup - fix incorrect key lookup in vmid permission check (https://github.com/ansible-collections/community.general/pull/9223).
-- proxmox_disk - fix async method and make ``resize_disk`` method handle errors correctly (https://github.com/ansible-collections/community.general/pull/9256).
-- proxmox_template - fix the wrong path called on ``proxmox_template.task_status`` (https://github.com/ansible-collections/community.general/issues/9276, https://github.com/ansible-collections/community.general/pull/9277).
-- qubes connection plugin - fix the printing of debug information (https://github.com/ansible-collections/community.general/pull/9334).
-- redfish_utils module utils - fix ``VerifyBiosAttributes`` command on multi-system resource nodes (https://github.com/ansible-collections/community.general/pull/9234).
-
-New Plugins
------------
-
-Inventory
-~~~~~~~~~
-
-- community.general.iocage - iocage inventory source.
-
-New Modules
------------
-
-- community.general.android_sdk - Manages Android SDK packages.
-- community.general.ldap_inc - Use the Modify-Increment LDAP V3 feature to increment an attribute value.
-- community.general.systemd_creds_decrypt - C(systemd)'s C(systemd-creds decrypt) plugin.
-- community.general.systemd_creds_encrypt - C(systemd)'s C(systemd-creds encrypt) plugin.
-
-v10.1.0
-=======
-
-Release Summary
----------------
-
-Regular bugfix and feature release.
-
-Minor Changes
--------------
-
-- alternatives - add ``family`` parameter that allows utilizing the ``--family`` option available in the RedHat version of update-alternatives (https://github.com/ansible-collections/community.general/issues/5060, https://github.com/ansible-collections/community.general/pull/9096).
-- cloudflare_dns - add support for ``comment`` and ``tags`` (https://github.com/ansible-collections/community.general/pull/9132).
-- deps module utils - add ``deps.clear()`` to clear out previously declared dependencies (https://github.com/ansible-collections/community.general/pull/9179).
-- homebrew - greatly speed up module when multiple packages are passed in the ``name`` option (https://github.com/ansible-collections/community.general/pull/9181).
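The ``cloudflare_dns`` entry just above adds ``comment`` and ``tags`` support; a minimal sketch of a record using both (zone, record, value, and token are illustrative):

```yaml
- name: Create an A record carrying a comment and tags
  community.general.cloudflare_dns:
    api_token: "{{ cloudflare_api_token }}"
    zone: example.com
    record: www
    type: A
    value: 203.0.113.10
    comment: managed by Ansible    # support added above
    tags:                          # support added above
      - prod
      - web
```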
-- homebrew - remove duplicated package name validation (https://github.com/ansible-collections/community.general/pull/9076).
-- iso_extract - adds ``password`` parameter that is passed to 7z (https://github.com/ansible-collections/community.general/pull/9159).
-- launchd - add ``plist`` option for services such as sshd, where the plist filename doesn't match the service name (https://github.com/ansible-collections/community.general/pull/9102).
-- nmcli - add ``sriov`` parameter that enables support for SR-IOV settings (https://github.com/ansible-collections/community.general/pull/9168).
-- pipx - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9180).
-- pipx_info - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9180).
-- proxmox_template - add server side artifact fetching support (https://github.com/ansible-collections/community.general/pull/9113).
-- redfish_command - add ``update_custom_oem_header``, ``update_custom_oem_params``, and ``update_custom_oem_mime_type`` options (https://github.com/ansible-collections/community.general/pull/9123).
-- redfish_utils module utils - remove redundant code (https://github.com/ansible-collections/community.general/pull/9190).
-- rpm_ostree_pkg - added the option ``apply_live`` (https://github.com/ansible-collections/community.general/pull/9167).
-- rpm_ostree_pkg - added the return value ``needs_reboot`` (https://github.com/ansible-collections/community.general/pull/9167).
-- scaleway_lb - minor simplification in the code (https://github.com/ansible-collections/community.general/pull/9189).
-- ssh_config - add ``dynamicforward`` option (https://github.com/ansible-collections/community.general/pull/9192).
-
-Deprecated Features
--------------------
-
-- opkg - deprecate value ``""`` for parameter ``force`` (https://github.com/ansible-collections/community.general/pull/9172).
-- redfish_utils module utils - deprecate method ``RedfishUtils._init_session()`` (https://github.com/ansible-collections/community.general/pull/9190).
-
-Bugfixes
---------
-
-- dnf_config_manager - fix hanging when prompting to import GPG keys (https://github.com/ansible-collections/community.general/pull/9124, https://github.com/ansible-collections/community.general/issues/8830).
-- dnf_config_manager - forces locale to ``C`` before module starts. If the locale was set to non-English, the output of ``dnf config-manager`` could not be parsed (https://github.com/ansible-collections/community.general/pull/9157, https://github.com/ansible-collections/community.general/issues/9046).
-- flatpak - force the locale language to ``C`` when running the flatpak command (https://github.com/ansible-collections/community.general/pull/9187, https://github.com/ansible-collections/community.general/issues/8883).
-- gio_mime - fix command line when determining version of ``gio`` (https://github.com/ansible-collections/community.general/pull/9171, https://github.com/ansible-collections/community.general/issues/9158).
-- github_key - in check mode, a faulty call to ``datetime.strftime(...)`` was being made, which generated an exception (https://github.com/ansible-collections/community.general/issues/9185).
-- homebrew_cask - allow ``+`` symbol in Homebrew cask name validation regex (https://github.com/ansible-collections/community.general/pull/9128).
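Among the v10.1.0 Minor Changes above, ``ssh_config`` gained a ``dynamicforward`` option; a minimal sketch (user, host, and port are illustrative):

```yaml
- name: Add a SOCKS proxy (DynamicForward) to a host entry
  community.general.ssh_config:
    user: alice                # manage this user's ~/.ssh/config; illustrative
    host: jump.example.com
    dynamicforward: '1080'     # option added above
    state: present
```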
-- keycloak_clientscope_type - sort the default and optional clientscope lists to improve the diff (https://github.com/ansible-collections/community.general/pull/9202). -- slack - fail if Slack API response is not OK with error message (https://github.com/ansible-collections/community.general/pull/9198). - -New Plugins ------------ - -Filter -~~~~~~ - -- community.general.accumulate - Produce a list of accumulated sums of the input list contents. - -New Modules ------------ - -- community.general.decompress - Decompresses compressed files. -- community.general.proxmox_backup - Start a VM backup in Proxmox VE cluster. - -v10.0.1 -======= - -Release Summary ---------------- - -Bugfix release for inclusion in Ansible 11.0.0rc1. - -Bugfixes --------- - -- keycloak_client - fix diff by removing code that turns the attributes dict which contains additional settings into a list (https://github.com/ansible-collections/community.general/pull/9077). -- keycloak_clientscope - fix diff and ``end_state`` by removing the code that turns the attributes dict, which contains additional config items, into a list (https://github.com/ansible-collections/community.general/pull/9082). -- redfish_utils module utils - remove undocumented default applytime (https://github.com/ansible-collections/community.general/pull/9114). - -v10.0.0 -======= - -Release Summary ---------------- - -This is release 10.0.0 of ``community.general``, released on 2024-11-04. - -Minor Changes -------------- - -- CmdRunner module util - argument formats can be specified as plain functions without calling ``cmd_runner_fmt.as_func()`` (https://github.com/ansible-collections/community.general/pull/8479). -- CmdRunner module utils - the parameter ``force_lang`` now supports the special value ``auto`` which will automatically try and determine the best parsable locale in the system (https://github.com/ansible-collections/community.general/pull/8517). -- MH module utils - add parameter ``when`` to ``cause_changes`` decorator (https://github.com/ansible-collections/community.general/pull/8766). -- MH module utils - minor refactor in decorators (https://github.com/ansible-collections/community.general/pull/8766). -- alternatives - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). -- ansible_galaxy_install - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9060). -- ansible_galaxy_install - add upgrade feature (https://github.com/ansible-collections/community.general/pull/8431, https://github.com/ansible-collections/community.general/issues/8351). -- ansible_galaxy_install - minor refactor in the module (https://github.com/ansible-collections/community.general/pull/8413). -- apache2_mod_proxy - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). -- apache2_mod_proxy - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). -- cargo - add option ``directory``, which allows source directory to be specified (https://github.com/ansible-collections/community.general/pull/8480). -- cgroup_memory_recap, hipchat, jabber, log_plays, loganalytics, logentries, logstash, slack, splunk, sumologic, syslog_json callback plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8628). 
-- chef_databag, consul_kv, cyberarkpassword, dsv, etcd, filetree, hiera, onepassword, onepassword_doc, onepassword_raw, passwordstore, redis, shelvefile, tss lookup plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8626).
-- chroot, funcd, incus, iocage, jail, lxc, lxd, qubes, zone connection plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8627).
-- cmd_runner module utils - add decorator ``cmd_runner_fmt.stack`` (https://github.com/ansible-collections/community.general/pull/8415).
-- cmd_runner module utils - refactor argument formatting code to its own Python module (https://github.com/ansible-collections/community.general/pull/8964).
-- cmd_runner_fmt module utils - simplify implementation of ``cmd_runner_fmt.as_bool_not()`` (https://github.com/ansible-collections/community.general/pull/8512).
-- cobbler, linode, lxd, nmap, online, scaleway, stackpath_compute, virtualbox inventory plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8625).
-- consul_acl - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- consul_kv - add argument for the datacenter option on Consul API (https://github.com/ansible-collections/community.general/pull/9026).
-- copr - Added ``includepkgs`` and ``excludepkgs`` parameters to limit the list of packages fetched or excluded from the repository (https://github.com/ansible-collections/community.general/pull/8779).
-- cpanm - add return value ``cpanm_version`` (https://github.com/ansible-collections/community.general/pull/9061).
-- credstash lookup plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- csv module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- deco MH module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- dig lookup plugin - add ``port`` option to specify DNS server port (https://github.com/ansible-collections/community.general/pull/8966).
-- django module utils - always retrieve version (https://github.com/ansible-collections/community.general/pull/9063).
-- django_check - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9063).
-- django_command - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9063).
-- django_createcachetable - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9063).
-- doas, dzdo, ksu, machinectl, pbrun, pfexec, pmrun, sesu, sudosu become plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8623).
-- etcd3 - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- flatpak - improve the parsing of Flatpak application IDs based on official guidelines (https://github.com/ansible-collections/community.general/pull/8909).
-- gconftool2 - make use of ``ModuleHelper`` features to simplify code (https://github.com/ansible-collections/community.general/pull/8711).
-- gconftool2 - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9064).
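The ``dig`` lookup's new ``port`` option above complements the existing ``@server`` notation; a minimal sketch, assuming the option is passed as a lookup keyword argument like the plugin's other options (server address and port are illustrative):

```yaml
- name: Resolve a name against a DNS server listening on a non-standard port
  ansible.builtin.debug:
    msg: "{{ lookup('community.general.dig', 'example.com', '@192.0.2.53', port=5353) }}"
```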
-- gconftool2 module utils - add argument formatter ``version`` (https://github.com/ansible-collections/community.general/pull/9064).
-- gconftool2_info - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9064).
-- gio_mime - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9067).
-- gio_mime - adjust code ahead of the old ``VarDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8855).
-- gio_mime - mute the old ``VarDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8776).
-- gio_mime module utils - add argument formatter ``version`` (https://github.com/ansible-collections/community.general/pull/9067).
-- github_app_access_token lookup plugin - adds new ``private_key`` parameter (https://github.com/ansible-collections/community.general/pull/8989).
-- gitlab_deploy_key - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876).
-- gitlab_group - add many new parameters (https://github.com/ansible-collections/community.general/pull/8908).
-- gitlab_group - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876).
-- gitlab_group - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- gitlab_issue - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876).
-- gitlab_merge_request - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876).
-- gitlab_project - add option ``container_expiration_policy`` to schedule container registry cleanup (https://github.com/ansible-collections/community.general/pull/8674).
-- gitlab_project - add option ``issues_access_level`` to enable/disable project issues (https://github.com/ansible-collections/community.general/pull/8760).
-- gitlab_project - add option ``model_registry_access_level`` to disable model registry (https://github.com/ansible-collections/community.general/pull/8688).
-- gitlab_project - add option ``pages_access_level`` to disable project pages (https://github.com/ansible-collections/community.general/pull/8688).
-- gitlab_project - add option ``repository_access_level`` to disable project repository (https://github.com/ansible-collections/community.general/pull/8674).
-- gitlab_project - add option ``service_desk_enabled`` to disable service desk (https://github.com/ansible-collections/community.general/pull/8688).
-- gitlab_project - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- gitlab_project - sorted parameters in order to avoid future merge conflicts (https://github.com/ansible-collections/community.general/pull/8759).
-- gitlab_runner - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876).
-- hashids filter plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- homebrew - speed up brew install and upgrade (https://github.com/ansible-collections/community.general/pull/9022).
-- hwc_ecs_instance - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- hwc_evs_disk - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- hwc_vpc_eip - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- hwc_vpc_peering_connect - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- hwc_vpc_port - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- hwc_vpc_subnet - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- icinga2_host - replace loop with dict comprehension (https://github.com/ansible-collections/community.general/pull/8876).
-- imc_rest - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- ipa_dnsrecord - adds ``SSHFP`` record type for managing SSH fingerprints in FreeIPA DNS (https://github.com/ansible-collections/community.general/pull/8404).
-- ipa_otptoken - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- jenkins_node - add ``offline_message`` parameter for updating a Jenkins node offline cause reason when the state is "disabled" (offline) (https://github.com/ansible-collections/community.general/pull/9084).
-- jira - adjust code ahead of the old ``VarDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8856).
-- jira - mute the old ``VarDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8776).
-- jira - replace deprecated params when using decorator ``cause_changes`` (https://github.com/ansible-collections/community.general/pull/8791).
-- keep_keys filter plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- keycloak module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- keycloak_client - add ``client-x509`` choice to ``client_authenticator_type`` (https://github.com/ansible-collections/community.general/pull/8973).
-- keycloak_client - assign auth flow by name (https://github.com/ansible-collections/community.general/pull/8428).
-- keycloak_client - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- keycloak_clientscope - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- keycloak_identity_provider - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- keycloak_realm - add boolean toggle to configure organization support for a given keycloak realm (https://github.com/ansible-collections/community.general/issues/9027, https://github.com/ansible-collections/community.general/pull/8927/).
-- keycloak_user_federation - add module argument allowing users to opt out of the removal of unspecified mappers, for example to keep the keycloak default mappers (https://github.com/ansible-collections/community.general/pull/8764).
-- keycloak_user_federation - add the user federation config parameter ``referral`` to the module arguments (https://github.com/ansible-collections/community.general/pull/8954). -- keycloak_user_federation - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). -- keycloak_user_federation - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). -- keycloak_user_federation - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). -- linode - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). -- locale_gen - add support for multiple locales (https://github.com/ansible-collections/community.general/issues/8677, https://github.com/ansible-collections/community.general/pull/8682). -- lxc_container - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). -- lxd_container - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). -- manageiq_provider - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). -- mattermost - adds support for message priority (https://github.com/ansible-collections/community.general/issues/9068, https://github.com/ansible-collections/community.general/pull/9087). -- memcached, pickle, redis, yaml cache plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8624). -- memset_dns_reload - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). -- memset_memstore_info - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). -- memset_server_info - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). -- memset_zone - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). -- memset_zone_domain - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). -- memset_zone_record - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). -- nmcli - add ``conn_enable`` param to reload connection (https://github.com/ansible-collections/community.general/issues/3752, https://github.com/ansible-collections/community.general/issues/8704, https://github.com/ansible-collections/community.general/pull/8897). -- nmcli - add ``state=up`` and ``state=down`` to enable/disable connections (https://github.com/ansible-collections/community.general/issues/3752, https://github.com/ansible-collections/community.general/issues/8704, https://github.com/ansible-collections/community.general/issues/7152, https://github.com/ansible-collections/community.general/pull/8897). -- nmcli - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). -- npm - add ``force`` parameter to allow ``--force`` (https://github.com/ansible-collections/community.general/pull/8885). -- ocapi_utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). 
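The multi-locale support added to ``locale_gen`` above means the ``name`` parameter now accepts a list; a minimal sketch (locale names illustrative):

```yaml
- name: Ensure several locales are generated in a single task
  community.general.locale_gen:
    name:                  # a list is accepted since the change above
      - en_US.UTF-8
      - de_DE.UTF-8
    state: present
```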
-- one_image - add ``create``, ``template`` and ``datastore_id`` arguments for image creation (https://github.com/ansible-collections/community.general/pull/9075).
-- one_image - add ``wait_timeout`` argument for adjustable timeouts (https://github.com/ansible-collections/community.general/pull/9075).
-- one_image - add option ``persistent`` to manage image persistence (https://github.com/ansible-collections/community.general/issues/3578, https://github.com/ansible-collections/community.general/pull/8889).
-- one_image - extend the XSD schema to make it return much more information about the image (https://github.com/ansible-collections/community.general/pull/8889).
-- one_image - refactor code to make it more similar to ``one_template`` and ``one_vnet`` (https://github.com/ansible-collections/community.general/pull/8889).
-- one_image_info - extend the XSD schema to make it return much more information about the image (https://github.com/ansible-collections/community.general/pull/8889).
-- one_image_info - refactor code to make it more similar to ``one_template`` and ``one_vnet`` (https://github.com/ansible-collections/community.general/pull/8889).
-- one_service - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- one_vm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- onepassword lookup plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- open_iscsi - allow login to a portal with multiple targets without specifying any of them (https://github.com/ansible-collections/community.general/pull/8719).
-- openbsd_pkg - adds diff support to show changes in installed package list. This does not yet work for check mode (https://github.com/ansible-collections/community.general/pull/8402).
-- opennebula.py - add VM ``id`` and VM ``host`` to inventory host data (https://github.com/ansible-collections/community.general/pull/8532).
-- opentelemetry callback plugin - fix default value for ``store_spans_in_file`` causing traces to be produced to a file named ``None`` (https://github.com/ansible-collections/community.general/issues/8566, https://github.com/ansible-collections/community.general/pull/8741).
-- opkg - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9086).
-- passwordstore lookup plugin - add subkey creation/update support (https://github.com/ansible-collections/community.general/pull/8952).
-- passwordstore lookup plugin - add the current user to the lockfile file name to address issues on multi-user systems (https://github.com/ansible-collections/community.general/pull/8689).
-- pids - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- pipx - add parameter ``suffix`` to module (https://github.com/ansible-collections/community.general/pull/8675, https://github.com/ansible-collections/community.general/issues/8656).
-- pipx - added new states ``install_all``, ``uninject``, ``upgrade_shared``, ``pin``, and ``unpin`` (https://github.com/ansible-collections/community.general/pull/8809).
-- pipx - added parameter ``global`` to module (https://github.com/ansible-collections/community.general/pull/8793).
-- pipx - refactor out parsing of ``pipx list`` output to module utils (https://github.com/ansible-collections/community.general/pull/9044).
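The ``global`` parameter added to ``pipx`` above installs applications for all users; a minimal sketch (application name illustrative, and a pipx version that supports ``--global`` is assumed):

```yaml
- name: Install tox system-wide via pipx
  community.general.pipx:
    name: tox            # illustrative application
    state: install
    global: true         # parameter added above
```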
-- pipx - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). -- pipx_info - add new return value ``pinned`` (https://github.com/ansible-collections/community.general/pull/9044). -- pipx_info - added parameter ``global`` to module (https://github.com/ansible-collections/community.general/pull/8793). -- pipx_info - refactor out parsing of ``pipx list`` output to module utils (https://github.com/ansible-collections/community.general/pull/9044). -- pipx_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). -- pkg5_publisher - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). -- pkgng - add option ``use_globs`` (default ``true``) to optionally disable glob patterns (https://github.com/ansible-collections/community.general/issues/8632, https://github.com/ansible-collections/community.general/pull/8633). -- proxmox - add ``disk_volume`` and ``mount_volumes`` keys for better readability (https://github.com/ansible-collections/community.general/pull/8542). -- proxmox - allow specification of the API port when using proxmox_* (https://github.com/ansible-collections/community.general/issues/8440, https://github.com/ansible-collections/community.general/pull/8441). -- proxmox - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). -- proxmox - translate the old ``disk`` and ``mounts`` keys to the new handling internally (https://github.com/ansible-collections/community.general/pull/8542). -- proxmox inventory plugin - add new fact for LXC interface details (https://github.com/ansible-collections/community.general/pull/8713). -- proxmox inventory plugin - clean up authentication code (https://github.com/ansible-collections/community.general/pull/8917). -- proxmox inventory plugin - fix urllib3 ``InsecureRequestWarnings`` not being suppressed when a token is used (https://github.com/ansible-collections/community.general/pull/9099). -- proxmox_disk - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). -- proxmox_kvm - adds the ``ciupgrade`` parameter to specify whether cloud-init should upgrade system packages at first boot (https://github.com/ansible-collections/community.general/pull/9066). -- proxmox_kvm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). -- proxmox_kvm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). -- proxmox_template - small refactor in logic for determining whether a template exists or not (https://github.com/ansible-collections/community.general/pull/8516). -- proxmox_vm_info - add ``network`` option to retrieve current network information (https://github.com/ansible-collections/community.general/pull/8471). -- redfish_* modules - adds ``ciphers`` option for custom cipher selection (https://github.com/ansible-collections/community.general/pull/8533). -- redfish_command - add ``UpdateUserAccountTypes`` command (https://github.com/ansible-collections/community.general/issues/9058, https://github.com/ansible-collections/community.general/pull/9059). 
-- redfish_command - add ``wait`` and ``wait_timeout`` options to allow a user to block a command until a service is accessible after performing the requested command (https://github.com/ansible-collections/community.general/issues/8051, https://github.com/ansible-collections/community.general/pull/8434). -- redfish_command - add handling of the ``PasswordChangeRequired`` message from services in the ``UpdateUserPassword`` command to directly modify the user's password if the requested user is the one invoking the operation (https://github.com/ansible-collections/community.general/issues/8652, https://github.com/ansible-collections/community.general/pull/8653). -- redfish_config - remove ``CapacityBytes`` from required parameters of the ``CreateVolume`` command (https://github.com/ansible-collections/community.general/pull/8956). -- redfish_config - add parameter ``storage_none_volume_deletion`` to ``CreateVolume`` command in order to control the automatic deletion of non-RAID volumes (https://github.com/ansible-collections/community.general/pull/8990). -- redfish_info - add command ``CheckAvailability`` to check if a service is accessible (https://github.com/ansible-collections/community.general/issues/8051, https://github.com/ansible-collections/community.general/pull/8434). -- redfish_info - adds ``RedfishURI`` and ``StorageId`` to Disk inventory (https://github.com/ansible-collections/community.general/pull/8937). -- redfish_utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). -- redfish_utils module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). -- redfish_utils module utils - schedule a BIOS configuration job at next reboot when the BIOS config is changed (https://github.com/ansible-collections/community.general/pull/9012). -- redis cache plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). -- redis, redis_info - add ``client_cert`` and ``client_key`` options to specify path to certificate for Redis authentication (https://github.com/ansible-collections/community.general/pull/8654). -- redis_info - adds support for getting cluster info (https://github.com/ansible-collections/community.general/pull/8464). -- remove_keys filter plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). -- replace_keys filter plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). -- scaleway - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). -- scaleway module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). -- scaleway_compute - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). -- scaleway_container - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). -- scaleway_container_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
-- scaleway_container_namespace - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). -- scaleway_container_namespace_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). -- scaleway_container_registry - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). -- scaleway_container_registry_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). -- scaleway_function - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). -- scaleway_function_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). -- scaleway_function_namespace - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). -- scaleway_function_namespace_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858). -- scaleway_ip - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). -- scaleway_lb - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). -- scaleway_security_group - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). -- scaleway_security_group - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). -- scaleway_user_data - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). -- scaleway_user_data - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). -- sensu_silence - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). -- snmp_facts - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). -- sorcery - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). -- sudosu become plugin - added an option (``alt_method``) to enhance compatibility with more versions of ``su`` (https://github.com/ansible-collections/community.general/pull/8214). -- udm_dns_record - replace loop with ``dict.update()`` (https://github.com/ansible-collections/community.general/pull/8876). -- ufw - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). -- unsafe plugin utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). -- vardict module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). -- vars MH module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). 
-- virtualbox inventory plugin - expose a new parameter ``enable_advanced_group_parsing`` to change how the VirtualBox dynamic inventory parses VM groups (https://github.com/ansible-collections/community.general/issues/8508, https://github.com/ansible-collections/community.general/pull/8510). -- vmadm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). -- wdc_redfish_command - minor change to handle upgrade file for Redfish WD platforms (https://github.com/ansible-collections/community.general/pull/8444). - -Breaking Changes / Porting Guide --------------------------------- - -- The collection no longer supports ansible-core 2.13 and ansible-core 2.14. While most (or even all) modules and plugins might still work with these versions, they are no longer tested in CI and breakages regarding them will not be fixed (https://github.com/ansible-collections/community.general/pull/8921). -- cmd_runner module utils - CLI arguments created directly from module parameters are no longer assigned a default formatter (https://github.com/ansible-collections/community.general/pull/8928). -- irc - the defaults of ``use_tls`` and ``validate_certs`` changed from ``false`` to ``true`` (https://github.com/ansible-collections/community.general/pull/8918). -- rhsm_repository - the states ``present`` and ``absent`` have been removed. Use ``enabled`` and ``disabled`` instead (https://github.com/ansible-collections/community.general/pull/8918). - -Deprecated Features -------------------- - -- CmdRunner module util - setting the value of the ``ignore_none`` parameter within a ``CmdRunner`` context is deprecated and that feature should be removed in community.general 12.0.0 (https://github.com/ansible-collections/community.general/pull/8479). -- MH decorator cause_changes module utils - deprecate parameters ``on_success`` and ``on_failure`` (https://github.com/ansible-collections/community.general/pull/8791). -- git_config - the ``list_all`` option has been deprecated and will be removed in community.general 11.0.0. Use the ``community.general.git_config_info`` module instead (https://github.com/ansible-collections/community.general/pull/8453). -- git_config - using ``state=present`` without providing ``value`` is deprecated and will be disallowed in community.general 11.0.0. Use the ``community.general.git_config_info`` module instead to read a value (https://github.com/ansible-collections/community.general/pull/8453). -- hipchat - the hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020. The module is therefore deprecated and will be removed from community.general 11.0.0 if nobody provides compelling reasons to still keep it (https://github.com/ansible-collections/community.general/pull/8919). -- pipx - support for versions of the command line tool ``pipx`` older than ``1.7.0`` is deprecated and will be removed in community.general 11.0.0 (https://github.com/ansible-collections/community.general/pull/8793). -- pipx_info - support for versions of the command line tool ``pipx`` older than ``1.7.0`` is deprecated and will be removed in community.general 11.0.0 (https://github.com/ansible-collections/community.general/pull/8793). - -Removed Features (previously deprecated) ----------------------------------------- - -- The consul_acl module has been removed. 
Use community.general.consul_token and/or community.general.consul_policy instead (https://github.com/ansible-collections/community.general/pull/8921). -- The hipchat callback plugin has been removed. The hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020 (https://github.com/ansible-collections/community.general/pull/8921). -- The redhat module utils has been removed (https://github.com/ansible-collections/community.general/pull/8921). -- The rhn_channel module has been removed (https://github.com/ansible-collections/community.general/pull/8921). -- The rhn_register module has been removed (https://github.com/ansible-collections/community.general/pull/8921). -- consul - removed the ``ack_params_state_absent`` option. It no longer had any effect (https://github.com/ansible-collections/community.general/pull/8918). -- ejabberd_user - removed the ``logging`` option (https://github.com/ansible-collections/community.general/pull/8918). -- gitlab modules - remove basic auth feature (https://github.com/ansible-collections/community.general/pull/8405). -- proxmox_kvm - removed the ``proxmox_default_behavior`` option. Explicitly specify the old default values if you were using ``proxmox_default_behavior=compatibility``, otherwise simply remove it (https://github.com/ansible-collections/community.general/pull/8918). -- redhat_subscription - removed the ``pool`` option. Use ``pool_ids`` instead (https://github.com/ansible-collections/community.general/pull/8918). - -Bugfixes -------- - -- bitwarden lookup plugin - fix ``KeyError`` in ``search_field`` (https://github.com/ansible-collections/community.general/issues/8549, https://github.com/ansible-collections/community.general/pull/8557). -- bitwarden lookup plugin - support BWS v0.3.0 syntax breaking change (https://github.com/ansible-collections/community.general/pull/9028). -- cloudflare_dns - fix changing Cloudflare SRV records (https://github.com/ansible-collections/community.general/issues/8679, https://github.com/ansible-collections/community.general/pull/8948). -- cmd_runner module utils - call to ``get_best_parsable_locales()`` was missing a parameter (https://github.com/ansible-collections/community.general/pull/8929). -- collection_version lookup plugin - use ``importlib`` directly instead of ``ansible.module_utils.compat.importlib``, which is deprecated and was removed in ansible-core 2.19 (https://github.com/ansible-collections/community.general/pull/9084). -- cpanm - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). -- dig lookup plugin - fix using only the last nameserver specified (https://github.com/ansible-collections/community.general/pull/8970). -- django module utils - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). -- django_command - option ``command`` is now split lexically before being passed to the underlying PythonRunner (https://github.com/ansible-collections/community.general/pull/8944). -- gconftool2_info - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411).
-- git_config - fix behavior of ``state=absent`` if ``value`` is present (https://github.com/ansible-collections/community.general/issues/8436, https://github.com/ansible-collections/community.general/pull/8452). -- gitlab_group_access_token - fix crash in check mode caused by attempted access to a newly created access token (https://github.com/ansible-collections/community.general/pull/8796). -- gitlab_label - update label's color (https://github.com/ansible-collections/community.general/pull/9010). -- gitlab_project - fix ``container_expiration_policy`` not being applied when creating a new project (https://github.com/ansible-collections/community.general/pull/8790). -- gitlab_project - fix crash caused by old Gitlab projects not having a ``container_expiration_policy`` attribute (https://github.com/ansible-collections/community.general/pull/8790). -- gitlab_project_access_token - fix crash in check mode caused by attempted access to a newly created access token (https://github.com/ansible-collections/community.general/pull/8796). -- gitlab_runner - fix ``paused`` parameter being ignored (https://github.com/ansible-collections/community.general/pull/8648). -- homebrew - do not fail when brew prints warnings (https://github.com/ansible-collections/community.general/pull/8406, https://github.com/ansible-collections/community.general/issues/7044). -- homebrew_cask - fix ``upgrade_all`` returning ``changed`` when nothing was upgraded (https://github.com/ansible-collections/community.general/issues/8707, https://github.com/ansible-collections/community.general/pull/8708). -- homectl - the module now tries to use ``legacycrypt`` on Python 3.13+ (https://github.com/ansible-collections/community.general/issues/4691, https://github.com/ansible-collections/community.general/pull/8987). -- hponcfg - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). -- ini_file - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925). -- ipa_host - add ``force_create``, fix ``enabled`` and ``disabled`` states (https://github.com/ansible-collections/community.general/issues/1094, https://github.com/ansible-collections/community.general/pull/8920). -- ipa_hostgroup - fix ``enabled`` and ``disabled`` states (https://github.com/ansible-collections/community.general/issues/8408, https://github.com/ansible-collections/community.general/pull/8900). -- java_keystore - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925). -- jenkins_node - fixed redirect authorization issues for the ``enabled``, ``disabled``, and ``absent`` node states, matching the fix already present for ``present`` (https://github.com/ansible-collections/community.general/pull/9084). -- jenkins_plugin - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925). -- kdeconfig - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925).
-- kernel_blacklist - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). -- keycloak_client - fix TypeError when sanitizing the ``saml.signing.private.key`` attribute in the module's diff or state output. The ``sanitize_cr`` function expected a dict where in some cases a list might occur (https://github.com/ansible-collections/community.general/pull/8403). -- keycloak_clientscope - remove IDs from clientscope and its protocol mappers on comparison for changed check (https://github.com/ansible-collections/community.general/pull/8545). -- keycloak_clientscope_type - fix change detection in check mode (https://github.com/ansible-collections/community.general/issues/9092, https://github.com/ansible-collections/community.general/pull/9093). -- keycloak_group - fix crash during subgroup creation. The crash was caused by a missing or empty ``subGroups`` property in Keycloak ≥23 (https://github.com/ansible-collections/community.general/issues/8788, https://github.com/ansible-collections/community.general/pull/8979). -- keycloak_realm - add normalizations for ``attributes`` and ``protocol_mappers`` (https://github.com/ansible-collections/community.general/pull/8496). -- keycloak_realm - fix change detection in check mode by sorting the lists in the realms beforehand (https://github.com/ansible-collections/community.general/pull/8877). -- keycloak_realm_key - fix invalid usage of ``parent_id`` (https://github.com/ansible-collections/community.general/issues/7850, https://github.com/ansible-collections/community.general/pull/8823). -- keycloak_user_federation - add module argument allowing users to configure the update mode for the parameter ``bindCredential`` (https://github.com/ansible-collections/community.general/pull/8898). -- keycloak_user_federation - fix key error when removing mappers during an update while new mappers are specified in the module args (https://github.com/ansible-collections/community.general/pull/8762). -- keycloak_user_federation - fix the ``UnboundLocalError`` that occurs when an ID is provided for a user federation mapper (https://github.com/ansible-collections/community.general/pull/8831). -- keycloak_user_federation - get cleartext IDP ``clientSecret`` from full realm info to detect changes to it (https://github.com/ansible-collections/community.general/issues/8294, https://github.com/ansible-collections/community.general/pull/8735). -- keycloak_user_federation - minimize change detection by setting ``krbPrincipalAttribute`` to ``''`` in Keycloak responses if missing (https://github.com/ansible-collections/community.general/pull/8785). -- keycloak_user_federation - remove ``lastSync`` parameter from Keycloak responses to minimize diff/changes (https://github.com/ansible-collections/community.general/pull/8812). -- keycloak_user_federation - remove existing user federation mappers if they are not present in the federation configuration and will not be updated (https://github.com/ansible-collections/community.general/issues/7169, https://github.com/ansible-collections/community.general/pull/8695). -- keycloak_user_federation - sort desired and after mapper list by name (analogous to the before mapper list) to minimize diff and make change detection more accurate (https://github.com/ansible-collections/community.general/pull/8761).
-- keycloak_userprofile - fix empty response when fetching userprofile component by removing ``parent=parent_id`` filter (https://github.com/ansible-collections/community.general/pull/8923). -- keycloak_userprofile - improve diff by deserializing the fetched ``kc.user.profile.config`` and serializing it only when sending it back (https://github.com/ansible-collections/community.general/pull/8940). -- launchd - correctly report changed status in check mode (https://github.com/ansible-collections/community.general/pull/8406). -- locale_gen - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). -- lxd_container - fix bug introduced in previous commit (https://github.com/ansible-collections/community.general/pull/8895, https://github.com/ansible-collections/community.general/issues/8888). -- mksysb - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). -- modprobe - fix check mode not being honored for ``persistent`` option (https://github.com/ansible-collections/community.general/issues/9051, https://github.com/ansible-collections/community.general/pull/9052). -- nsupdate - fix 'index out of range' error when changing NS records by falling back to authority section of the response (https://github.com/ansible-collections/community.general/issues/8612, https://github.com/ansible-collections/community.general/pull/8614). -- one_host - fix if statements for cases when ``ID=0`` (https://github.com/ansible-collections/community.general/issues/1199, https://github.com/ansible-collections/community.general/pull/8907). -- one_image - fix module failing due to a class method typo (https://github.com/ansible-collections/community.general/pull/9056). -- one_image_info - fix module failing due to a class method typo (https://github.com/ansible-collections/community.general/pull/9056). -- one_service - fix service creation after it was deleted with ``unique`` parameter (https://github.com/ansible-collections/community.general/issues/3137, https://github.com/ansible-collections/community.general/pull/8887). -- one_vnet - fix module failing due to a variable typo (https://github.com/ansible-collections/community.general/pull/9019). -- opennebula inventory plugin - fix invalid reference to IP when inventory runs against NICs with no IPv4 address (https://github.com/ansible-collections/community.general/pull/8489). -- opentelemetry callback - do not save the JSON response when using the ``ansible.builtin.uri`` module (https://github.com/ansible-collections/community.general/pull/8430). -- opentelemetry callback - do not save the content response when using the ``ansible.builtin.slurp`` module (https://github.com/ansible-collections/community.general/pull/8430). -- pam_limits - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925). -- pacman - do not fail if an empty list of packages has been provided and there is nothing to do (https://github.com/ansible-collections/community.general/pull/8514). -- pipx - it was ignoring ``global`` when listing existing applications (https://github.com/ansible-collections/community.general/pull/9044).
-- pipx module utils - add missing command line formatter for argument ``spec_metadata`` (https://github.com/ansible-collections/community.general/pull/9044). -- pipx_info - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). -- proxmox - fix idempotency on creation of mount volumes using Proxmox' special ``:`` syntax (https://github.com/ansible-collections/community.general/issues/8407, https://github.com/ansible-collections/community.general/pull/8542). -- proxmox - fixed an issue where the new volume handling incorrectly converted ``null`` values into ``"None"`` strings (https://github.com/ansible-collections/community.general/pull/8646). -- proxmox - fixed an issue where volume strings were overwritten instead of appended to in the new ``build_volume()`` method (https://github.com/ansible-collections/community.general/pull/8646). -- proxmox - removed the forced conversion of non-string values to strings to be consistent with the module documentation (https://github.com/ansible-collections/community.general/pull/8646). -- proxmox inventory plugin - fixed a possible error on concatenating responses from proxmox. In case an API call unexpectedly returned an empty result, the inventory failed with a fatal error. Added check for empty response (https://github.com/ansible-collections/community.general/issues/8798, https://github.com/ansible-collections/community.general/pull/8794). -- python_runner module utils - parameter ``path_prefix`` was being handled as a string when it should be a list (https://github.com/ansible-collections/community.general/pull/8944). -- redfish_utils module utils - do not fail when language is not exactly "en" (https://github.com/ansible-collections/community.general/pull/8613). -- redfish_utils module utils - fix issue with URI parsing to gracefully handle trailing slashes when extracting member identifiers (https://github.com/ansible-collections/community.general/issues/9047, https://github.com/ansible-collections/community.general/pull/9057). -- snap - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). -- snap_alias - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). -- udm_user - the module now tries to use ``legacycrypt`` on Python 3.13+ (https://github.com/ansible-collections/community.general/issues/4690, https://github.com/ansible-collections/community.general/pull/8987). - -Known Issues ------------ - -- jenkins_node - the module is not able to update the offline message when the node is already offline, because it internally uses the toggleOffline API (https://github.com/ansible-collections/community.general/pull/9084). - -New Plugins ----------- - -Filter ~~~~~~ - -- community.general.keep_keys - Keep specific keys from dictionaries in a list. -- community.general.remove_keys - Remove specific keys from dictionaries in a list. -- community.general.replace_keys - Replace specific keys in a list of dictionaries. -- community.general.reveal_ansible_type - Return input type. - -Test ~~~~ - -- community.general.ansible_type - Validate input type. - -New Modules ----------- - -- community.general.bootc_manage - Bootc Switch and Upgrade.
-- community.general.consul_agent_check - Add, modify, and delete checks within a consul cluster. -- community.general.consul_agent_service - Add, modify, and delete services within a consul cluster. -- community.general.django_check - Wrapper for C(django-admin check). -- community.general.django_createcachetable - Wrapper for C(django-admin createcachetable). -- community.general.homebrew_services - Services manager for Homebrew. -- community.general.ipa_getkeytab - Manage keytab file in FreeIPA. -- community.general.jenkins_node - Manage Jenkins nodes. -- community.general.keycloak_component - Allows administration of Keycloak components via Keycloak API. -- community.general.keycloak_realm_keys_metadata_info - Allows obtaining Keycloak realm keys metadata via Keycloak API. -- community.general.keycloak_userprofile - Allows managing Keycloak User Profiles. -- community.general.krb_ticket - Kerberos utils for managing tickets. -- community.general.one_vnet - Manages OpenNebula virtual networks. -- community.general.zypper_repository_info - List Zypper repositories. +This file is a placeholder; a version-specific ``CHANGELOG-vX.rst`` will be generated during releases from fragments +under ``changelogs/fragments``. On release branches, once a release has been created, consult the branch's version-specific +file for changes that have occurred in that branch. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 55a7098cc2..94c5299069 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -44,7 +44,49 @@ If you want to test a PR locally, refer to [our testing guide](https://github.co If you find any inconsistencies or places in this document which can be improved, feel free to raise an issue or pull request to fix it. -## Run sanity, unit or integration tests locally +## Run sanity or unit tests locally (with antsibull-nox) + +The easiest way to run sanity and unit tests locally is to use [antsibull-nox](https://ansible.readthedocs.io/projects/antsibull-nox/). +(If you have [nox](https://nox.thea.codes/en/stable/) installed, it will automatically install antsibull-nox in a virtual environment for you.) + +### Sanity tests + +The following commands show how to run ansible-test sanity tests: + +```.bash +# Run basic sanity tests for all files in the collection: +nox -Re ansible-test-sanity-devel + +# Run basic sanity tests for the given files and directories: +nox -Re ansible-test-sanity-devel -- plugins/modules/system/pids.py tests/integration/targets/pids/ + +# Run all other sanity tests for all files in the collection: +nox -R +``` + +If you replace `-Re` with `-e`, that is, leave the `-R` away, the virtual environments will be re-created. The `-R` re-uses them (if they already exist). + +### Unit tests + +The following commands show how to run unit tests: + +```.bash +# Run all unit tests: +nox -Re ansible-test-units-devel + +# Run all unit tests for one Python version (a lot faster): +nox -Re ansible-test-units-devel -- --python 3.13 + +# Run a specific unit test (for the nmcli module) for one Python version: +nox -Re ansible-test-units-devel -- --python 3.13 tests/unit/plugins/modules/net_tools/test_nmcli.py +``` + +If you replace `-Re` with `-e`, then the virtual environments will be re-created. The `-R` re-uses them (if they already exist). + +## Run basic sanity, unit or integration tests locally (with ansible-test) + +Instead of using antsibull-nox, you can also run sanity and unit tests with ansible-test directly. +This also allows you to run integration tests.
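+For example, once the checkout described below is in place, running the integration tests for a single target might look like this (a sketch: the `pids` target and the `fedora40` image are only illustrations, and a working Docker or Podman setup is assumed):
+
+```.bash
+# Change into the collection checkout (the required path layout is explained below):
+cd ~/dev/ansible_collections/community/general
+
+# Run the integration tests for one target inside a test container:
+ansible-test integration --docker fedora40 -v pids
+```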
You have to check out the repository into a specific path structure to be able to run `ansible-test`. The path to the git checkout must end with `.../ansible_collections/community/general`. Please see [our testing guide](https://github.com/ansible/community-docs/blob/main/test_pr_locally_guide.rst) for instructions on how to check out the repository into a correct path structure. The short version of these instructions is: @@ -56,20 +98,27 @@ cd ~/dev/ansible_collections/community/general Then you can run `ansible-test` (which is a part of [ansible-core](https://pypi.org/project/ansible-core/)) inside the checkout. The following example commands expect that you have installed Docker or Podman. Note that Podman has only been supported by more recent ansible-core releases. If you are using Docker, the following will work with Ansible 2.9+. -### Sanity tests +### Basic sanity tests -The following commands show how to run sanity tests: +The following commands show how to run basic sanity tests: ```.bash -# Run sanity tests for all files in the collection: +# Run basic sanity tests for all files in the collection: ansible-test sanity --docker -v -# Run sanity tests for the given files and directories: +# Run basic sanity tests for the given files and directories: ansible-test sanity --docker -v plugins/modules/system/pids.py tests/integration/targets/pids/ ``` ### Unit tests +Note that for running unit tests, you need to install the required collections in the same folder structure that `community.general` is checked out in. +Right now, you need to install [`community.internal_test_tools`](https://github.com/ansible-collections/community.internal_test_tools). +If you want to use the latest version from GitHub, you can run: +```.bash +git clone https://github.com/ansible-collections/community.internal_test_tools.git ~/dev/ansible_collections/community/internal_test_tools +``` + The following commands show how to run unit tests: ```.bash @@ -85,6 +134,16 @@ ansible-test units --docker -v --python 3.8 tests/unit/plugins/modules/net_tools ### Integration tests +Note that for running integration tests, you need to install the required collections in the same folder structure that `community.general` is checked out in. +Right now, depending on the test, you need to install [`ansible.posix`](https://github.com/ansible-collections/ansible.posix), [`community.crypto`](https://github.com/ansible-collections/community.crypto), and [`community.docker`](https://github.com/ansible-collections/community.docker). +If you want to use the latest versions from GitHub, you can run: +```.bash +mkdir -p ~/dev/ansible_collections/ansible +git clone https://github.com/ansible-collections/ansible.posix.git ~/dev/ansible_collections/ansible/posix +git clone https://github.com/ansible-collections/community.crypto.git ~/dev/ansible_collections/community/crypto +git clone https://github.com/ansible-collections/community.docker.git ~/dev/ansible_collections/community/docker +``` + The following commands show how to run integration tests: #### In Docker @@ -92,8 +151,8 @@ The following commands show how to run integration tests: Integration tests on Docker have the following parameters: - `image_name` (required): The name of the Docker image. To get the list of supported Docker images, run `ansible-test integration --help` and look for _target docker images_. -- `test_name` (optional): The name of the integration test. - For modules, this equals the short name of the module; for example, `pacman` in case of `community.general.pacman`.
+- `test_name` (optional): The name of the integration test. + For modules, this equals the short name of the module; for example, `pacman` in case of `community.general.pacman`. For plugins, the plugin type is added before the plugin's short name, for example `callback_yaml` for the `community.general.yaml` callback. ```.bash # Test all plugins/modules on fedora40 diff --git a/README.md b/README.md index be57b6bc2f..dbfc8c0f07 100644 --- a/README.md +++ b/README.md @@ -6,8 +6,10 @@ SPDX-License-Identifier: GPL-3.0-or-later # Community General Collection -[![Build Status](https://dev.azure.com/ansible/community.general/_apis/build/status/CI?branchName=stable-10)](https://dev.azure.com/ansible/community.general/_build?definitionId=31) -[![EOL CI](https://github.com/ansible-collections/community.general/workflows/EOL%20CI/badge.svg?event=push)](https://github.com/ansible-collections/community.general/actions) +[![Documentation](https://img.shields.io/badge/docs-brightgreen.svg)](https://docs.ansible.com/ansible/devel/collections/community/general/) +[![Build Status](https://dev.azure.com/ansible/community.general/_apis/build/status/CI?branchName=main)](https://dev.azure.com/ansible/community.general/_build?definitionId=31) +[![EOL CI](https://github.com/ansible-collections/community.general/actions/workflows/ansible-test.yml/badge.svg?branch=main)](https://github.com/ansible-collections/community.general/actions) +[![Nox CI](https://github.com/ansible-collections/community.general/actions/workflows/nox.yml/badge.svg?branch=main)](https://github.com/ansible-collections/community.general/actions) [![Codecov](https://img.shields.io/codecov/c/github/ansible-collections/community.general)](https://codecov.io/gh/ansible-collections/community.general) [![REUSE status](https://api.reuse.software/badge/github.com/ansible-collections/community.general)](https://api.reuse.software/info/github.com/ansible-collections/community.general) @@ -37,7 +39,7 @@ For more information about communication, see the [Ansible communication guide]( ## Tested with Ansible -Tested with the current ansible-core 2.15, ansible-core 2.16, ansible-core 2.17, ansible-core 2.18 releases and the current development version of ansible-core. Ansible-core versions before 2.15.0 are not supported. This includes all ansible-base 2.10 and Ansible 2.9 releases. +Tested with the current ansible-core 2.16, ansible-core 2.17, ansible-core 2.18, ansible-core 2.19 releases and the current development version of ansible-core. Ansible-core versions before 2.16.0 are not supported. This includes all ansible-base 2.10 and Ansible 2.9 releases. ## External requirements @@ -116,7 +118,7 @@ See the [Releasing guidelines](https://github.com/ansible/community-docs/blob/ma ## Release notes -See the [changelog](https://github.com/ansible-collections/community.general/blob/stable-10/CHANGELOG.md). +See the [changelog](https://github.com/ansible-collections/community.general/blob/main/CHANGELOG.md). ## Roadmap @@ -135,8 +137,8 @@ See [this issue](https://github.com/ansible-collections/community.general/issues This collection is primarily licensed and distributed as a whole under the GNU General Public License v3.0 or later. -See [LICENSES/GPL-3.0-or-later.txt](https://github.com/ansible-collections/community.general/blob/stable-10/COPYING) for the full text. +See [LICENSES/GPL-3.0-or-later.txt](https://github.com/ansible-collections/community.general/blob/main/COPYING) for the full text. 
-Parts of the collection are licensed under the [BSD 2-Clause license](https://github.com/ansible-collections/community.general/blob/stable-10/LICENSES/BSD-2-Clause.txt), the [MIT license](https://github.com/ansible-collections/community.general/blob/stable-10/LICENSES/MIT.txt), and the [PSF 2.0 license](https://github.com/ansible-collections/community.general/blob/stable-10/LICENSES/PSF-2.0.txt). +Parts of the collection are licensed under the [BSD 2-Clause license](https://github.com/ansible-collections/community.general/blob/main/LICENSES/BSD-2-Clause.txt), the [MIT license](https://github.com/ansible-collections/community.general/blob/main/LICENSES/MIT.txt), and the [PSF 2.0 license](https://github.com/ansible-collections/community.general/blob/main/LICENSES/PSF-2.0.txt). -All files have a machine-readable `SPDX-License-Identifier:` comment denoting their respective license(s) or an equivalent entry in an accompanying `.license` file. Only changelog fragments (which will not be part of a release) are covered by a blanket statement in `.reuse/dep5`. This conforms to the [REUSE specification](https://reuse.software/spec/). +All files have a machine-readable `SPDX-License-Identifier:` comment denoting their respective license(s) or an equivalent entry in an accompanying `.license` file. Only changelog fragments (which will not be part of a release) are covered by a blanket statement in `REUSE.toml`. This conforms to the [REUSE specification](https://reuse.software/spec/). diff --git a/REUSE.toml b/REUSE.toml new file mode 100644 index 0000000000..ff95bb8217 --- /dev/null +++ b/REUSE.toml @@ -0,0 +1,11 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +version = 1 + +[[annotations]] +path = "changelogs/fragments/**" +precedence = "aggregate" +SPDX-FileCopyrightText = "Ansible Project" +SPDX-License-Identifier = "GPL-3.0-or-later" diff --git a/antsibull-nox.toml b/antsibull-nox.toml new file mode 100644 index 0000000000..c631d3a3af --- /dev/null +++ b/antsibull-nox.toml @@ -0,0 +1,89 @@ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +# SPDX-FileCopyrightText: 2025 Felix Fontein + +[collection_sources] +"ansible.posix" = "git+https://github.com/ansible-collections/ansible.posix.git,main" +"community.crypto" = "git+https://github.com/ansible-collections/community.crypto.git,main" +"community.docker" = "git+https://github.com/ansible-collections/community.docker.git,main" +"community.internal_test_tools" = "git+https://github.com/ansible-collections/community.internal_test_tools.git,main" + +[collection_sources_per_ansible.'2.16'] +# community.crypto's main branch needs ansible-core >= 2.17 +"community.crypto" = "git+https://github.com/ansible-collections/community.crypto.git,stable-2" + +[sessions] + +[sessions.lint] +run_isort = false +run_black = false +run_flake8 = false +run_pylint = false +run_yamllint = true +yamllint_config = ".yamllint" +# yamllint_config_plugins = ".yamllint-docs" +# yamllint_config_plugins_examples = ".yamllint-examples" +run_mypy = false + +[sessions.docs_check] +validate_collection_refs="all" +codeblocks_restrict_types = [ + "ansible-output", + "console", + "ini", + "json", + "python", + "shell", + "yaml", + "yaml+jinja", + "text", +] +codeblocks_restrict_type_exact_case = true +codeblocks_allow_without_type = false
+codeblocks_allow_literal_blocks = false + +[sessions.license_check] + +[sessions.extra_checks] +run_no_unwanted_files = true +no_unwanted_files_module_extensions = [".py"] +no_unwanted_files_yaml_extensions = [".yml"] +run_action_groups = true +run_no_trailing_whitespace = true +no_trailing_whitespace_skip_paths = [ + "tests/integration/targets/iso_extract/files/test.iso", + "tests/integration/targets/java_cert/files/testpkcs.p12", + "tests/integration/targets/one_host/files/testhost/tmp/opennebula-fixtures.json.gz", + "tests/integration/targets/one_template/files/testhost/tmp/opennebula-fixtures.json.gz", + "tests/integration/targets/setup_flatpak_remote/files/repo.tar.xz", +] +no_trailing_whitespace_skip_directories = [ + "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/", + "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/", +] + +[[sessions.extra_checks.action_groups_config]] +name = "consul" +pattern = "^consul_.*$" +exclusions = [ + "consul_acl_bootstrap", + "consul_kv", +] +doc_fragment = "community.general.consul.actiongroup_consul" + +[[sessions.extra_checks.action_groups_config]] +name = "keycloak" +pattern = "^keycloak_.*$" +exclusions = [ + "keycloak_realm_info", +] +doc_fragment = "community.general.keycloak.actiongroup_keycloak" + +[sessions.build_import_check] +run_galaxy_importer = true + +[sessions.ansible_test_sanity] +include_devel = true + +[sessions.ansible_test_units] +include_devel = true diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml index cd3771b026..f8129d5d73 100644 --- a/changelogs/changelog.yaml +++ b/changelogs/changelog.yaml @@ -1,1191 +1,3 @@ --- -ancestor: 9.0.0 -releases: - 10.0.0: - changes: - breaking_changes: - - The collection no longer supports ansible-core 2.13 and ansible-core 2.14. - While most (or even all) modules and plugins might still work with these - versions, they are no longer tested in CI and breakages regarding them will - not be fixed (https://github.com/ansible-collections/community.general/pull/8921). - - cmd_runner module utils - CLI arguments created directly from module parameters - are no longer assigned a default formatter (https://github.com/ansible-collections/community.general/pull/8928). - - irc - the defaults of ``use_tls`` and ``validate_certs`` changed from ``false`` - to ``true`` (https://github.com/ansible-collections/community.general/pull/8918). - - rhsm_repository - the states ``present`` and ``absent`` have been removed. - Use ``enabled`` and ``disabled`` instead (https://github.com/ansible-collections/community.general/pull/8918). - bugfixes: - - bitwarden lookup plugin - fix ``KeyError`` in ``search_field`` (https://github.com/ansible-collections/community.general/issues/8549, - https://github.com/ansible-collections/community.general/pull/8557). - - bitwarden lookup plugin - support BWS v0.3.0 syntax breaking change (https://github.com/ansible-collections/community.general/pull/9028). - - cloudflare_dns - fix changing Cloudflare SRV records (https://github.com/ansible-collections/community.general/issues/8679, - https://github.com/ansible-collections/community.general/pull/8948). - - cmd_runner module utils - call to ``get_best_parsable_locales()`` was missing - a parameter (https://github.com/ansible-collections/community.general/pull/8929).
-- collection_version lookup plugin - use ``importlib`` directly instead of - ``ansible.module_utils.compat.importlib``, which is deprecated and was removed - in ansible-core 2.19 (https://github.com/ansible-collections/community.general/pull/9084). - - cpanm - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, - https://github.com/ansible-collections/community.general/pull/8411). - - dig lookup plugin - fix using only the last nameserver specified (https://github.com/ansible-collections/community.general/pull/8970). - - django module utils - use new ``VarDict`` to prevent deprecation warning - (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411). - - django_command - option ``command`` is now split lexically before being passed - to the underlying PythonRunner (https://github.com/ansible-collections/community.general/pull/8944). - - gconftool2_info - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, - https://github.com/ansible-collections/community.general/pull/8411). - - git_config - fix behavior of ``state=absent`` if ``value`` is present (https://github.com/ansible-collections/community.general/issues/8436, - https://github.com/ansible-collections/community.general/pull/8452). - - gitlab_group_access_token - fix crash in check mode caused by attempted - access to a newly created access token (https://github.com/ansible-collections/community.general/pull/8796). - - gitlab_label - update label's color (https://github.com/ansible-collections/community.general/pull/9010). - - gitlab_project - fix ``container_expiration_policy`` not being applied when - creating a new project (https://github.com/ansible-collections/community.general/pull/8790). - - gitlab_project - fix crash caused by old Gitlab projects not having a ``container_expiration_policy`` - attribute (https://github.com/ansible-collections/community.general/pull/8790). - - gitlab_project_access_token - fix crash in check mode caused by attempted - access to a newly created access token (https://github.com/ansible-collections/community.general/pull/8796). - - gitlab_runner - fix ``paused`` parameter being ignored (https://github.com/ansible-collections/community.general/pull/8648). - - homebrew - do not fail when brew prints warnings (https://github.com/ansible-collections/community.general/pull/8406, - https://github.com/ansible-collections/community.general/issues/7044). - - homebrew_cask - fix ``upgrade_all`` returning ``changed`` when nothing was upgraded - (https://github.com/ansible-collections/community.general/issues/8707, https://github.com/ansible-collections/community.general/pull/8708). - - homectl - the module now tries to use ``legacycrypt`` on Python 3.13+ (https://github.com/ansible-collections/community.general/issues/4691, - https://github.com/ansible-collections/community.general/pull/8987). - - hponcfg - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, - https://github.com/ansible-collections/community.general/pull/8411). - - ini_file - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, - https://github.com/ansible-collections/community.general/pull/8925).
-- ipa_host - add ``force_create``, fix ``enabled`` and ``disabled`` states - (https://github.com/ansible-collections/community.general/issues/1094, https://github.com/ansible-collections/community.general/pull/8920). - - ipa_hostgroup - fix ``enabled`` and ``disabled`` states (https://github.com/ansible-collections/community.general/issues/8408, - https://github.com/ansible-collections/community.general/pull/8900). - - java_keystore - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, - https://github.com/ansible-collections/community.general/pull/8925). - - jenkins_node - fixed redirect authorization issues for the ``enabled``, ``disabled``, and ``absent`` - node states, matching the fix already present for ``present`` (https://github.com/ansible-collections/community.general/pull/9084). - - jenkins_plugin - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, - https://github.com/ansible-collections/community.general/pull/8925). - - kdeconfig - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, - https://github.com/ansible-collections/community.general/pull/8925). - - kernel_blacklist - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, - https://github.com/ansible-collections/community.general/pull/8411). - - keycloak_client - fix TypeError when sanitizing the ``saml.signing.private.key`` - attribute in the module's diff or state output. The ``sanitize_cr`` function - expected a dict where in some cases a list might occur (https://github.com/ansible-collections/community.general/pull/8403). - - keycloak_clientscope - remove IDs from clientscope and its protocol mappers - on comparison for changed check (https://github.com/ansible-collections/community.general/pull/8545). - - keycloak_clientscope_type - fix change detection in check mode (https://github.com/ansible-collections/community.general/issues/9092, - https://github.com/ansible-collections/community.general/pull/9093). - - "keycloak_group - fix crash during subgroup creation. The crash was caused\ - \ by a missing or empty ``subGroups`` property in Keycloak \u226523 (https://github.com/ansible-collections/community.general/issues/8788,\ - \ https://github.com/ansible-collections/community.general/pull/8979)." - - keycloak_realm - add normalizations for ``attributes`` and ``protocol_mappers`` - (https://github.com/ansible-collections/community.general/pull/8496). - - keycloak_realm - fix change detection in check mode by sorting the lists - in the realms beforehand (https://github.com/ansible-collections/community.general/pull/8877). - - keycloak_realm_key - fix invalid usage of ``parent_id`` (https://github.com/ansible-collections/community.general/issues/7850, - https://github.com/ansible-collections/community.general/pull/8823). - - keycloak_user_federation - add module argument allowing users to configure - the update mode for the parameter ``bindCredential`` (https://github.com/ansible-collections/community.general/pull/8898). - - keycloak_user_federation - fix key error when removing mappers during an - update while new mappers are specified in the module args (https://github.com/ansible-collections/community.general/pull/8762). - - keycloak_user_federation - fix the ``UnboundLocalError`` that occurs when - an ID is provided for a user federation mapper (https://github.com/ansible-collections/community.general/pull/8831).
-- keycloak_user_federation - get cleartext IDP ``clientSecret`` from full - realm info to detect changes to it (https://github.com/ansible-collections/community.general/issues/8294, - https://github.com/ansible-collections/community.general/pull/8735). - - keycloak_user_federation - minimize change detection by setting ``krbPrincipalAttribute`` - to ``''`` in Keycloak responses if missing (https://github.com/ansible-collections/community.general/pull/8785). - - keycloak_user_federation - remove ``lastSync`` parameter from Keycloak responses - to minimize diff/changes (https://github.com/ansible-collections/community.general/pull/8812). - - keycloak_user_federation - remove existing user federation mappers if they - are not present in the federation configuration and will not be updated - (https://github.com/ansible-collections/community.general/issues/7169, https://github.com/ansible-collections/community.general/pull/8695). - - keycloak_user_federation - sort desired and after mapper list by name (analogous - to the before mapper list) to minimize diff and make change detection more accurate - (https://github.com/ansible-collections/community.general/pull/8761). - - keycloak_userprofile - fix empty response when fetching userprofile component - by removing ``parent=parent_id`` filter (https://github.com/ansible-collections/community.general/pull/8923). - - keycloak_userprofile - improve diff by deserializing the fetched ``kc.user.profile.config`` - and serializing it only when sending it back (https://github.com/ansible-collections/community.general/pull/8940). - - launchd - correctly report changed status in check mode (https://github.com/ansible-collections/community.general/pull/8406). - - locale_gen - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, - https://github.com/ansible-collections/community.general/pull/8411). - - lxd_container - fix bug introduced in previous commit (https://github.com/ansible-collections/community.general/pull/8895, - https://github.com/ansible-collections/community.general/issues/8888). - - mksysb - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, - https://github.com/ansible-collections/community.general/pull/8411). - - modprobe - fix check mode not being honored for ``persistent`` option (https://github.com/ansible-collections/community.general/issues/9051, - https://github.com/ansible-collections/community.general/pull/9052). - - nsupdate - fix 'index out of range' error when changing NS records by falling - back to authority section of the response (https://github.com/ansible-collections/community.general/issues/8612, - https://github.com/ansible-collections/community.general/pull/8614). - - one_host - fix if statements for cases when ``ID=0`` (https://github.com/ansible-collections/community.general/issues/1199, - https://github.com/ansible-collections/community.general/pull/8907). - - one_image - fix module failing due to a class method typo (https://github.com/ansible-collections/community.general/pull/9056). - - one_image_info - fix module failing due to a class method typo (https://github.com/ansible-collections/community.general/pull/9056). - - one_service - fix service creation after it was deleted with ``unique`` - parameter (https://github.com/ansible-collections/community.general/issues/3137, - https://github.com/ansible-collections/community.general/pull/8887).
- - one_vnet - fix module failing due to a variable typo (https://github.com/ansible-collections/community.general/pull/9019). - - opennebula inventory plugin - fix invalid reference to IP when inventory - runs against NICs with no IPv4 address (https://github.com/ansible-collections/community.general/pull/8489). - - opentelemetry callback - do not save the JSON response when using the ``ansible.builtin.uri`` - module (https://github.com/ansible-collections/community.general/pull/8430). - - opentelemetry callback - do not save the content response when using the - ``ansible.builtin.slurp`` module (https://github.com/ansible-collections/community.general/pull/8430). - - pam_limits - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, - https://github.com/ansible-collections/community.general/pull/8925). - - pacman - do not fail if an empty list of packages has been provided and there - is nothing to do (https://github.com/ansible-collections/community.general/pull/8514). - - pipx - fix ``global`` being ignored when listing existing applications (https://github.com/ansible-collections/community.general/pull/9044). - - pipx module utils - add missing command line formatter for argument ``spec_metadata`` - (https://github.com/ansible-collections/community.general/pull/9044). - - pipx_info - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, - https://github.com/ansible-collections/community.general/pull/8411). - - proxmox - fix idempotency on creation of mount volumes using Proxmox's special - ``:`` syntax (https://github.com/ansible-collections/community.general/issues/8407, - https://github.com/ansible-collections/community.general/pull/8542). - - proxmox - fixed an issue where the new volume handling incorrectly converted - ``null`` values into ``"None"`` strings (https://github.com/ansible-collections/community.general/pull/8646). - - proxmox - fixed an issue where volume strings were overwritten instead - of appended to in the new ``build_volume()`` method (https://github.com/ansible-collections/community.general/pull/8646). - - proxmox - removed the forced conversion of non-string values to strings - to be consistent with the module documentation (https://github.com/ansible-collections/community.general/pull/8646). - - proxmox inventory plugin - fixed a possible error when concatenating responses - from Proxmox. If an API call unexpectedly returned an empty result, - the inventory failed with a fatal error; a check for empty responses was added - (https://github.com/ansible-collections/community.general/issues/8798, https://github.com/ansible-collections/community.general/pull/8794). - - python_runner module utils - parameter ``path_prefix`` was being handled - as a string when it should be a list (https://github.com/ansible-collections/community.general/pull/8944). - - redfish_utils module utils - do not fail when language is not exactly "en" - (https://github.com/ansible-collections/community.general/pull/8613). - - redfish_utils module utils - fix issue with URI parsing to gracefully handle - trailing slashes when extracting member identifiers (https://github.com/ansible-collections/community.general/issues/9047, - https://github.com/ansible-collections/community.general/pull/9057).
- - snap - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, - https://github.com/ansible-collections/community.general/pull/8411). - - snap_alias - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, - https://github.com/ansible-collections/community.general/pull/8411). - - udm_user - the module now tries to use ``legacycrypt`` on Python 3.13+ (https://github.com/ansible-collections/community.general/issues/4690, - https://github.com/ansible-collections/community.general/pull/8987). - deprecated_features: - - CmdRunner module util - setting the value of the ``ignore_none`` parameter - within a ``CmdRunner`` context is deprecated and the feature will be - removed in community.general 12.0.0 (https://github.com/ansible-collections/community.general/pull/8479). - - MH decorator cause_changes module utils - deprecate parameters ``on_success`` - and ``on_failure`` (https://github.com/ansible-collections/community.general/pull/8791). - - git_config - the ``list_all`` option has been deprecated and will be removed - in community.general 11.0.0. Use the ``community.general.git_config_info`` - module instead (https://github.com/ansible-collections/community.general/pull/8453). - - git_config - using ``state=present`` without providing ``value`` is deprecated - and will be disallowed in community.general 11.0.0. Use the ``community.general.git_config_info`` - module instead to read a value (https://github.com/ansible-collections/community.general/pull/8453). - - hipchat - the hipchat service has been discontinued and the self-hosted - variant has been End of Life since 2020. The module is therefore deprecated - and will be removed from community.general 11.0.0 if nobody provides compelling - reasons to still keep it (https://github.com/ansible-collections/community.general/pull/8919). - - pipx - support for versions of the command line tool ``pipx`` older than - ``1.7.0`` is deprecated and will be removed in community.general 11.0.0 - (https://github.com/ansible-collections/community.general/pull/8793). - - pipx_info - support for versions of the command line tool ``pipx`` older - than ``1.7.0`` is deprecated and will be removed in community.general 11.0.0 - (https://github.com/ansible-collections/community.general/pull/8793). - known_issues: - - jenkins_node - the module is not able to update the offline message when the node - is already offline, due to internally using the toggleOffline API (https://github.com/ansible-collections/community.general/pull/9084). - minor_changes: - - CmdRunner module util - argument formats can be specified as plain functions - without calling ``cmd_runner_fmt.as_func()`` (https://github.com/ansible-collections/community.general/pull/8479). - - CmdRunner module utils - the parameter ``force_lang`` now supports the special - value ``auto`` which will automatically try to determine the best parsable - locale in the system (https://github.com/ansible-collections/community.general/pull/8517). - - MH module utils - add parameter ``when`` to ``cause_changes`` decorator - (https://github.com/ansible-collections/community.general/pull/8766). - - MH module utils - minor refactor in decorators (https://github.com/ansible-collections/community.general/pull/8766). - - alternatives - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
- - ansible_galaxy_install - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9060). - - ansible_galaxy_install - add upgrade feature (https://github.com/ansible-collections/community.general/pull/8431, - https://github.com/ansible-collections/community.general/issues/8351). - - ansible_galaxy_install - minor refactor in the module (https://github.com/ansible-collections/community.general/pull/8413). - - apache2_mod_proxy - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8814). - - apache2_mod_proxy - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8833). - - cargo - add option ``directory``, which allows the source directory to be specified - (https://github.com/ansible-collections/community.general/pull/8480). - - cgroup_memory_recap, hipchat, jabber, log_plays, loganalytics, logentries, - logstash, slack, splunk, sumologic, syslog_json callback plugins - make - sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8628). - - chef_databag, consul_kv, cyberarkpassword, dsv, etcd, filetree, hiera, onepassword, - onepassword_doc, onepassword_raw, passwordstore, redis, shelvefile, tss - lookup plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8626). - - chroot, funcd, incus, iocage, jail, lxc, lxd, qubes, zone connection plugins - - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8627). - - cmd_runner module utils - add decorator ``cmd_runner_fmt.stack`` (https://github.com/ansible-collections/community.general/pull/8415). - - cmd_runner module utils - refactor argument formatting code to its own Python - module (https://github.com/ansible-collections/community.general/pull/8964). - - cmd_runner_fmt module utils - simplify implementation of ``cmd_runner_fmt.as_bool_not()`` - (https://github.com/ansible-collections/community.general/pull/8512). - - cobbler, linode, lxd, nmap, online, scaleway, stackpath_compute, virtualbox - inventory plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8625). - - consul_acl - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). - - consul_kv - add argument for the datacenter option on Consul API (https://github.com/ansible-collections/community.general/pull/9026). - - copr - add ``includepkgs`` and ``excludepkgs`` parameters to limit the - list of packages fetched or excluded from the repository (https://github.com/ansible-collections/community.general/pull/8779). - - cpanm - add return value ``cpanm_version`` (https://github.com/ansible-collections/community.general/pull/9061). - - credstash lookup plugin - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8822). - - csv module utils - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8814). - - deco MH module utils - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8822). - - dig lookup plugin - add ``port`` option to specify DNS server port (https://github.com/ansible-collections/community.general/pull/8966).
- - django module utils - always retrieve version (https://github.com/ansible-collections/community.general/pull/9063). - - django_check - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9063). - - django_command - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9063). - - django_createcachetable - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9063). - - doas, dzdo, ksu, machinectl, pbrun, pfexec, pmrun, sesu, sudosu become plugins - - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8623). - - etcd3 - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). - - flatpak - improve the parsing of Flatpak application IDs based on official - guidelines (https://github.com/ansible-collections/community.general/pull/8909). - - gconftool2 - make use of ``ModuleHelper`` features to simplify code (https://github.com/ansible-collections/community.general/pull/8711). - - gconftool2 - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9064). - - gconftool2 module utils - add argument formatter ``version`` (https://github.com/ansible-collections/community.general/pull/9064). - - gconftool2_info - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9064). - - gio_mime - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9067). - - gio_mime - adjust code ahead of the old ``VarDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8855). - - gio_mime - mute the old ``VarDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8776). - - gio_mime module utils - add argument formatter ``version`` (https://github.com/ansible-collections/community.general/pull/9067). - - github_app_access_token lookup plugin - adds new ``private_key`` parameter - (https://github.com/ansible-collections/community.general/pull/8989). - - gitlab_deploy_key - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). - - gitlab_group - add many new parameters (https://github.com/ansible-collections/community.general/pull/8908). - - gitlab_group - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). - - gitlab_group - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). - - gitlab_issue - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). - - gitlab_merge_request - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). - - gitlab_project - add option ``container_expiration_policy`` to schedule - container registry cleanup (https://github.com/ansible-collections/community.general/pull/8674). - - gitlab_project - add option ``issues_access_level`` to enable/disable project - issues (https://github.com/ansible-collections/community.general/pull/8760). - - gitlab_project - add option ``model_registry_access_level`` to disable model - registry (https://github.com/ansible-collections/community.general/pull/8688).
- - gitlab_project - add option ``pages_access_level`` to disable project pages - (https://github.com/ansible-collections/community.general/pull/8688). - - gitlab_project - add option ``repository_access_level`` to disable project - repository (https://github.com/ansible-collections/community.general/pull/8674). - - gitlab_project - add option ``service_desk_enabled`` to disable service - desk (https://github.com/ansible-collections/community.general/pull/8688). - - gitlab_project - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). - - gitlab_project - sorted parameters in order to avoid future merge conflicts - (https://github.com/ansible-collections/community.general/pull/8759). - - gitlab_runner - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). - - hashids filter plugin - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8814). - - homebrew - speed up brew install and upgrade (https://github.com/ansible-collections/community.general/pull/9022). - - hwc_ecs_instance - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8822). - - hwc_evs_disk - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). - - hwc_vpc_eip - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). - - hwc_vpc_peering_connect - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8822). - - hwc_vpc_port - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). - - hwc_vpc_subnet - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). - - icinga2_host - replace loop with dict comprehension (https://github.com/ansible-collections/community.general/pull/8876). - - imc_rest - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). - - ipa_dnsrecord - adds ``SSHFP`` record type for managing SSH fingerprints - in FreeIPA DNS (https://github.com/ansible-collections/community.general/pull/8404). - - ipa_otptoken - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). - - jenkins_node - add ``offline_message`` parameter for updating the offline reason - of a Jenkins node when the state is "disabled" (offline) (https://github.com/ansible-collections/community.general/pull/9084). - - jira - adjust code ahead of the old ``VarDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8856). - - jira - mute the old ``VarDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8776). - - jira - replace deprecated params when using decorator ``cause_changes`` - (https://github.com/ansible-collections/community.general/pull/8791). - - keep_keys filter plugin - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8814). - - keycloak module utils - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8822).
- - keycloak_client - add ``client-x509`` choice to ``client_authenticator_type`` - (https://github.com/ansible-collections/community.general/pull/8973). - - keycloak_client - assign auth flow by name (https://github.com/ansible-collections/community.general/pull/8428). - - keycloak_client - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8814). - - keycloak_clientscope - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8814). - - keycloak_identity_provider - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8814). - - keycloak_realm - add boolean toggle to configure organization support for - a given Keycloak realm (https://github.com/ansible-collections/community.general/issues/9027, - https://github.com/ansible-collections/community.general/pull/8927/). - - keycloak_user_federation - add module argument allowing users to opt out - of the removal of unspecified mappers, for example to keep the Keycloak - default mappers (https://github.com/ansible-collections/community.general/pull/8764). - - keycloak_user_federation - add the user federation config parameter ``referral`` - to the module arguments (https://github.com/ansible-collections/community.general/pull/8954). - - keycloak_user_federation - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8814). - - keycloak_user_federation - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8822). - - keycloak_user_federation - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8833). - - linode - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). - - locale_gen - add support for multiple locales (https://github.com/ansible-collections/community.general/issues/8677, - https://github.com/ansible-collections/community.general/pull/8682). - - lxc_container - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). - - lxd_container - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). - - manageiq_provider - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8814). - - mattermost - adds support for message priority (https://github.com/ansible-collections/community.general/issues/9068, - https://github.com/ansible-collections/community.general/pull/9087). - - memcached, pickle, redis, yaml cache plugins - make sure that all options - are typed (https://github.com/ansible-collections/community.general/pull/8624). - - memset_dns_reload - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). - - memset_memstore_info - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). - - memset_server_info - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). - - memset_zone - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876).
- - memset_zone_domain - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). - - memset_zone_record - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876). - - nmcli - add ``conn_enable`` param to reload a connection (https://github.com/ansible-collections/community.general/issues/3752, - https://github.com/ansible-collections/community.general/issues/8704, https://github.com/ansible-collections/community.general/pull/8897). - - nmcli - add ``state=up`` and ``state=down`` to enable/disable connections - (https://github.com/ansible-collections/community.general/issues/3752, https://github.com/ansible-collections/community.general/issues/8704, - https://github.com/ansible-collections/community.general/issues/7152, https://github.com/ansible-collections/community.general/pull/8897). - - nmcli - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). - - npm - add ``force`` parameter to allow ``--force`` (https://github.com/ansible-collections/community.general/pull/8885). - - ocapi_utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). - - one_image - add ``create``, ``template`` and ``datastore_id`` arguments - for image creation (https://github.com/ansible-collections/community.general/pull/9075). - - one_image - add ``wait_timeout`` argument for adjustable timeouts (https://github.com/ansible-collections/community.general/pull/9075). - - one_image - add option ``persistent`` to manage image persistence (https://github.com/ansible-collections/community.general/issues/3578, - https://github.com/ansible-collections/community.general/pull/8889). - - one_image - extend the XSD schema to make it return much more info about the image - (https://github.com/ansible-collections/community.general/pull/8889). - - one_image - refactor code to make it more similar to ``one_template`` and - ``one_vnet`` (https://github.com/ansible-collections/community.general/pull/8889). - - one_image_info - extend the XSD schema to make it return much more info about - the image (https://github.com/ansible-collections/community.general/pull/8889). - - one_image_info - refactor code to make it more similar to ``one_template`` - and ``one_vnet`` (https://github.com/ansible-collections/community.general/pull/8889). - - one_service - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). - - one_vm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). - - onepassword lookup plugin - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8833). - - open_iscsi - allow login to a portal with multiple targets without specifying - any of them (https://github.com/ansible-collections/community.general/pull/8719). - - openbsd_pkg - adds diff support to show changes in the installed package list. - This does not yet work for check mode (https://github.com/ansible-collections/community.general/pull/8402). - - opennebula.py - add VM ``id`` and VM ``host`` to inventory host data (https://github.com/ansible-collections/community.general/pull/8532).
- - opentelemetry callback plugin - fix default value for ``store_spans_in_file`` - causing traces to be produced to a file named ``None`` (https://github.com/ansible-collections/community.general/issues/8566, - https://github.com/ansible-collections/community.general/pull/8741). - - opkg - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9086). - - passwordstore lookup plugin - add subkey creation/update support (https://github.com/ansible-collections/community.general/pull/8952). - - passwordstore lookup plugin - add the current user to the lockfile file - name to address issues on multi-user systems (https://github.com/ansible-collections/community.general/pull/8689). - - pids - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). - - pipx - add parameter ``suffix`` to module (https://github.com/ansible-collections/community.general/pull/8675, - https://github.com/ansible-collections/community.general/issues/8656). - - pipx - added new states ``install_all``, ``uninject``, ``upgrade_shared``, - ``pin``, and ``unpin`` (https://github.com/ansible-collections/community.general/pull/8809). - - pipx - added parameter ``global`` to module (https://github.com/ansible-collections/community.general/pull/8793). - - pipx - refactor out parsing of ``pipx list`` output to module utils (https://github.com/ansible-collections/community.general/pull/9044). - - pipx - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). - - pipx_info - add new return value ``pinned`` (https://github.com/ansible-collections/community.general/pull/9044). - - pipx_info - added parameter ``global`` to module (https://github.com/ansible-collections/community.general/pull/8793). - - pipx_info - refactor out parsing of ``pipx list`` output to module utils - (https://github.com/ansible-collections/community.general/pull/9044). - - pipx_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). - - pkg5_publisher - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). - - pkgng - add option ``use_globs`` (default ``true``) to optionally disable - glob patterns (https://github.com/ansible-collections/community.general/issues/8632, - https://github.com/ansible-collections/community.general/pull/8633). - - proxmox - add ``disk_volume`` and ``mount_volumes`` keys for better readability - (https://github.com/ansible-collections/community.general/pull/8542). - - proxmox - allow specification of the API port when using proxmox_* (https://github.com/ansible-collections/community.general/issues/8440, - https://github.com/ansible-collections/community.general/pull/8441). - - proxmox - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). - - proxmox - translate the old ``disk`` and ``mounts`` keys to the new handling - internally (https://github.com/ansible-collections/community.general/pull/8542). - - proxmox inventory plugin - add new fact for LXC interface details (https://github.com/ansible-collections/community.general/pull/8713). - - proxmox inventory plugin - clean up authentication code (https://github.com/ansible-collections/community.general/pull/8917). 
- - proxmox inventory plugin - fix urllib3 ``InsecureRequestWarnings`` not being - suppressed when a token is used (https://github.com/ansible-collections/community.general/pull/9099). - - proxmox_disk - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). - - proxmox_kvm - adds the ``ciupgrade`` parameter to specify whether cloud-init - should upgrade system packages at first boot (https://github.com/ansible-collections/community.general/pull/9066). - - proxmox_kvm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814). - - proxmox_kvm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). - - proxmox_template - small refactor in logic for determining whether a template - exists or not (https://github.com/ansible-collections/community.general/pull/8516). - - proxmox_vm_info - add ``network`` option to retrieve current network information - (https://github.com/ansible-collections/community.general/pull/8471). - - redfish_* modules - adds ``ciphers`` option for custom cipher selection - (https://github.com/ansible-collections/community.general/pull/8533). - - redfish_command - add ``UpdateUserAccountTypes`` command (https://github.com/ansible-collections/community.general/issues/9058, - https://github.com/ansible-collections/community.general/pull/9059). - - redfish_command - add ``wait`` and ``wait_timeout`` options to allow a user - to block a command until a service is accessible after performing the requested - command (https://github.com/ansible-collections/community.general/issues/8051, - https://github.com/ansible-collections/community.general/pull/8434). - - redfish_command - add handling of the ``PasswordChangeRequired`` message - from services in the ``UpdateUserPassword`` command to directly modify the - user's password if the requested user is the one invoking the operation - (https://github.com/ansible-collections/community.general/issues/8652, https://github.com/ansible-collections/community.general/pull/8653). - - redfish_config - remove ``CapacityBytes`` from the required parameters of the - ``CreateVolume`` command (https://github.com/ansible-collections/community.general/pull/8956). - - redfish_config - add parameter ``storage_none_volume_deletion`` to ``CreateVolume`` - command in order to control the automatic deletion of non-RAID volumes (https://github.com/ansible-collections/community.general/pull/8990). - - redfish_info - add command ``CheckAvailability`` to check if a service is - accessible (https://github.com/ansible-collections/community.general/issues/8051, - https://github.com/ansible-collections/community.general/pull/8434). - - redfish_info - adds ``RedfishURI`` and ``StorageId`` to Disk inventory (https://github.com/ansible-collections/community.general/pull/8937). - - redfish_utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). - - redfish_utils module utils - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8822). - - redfish_utils module utils - schedule a BIOS configuration job at next reboot - when the BIOS config is changed (https://github.com/ansible-collections/community.general/pull/9012).
- - redis cache plugin - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8833). - - redis, redis_info - add ``client_cert`` and ``client_key`` options to specify - path to certificate for Redis authentication (https://github.com/ansible-collections/community.general/pull/8654). - - redis_info - adds support for getting cluster info (https://github.com/ansible-collections/community.general/pull/8464). - - remove_keys filter plugin - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8814). - - replace_keys filter plugin - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8814). - - scaleway - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). - - scaleway module utils - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8822). - - scaleway_compute - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8833). - - scaleway_container - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8858). - - scaleway_container_info - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8858). - - scaleway_container_namespace - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8858). - - scaleway_container_namespace_info - replace Python 2.6 construct with dict - comprehensions (https://github.com/ansible-collections/community.general/pull/8858). - - scaleway_container_registry - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8858). - - scaleway_container_registry_info - replace Python 2.6 construct with dict - comprehensions (https://github.com/ansible-collections/community.general/pull/8858). - - scaleway_function - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8858). - - scaleway_function_info - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8858). - - scaleway_function_namespace - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8858). - - scaleway_function_namespace_info - replace Python 2.6 construct with dict - comprehensions (https://github.com/ansible-collections/community.general/pull/8858). - - scaleway_ip - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). - - scaleway_lb - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). - - scaleway_security_group - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8822). - - scaleway_security_group - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8833). 
- - scaleway_user_data - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876). - - scaleway_user_data - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8833). - - sensu_silence - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). - - snmp_facts - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). - - sorcery - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833). - - sudosu become plugin - added an option (``alt_method``) to enhance compatibility - with more versions of ``su`` (https://github.com/ansible-collections/community.general/pull/8214). - - udm_dns_record - replace loop with ``dict.update()`` (https://github.com/ansible-collections/community.general/pull/8876). - - ufw - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). - - unsafe plugin utils - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8814). - - vardict module utils - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8814). - - vars MH module utils - replace Python 2.6 construct with dict comprehensions - (https://github.com/ansible-collections/community.general/pull/8814). - - virtualbox inventory plugin - expose a new parameter ``enable_advanced_group_parsing`` - to change how the VirtualBox dynamic inventory parses VM groups (https://github.com/ansible-collections/community.general/issues/8508, - https://github.com/ansible-collections/community.general/pull/8510). - - vmadm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822). - - wdc_redfish_command - minor change to handle upgrade file for Redfish WD - platforms (https://github.com/ansible-collections/community.general/pull/8444). - release_summary: This is release 10.0.0 of ``community.general``, released on - 2024-11-04. - removed_features: - - The consul_acl module has been removed. Use community.general.consul_token - and/or community.general.consul_policy instead (https://github.com/ansible-collections/community.general/pull/8921). - - The hipchat callback plugin has been removed. The hipchat service has been - discontinued and the self-hosted variant has been End of Life since 2020 - (https://github.com/ansible-collections/community.general/pull/8921). - - The redhat module utils has been removed (https://github.com/ansible-collections/community.general/pull/8921). - - The rhn_channel module has been removed (https://github.com/ansible-collections/community.general/pull/8921). - - The rhn_register module has been removed (https://github.com/ansible-collections/community.general/pull/8921). - - consul - removed the ``ack_params_state_absent`` option. It had no effect - anymore (https://github.com/ansible-collections/community.general/pull/8918). - - ejabberd_user - removed the ``logging`` option (https://github.com/ansible-collections/community.general/pull/8918). - - gitlab modules - remove basic auth feature (https://github.com/ansible-collections/community.general/pull/8405). 
- - proxmox_kvm - removed the ``proxmox_default_behavior`` option. Explicitly - specify the old default values if you were using ``proxmox_default_behavior=compatibility``, - otherwise simply remove it (https://github.com/ansible-collections/community.general/pull/8918). - - redhat_subscriptions - removed the ``pool`` option. Use ``pool_ids`` instead - (https://github.com/ansible-collections/community.general/pull/8918). - fragments: - - 10.0.0.yml - - 8051-Redfish-Wait-For-Service.yml - - 8214-sudosu-not-working-on-some-BSD-machines.yml - - 8402-add-diif-mode-openbsd-pkg.yml - - 8403-fix-typeerror-in-keycloak-client.yaml - - 8404-ipa_dnsrecord_sshfp.yml - - 8405-gitlab-remove-basic-auth.yml - - 8406-fix-homebrew-cask-warning.yaml - - 8411-locale-gen-vardict.yml - - 8413-galaxy-refactor.yml - - 8415-cmd-runner-stack.yml - - 8428-assign-auth-flow-by-name-keycloak-client.yaml - - 8430-fix-opentelemetry-when-using-logs-with-uri-or-slurp-tasks.yaml - - 8431-galaxy-upgrade.yml - - 8440-allow-api-port-specification.yaml - - 8444-fix-redfish-gen2-upgrade.yaml - - 8452-git_config-absent.yml - - 8453-git_config-deprecate-read.yml - - 8464-redis-add-cluster-info.yml - - 8471-proxmox-vm-info-network.yml - - 8476-launchd-check-mode-changed.yaml - - 8479-cmdrunner-improvements.yml - - 8480-directory-feature-cargo.yml - - 8489-fix-opennebula-inventory-crash-when-nic-has-no-ip.yml - - 8496-keycloak_clientscope-add-normalizations.yaml - - 8508-virtualbox-inventory.yml - - 8512-as-bool-not.yml - - 8514-pacman-empty.yml - - 8516-proxmox-template-refactor.yml - - 8517-cmd-runner-lang-auto.yml - - 8532-expand-opennuebula-inventory-data.yml - - 8533-add-ciphers-option.yml - - 8542-fix-proxmox-volume-handling.yml - - 8545-keycloak-clientscope-remove-id-on-compare.yml - - 8557-fix-bug-with-bitwarden.yml - - 8613-redfish_utils-language.yaml - - 8614-nsupdate-index-out-of-range.yml - - 8623-become-types.yml - - 8624-cache-types.yml - - 8625-inventory-types.yml - - 8626-lookup-types.yml - - 8627-connection-types.yml - - 8628-callback-types.yml - - 8632-pkgng-add-option-use_globs.yml - - 8646-fix-bug-in-proxmox-volumes.yml - - 8648-fix-gitlab-runner-paused.yaml - - 8652-Redfish-Password-Change-Required.yml - - 8654-add-redis-tls-params.yml - - 8674-add-gitlab-project-cleanup-policy.yml - - 8675-pipx-install-suffix.yml - - 8679-fix-cloudflare-srv.yml - - 8682-locale-gen-multiple.yaml - - 8688-gitlab_project-add-new-params.yml - - 8689-passwordstore-lock-naming.yml - - 8695-keycloak_user_federation-mapper-removal.yml - - 8708-homebrew_cask-fix-upgrade-all.yml - - 8711-gconftool2-refactor.yml - - 8713-proxmox_lxc_interfaces.yml - - 8719-openiscsi-add-multiple-targets.yaml - - 8735-keycloak_identity_provider-get-cleartext-secret-from-realm-info.yml - - 8738-limit-packages-for-copr.yml - - 8741-fix-opentelemetry-callback.yml - - 8759-gitlab_project-sort-params.yml - - 8760-gitlab_project-add-issues-access-level.yml - - 8761-keycloak_user_federation-sort-desired-and-after-mappers-by-name.yml - - 8762-keycloac_user_federation-fix-key-error-when-updating.yml - - 8764-keycloak_user_federation-make-mapper-removal-optout.yml - - 8766-mh-deco-improve.yml - - 8776-mute-vardict-deprecation.yml - - 8785-keycloak_user_federation-set-krbPrincipalAttribute-to-empty-string-if-missing.yaml - - 8790-gitlab_project-fix-cleanup-policy-on-project-create.yml - - 8791-mh-cause-changes-param-depr.yml - - 8793-pipx-global.yml - - 8794-Fixing-possible-concatination-error.yaml - - 8796-gitlab-access-token-check-mode.yml - - 
8809-pipx-new-params.yml - - 8812-keycloak-user-federation-remove-lastSync-param-from-kc-responses.yml - - 8814-dict-comprehension.yml - - 8822-dict-comprehension.yml - - 8823-keycloak-realm-key.yml - - 8831-fix-error-when-mapper-id-is-provided.yml - - 8833-dict-comprehension.yml - - 8855-gio_mime_vardict.yml - - 8856-jira_vardict.yml - - 8858-dict-comprehension.yml - - 8876-dict-items-loop.yml - - 8877-keycloak_realm-sort-lists-before-change-detection.yaml - - 8885-add-force-flag-for-nmp.yml - - 8887-fix-one_service-unique.yml - - 8889-refactor-one-image-modules.yml - - 8895-fix-comprehension.yaml - - 8897-nmcli-add-reload-and-up-down.yml - - 8898-add-arg-to-exclude-bind-credential-from-change-check.yaml - - 8900-ipa-hostgroup-fix-states.yml - - 8907-fix-one-host-id.yml - - 8908-add-gitlab-group-params.yml - - 8909-flatpak-improve-name-parsing.yaml - - 8917-proxmox-clean-auth.yml - - 8920-ipa-host-fix-state.yml - - 8923-keycloak_userprofile-fix-empty-response-when-fetching-userprofile.yml - - 8925-atomic.yml - - 8928-cmd-runner-10.0.0.yml - - 8929-cmd_runner-bugfix.yml - - 8937-add-StorageId-RedfishURI-to-disk-facts.yml - - 8940-keycloak_userprofile-improve-diff.yml - - 8944-django-command-fix.yml - - 8952-password-store-lookup-create-subkey-support.yml - - 8954-keycloak-user-federation-add-referral-parameter.yml - - 8956-remove-capacitybytes-from-the-required-parameters_list.yml - - 8964-cmd-runner-argformat-refactor.yml - - 8966-dig-add-port-option.yml - - 8970-fix-dig-multi-nameservers.yml - - 8973-keycloak_client-add-x509-auth.yml - - 8979-keycloak_group-fix-subgroups.yml - - 8987-legacycrypt.yml - - 8989-github-app-token-from-fact.yml - - 8990.yml - - 9010-edit-gitlab-label-color.yaml - - 9012-dell-pwrbutton-requires-a-job-initiated-at-reboot.yml - - 9019-onevnet-bugfix.yml - - 9022-improve-homebrew-perf.yml - - 9026-consul_kv-datacenter.yml - - 9027-support-organizations-in-keycloak-realm.yml - - 9028-bitwarden-secrets-manager-syntax-fix.yml - - 9044-pipx-fixes.yml - - 9047-redfish-uri-parsing.yml - - 9052-modprobe-bugfix.yml - - 9056-fix-one_image-modules.yml - - 9059-redfish_command-updateuseraccounttypes.yml - - 9060-ansible-galaxy-install-version.yml - - 9061-cpanm-version.yml - - 9063-django-version.yml - - 9064-gconftool2-version.yml - - 9066-proxmox-kvm-ciupgrade.yml - - 9067-gio-mime-version.yml - - 9075-add-creation-oneimage.yml - - 9084-collection_version-importlib.yml - - 9084-jenkins_node-add-offline-message.yml - - 9086-gio-mime-version.yml - - 9087-mattermost-priority.yaml - - 9092-keycloak-clientscope-type-fix-check-mode.yml - - 9099-proxmox-fix-insecure.yml - - deprecate-hipchat.yml - - deprecations.yml - - removals.yml - modules: - - description: Bootc Switch and Upgrade. - name: bootc_manage - namespace: '' - - description: Add, modify, and delete checks within a consul cluster. - name: consul_agent_check - namespace: '' - - description: Add, modify and delete services within a consul cluster. - name: consul_agent_service - namespace: '' - - description: Wrapper for C(django-admin check). - name: django_check - namespace: '' - - description: Wrapper for C(django-admin createcachetable). - name: django_createcachetable - namespace: '' - - description: Services manager for Homebrew. - name: homebrew_services - namespace: '' - - description: Manage keytab file in FreeIPA. - name: ipa_getkeytab - namespace: '' - - description: Manage Jenkins nodes. - name: jenkins_node - namespace: '' - - description: Allows administration of Keycloak components via Keycloak API. 
- name: keycloak_component - namespace: '' - - description: Allows obtaining Keycloak realm keys metadata via Keycloak API. - name: keycloak_realm_keys_metadata_info - namespace: '' - - description: Allows managing Keycloak User Profiles. - name: keycloak_userprofile - namespace: '' - - description: Kerberos utils for managing tickets. - name: krb_ticket - namespace: '' - - description: Manages OpenNebula virtual networks. - name: one_vnet - namespace: '' - - description: List Zypper repositories. - name: zypper_repository_info - namespace: '' - plugins: - filter: - - description: Keep specific keys from dictionaries in a list. - name: keep_keys - namespace: null - - description: Remove specific keys from dictionaries in a list. - name: remove_keys - namespace: null - - description: Replace specific keys in a list of dictionaries. - name: replace_keys - namespace: null - - description: Return input type. - name: reveal_ansible_type - namespace: null - test: - - description: Validate input type. - name: ansible_type - namespace: null - release_date: '2024-11-04' - 10.0.1: - changes: - bugfixes: - - keycloak_client - fix diff by removing code that turns the attributes dict, - which contains additional settings, into a list (https://github.com/ansible-collections/community.general/pull/9077). - - keycloak_clientscope - fix diff and ``end_state`` by removing the code that - turns the attributes dict, which contains additional config items, into - a list (https://github.com/ansible-collections/community.general/pull/9082). - - redfish_utils module utils - remove undocumented default applytime (https://github.com/ansible-collections/community.general/pull/9114). - release_summary: Bugfix release for inclusion in Ansible 11.0.0rc1. - fragments: - - 10.0.1.yml - - 9077-keycloak_client-fix-attributes-dict-turned-into-list.yml - - 9082-keycloak_clientscope-fix-attributes-dict-turned-into-list.yml - - 9114-redfish-utils-update-remove-default-applytime.yml - release_date: '2024-11-11' - 10.1.0: - changes: - bugfixes: - - dnf_config_manager - fix hanging when prompting to import GPG keys (https://github.com/ansible-collections/community.general/pull/9124, - https://github.com/ansible-collections/community.general/issues/8830). - - dnf_config_manager - forces the locale to ``C`` before the module starts. If the - locale was set to non-English, the output of ``dnf config-manager`` - could not be parsed (https://github.com/ansible-collections/community.general/pull/9157, - https://github.com/ansible-collections/community.general/issues/9046). - - flatpak - force the locale language to ``C`` when running the flatpak command - (https://github.com/ansible-collections/community.general/pull/9187, https://github.com/ansible-collections/community.general/issues/8883). - - gio_mime - fix command line when determining the version of ``gio`` (https://github.com/ansible-collections/community.general/pull/9171, - https://github.com/ansible-collections/community.general/issues/9158). - - github_key - in check mode, a faulty call to ``datetime.strftime(...)`` - was being made, which generated an exception (https://github.com/ansible-collections/community.general/issues/9185). - - homebrew_cask - allow ``+`` symbol in Homebrew cask name validation regex - (https://github.com/ansible-collections/community.general/pull/9128). - - keycloak_clientscope_type - sort the default and optional clientscope lists - to improve the diff (https://github.com/ansible-collections/community.general/pull/9202).
- - slack - fail with the error message if the Slack API response is not OK (https://github.com/ansible-collections/community.general/pull/9198). - deprecated_features: - - opkg - deprecate value ``""`` for parameter ``force`` (https://github.com/ansible-collections/community.general/pull/9172). - - redfish_utils module utils - deprecate method ``RedfishUtils._init_session()`` - (https://github.com/ansible-collections/community.general/pull/9190). - minor_changes: - - alternatives - add ``family`` parameter that allows utilizing the ``--family`` - option available in the RedHat version of update-alternatives (https://github.com/ansible-collections/community.general/issues/5060, - https://github.com/ansible-collections/community.general/pull/9096). - - cloudflare_dns - add support for ``comment`` and ``tags`` (https://github.com/ansible-collections/community.general/pull/9132). - - deps module utils - add ``deps.clear()`` to clear out previously declared - dependencies (https://github.com/ansible-collections/community.general/pull/9179). - - homebrew - greatly speed up the module when multiple packages are passed in - the ``name`` option (https://github.com/ansible-collections/community.general/pull/9181). - - homebrew - remove duplicated package name validation (https://github.com/ansible-collections/community.general/pull/9076). - - iso_extract - adds ``password`` parameter that is passed to 7z (https://github.com/ansible-collections/community.general/pull/9159). - - launchd - add ``plist`` option for services such as sshd, where the plist - filename doesn't match the service name (https://github.com/ansible-collections/community.general/pull/9102). - - nmcli - add ``sriov`` parameter that enables support for SR-IOV settings - (https://github.com/ansible-collections/community.general/pull/9168). - - pipx - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9180). - - pipx_info - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9180). - - proxmox_template - add server-side artifact fetching support (https://github.com/ansible-collections/community.general/pull/9113). - - redfish_command - add ``update_custom_oem_header``, ``update_custom_oem_params``, - and ``update_custom_oem_mime_type`` options (https://github.com/ansible-collections/community.general/pull/9123). - - redfish_utils module utils - remove redundant code (https://github.com/ansible-collections/community.general/pull/9190). - - rpm_ostree_pkg - added the option ``apply_live`` (https://github.com/ansible-collections/community.general/pull/9167). - - rpm_ostree_pkg - added the return value ``needs_reboot`` (https://github.com/ansible-collections/community.general/pull/9167). - - scaleway_lb - minor simplification in the code (https://github.com/ansible-collections/community.general/pull/9189). - - ssh_config - add ``dynamicforward`` option (https://github.com/ansible-collections/community.general/pull/9192). - release_summary: Regular bugfix and feature release.
- fragments: - - 10.1.0.yml - - 5932-launchd-plist.yml - - 7402-proxmox-template-support-server-side-artifact-fetching.yaml - - 9076-remove-duplicated-homebrew-package-name-validation.yml - - 9096-alternatives-add-family-parameter.yml - - 9123-redfish-command-custom-oem-params.yml - - 9124-dnf_config_manager.yml - - 9128-homebrew_cask-name-regex-fix.yml - - 9132-cloudflare_dns-comment-and-tags.yml - - 9157-fix-dnf_config_manager-locale.yml - - 9159-iso-extract_add_password.yml - - 9167-rpm_ostree_pkg-apply_live.yml - - 9168-nmcli-add-sriov-parameter.yml - - 9171-gio-mime-fix-version.yml - - 9172-opkg-deprecate-force-none.yml - - 9179-deps-tests.yml - - 9180-pipx-version.yml - - 9181-improve-homebrew-module-performance.yml - - 9186-fix-broken-check-mode-in-github-key.yml - - 9187-flatpak-lang.yml - - 9189-scalway-lb-simplify-return.yml - - 9190-redfish-utils-unused-code.yml - - 9198-fail-if-slack-api-response-is-not-ok-with-error-message.yml - - 9202-keycloak_clientscope_type-sort-lists.yml - - ssh_config_add_dynamicforward_option.yml - modules: - - description: Decompresses compressed files. - name: decompress - namespace: '' - - description: Start a VM backup in Proxmox VE cluster. - name: proxmox_backup - namespace: '' - plugins: - filter: - - description: Produce a list of accumulated sums of the input list contents. - name: accumulate - namespace: null - release_date: '2024-12-02' - 10.2.0: - changes: - bugfixes: - - dig lookup plugin - correctly handle ``NoNameserver`` exception (https://github.com/ansible-collections/community.general/pull/9363, - https://github.com/ansible-collections/community.general/issues/9362). - - homebrew - fix incorrect handling of aliased homebrew modules when the alias - is requested (https://github.com/ansible-collections/community.general/pull/9255, - https://github.com/ansible-collections/community.general/issues/9240). - - htpasswd - report changes when file permissions are adjusted (https://github.com/ansible-collections/community.general/issues/9485, - https://github.com/ansible-collections/community.general/pull/9490). - - proxmox_backup - fix incorrect key lookup in vmid permission check (https://github.com/ansible-collections/community.general/pull/9223). - - proxmox_disk - fix async method and make ``resize_disk`` method handle errors - correctly (https://github.com/ansible-collections/community.general/pull/9256). - - proxmox_template - fix the wrong path called on ``proxmox_template.task_status`` - (https://github.com/ansible-collections/community.general/issues/9276, https://github.com/ansible-collections/community.general/pull/9277). - - qubes connection plugin - fix the printing of debug information (https://github.com/ansible-collections/community.general/pull/9334). - - redfish_utils module utils - Fix ``VerifyBiosAttributes`` command on multi - system resource nodes (https://github.com/ansible-collections/community.general/pull/9234). - deprecated_features: - - atomic_container - module is deprecated and will be removed in community.general - 13.0.0 (https://github.com/ansible-collections/community.general/pull/9487). - - atomic_host - module is deprecated and will be removed in community.general - 13.0.0 (https://github.com/ansible-collections/community.general/pull/9487). - - atomic_image - module is deprecated and will be removed in community.general - 13.0.0 (https://github.com/ansible-collections/community.general/pull/9487). 
- - facter - module is deprecated and will be removed in community.general 12.0.0, - use ``community.general.facter_facts`` instead (https://github.com/ansible-collections/community.general/pull/9451). - - locale_gen - ``ubuntu_mode=True`` or ``mechanism=ubuntu_legacy`` is deprecated - and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/pull/9238). - - pure module utils - the module utils is deprecated and will be removed from - community.general 12.0.0. The modules using this were removed in community.general - 3.0.0 (https://github.com/ansible-collections/community.general/pull/9432). - - purestorage doc fragments - the doc fragment is deprecated and will be removed - from community.general 12.0.0. The modules using this were removed in community.general - 3.0.0 (https://github.com/ansible-collections/community.general/pull/9432). - - sensu_check - module is deprecated and will be removed in community.general - 13.0.0, use collection ``sensu.sensu_go`` instead (https://github.com/ansible-collections/community.general/pull/9483). - - sensu_client - module is deprecated and will be removed in community.general - 13.0.0, use collection ``sensu.sensu_go`` instead (https://github.com/ansible-collections/community.general/pull/9483). - - sensu_handler - module is deprecated and will be removed in community.general - 13.0.0, use collection ``sensu.sensu_go`` instead (https://github.com/ansible-collections/community.general/pull/9483). - - sensu_silence - module is deprecated and will be removed in community.general - 13.0.0, use collection ``sensu.sensu_go`` instead (https://github.com/ansible-collections/community.general/pull/9483). - - sensu_subscription - module is deprecated and will be removed in community.general - 13.0.0, use collection ``sensu.sensu_go`` instead (https://github.com/ansible-collections/community.general/pull/9483). - - slack - the default value ``auto`` of the ``prepend_hash`` option is deprecated - and will change to ``never`` in community.general 12.0.0 (https://github.com/ansible-collections/community.general/pull/9443). - - yaml callback plugin - deprecate plugin in favor of ``result_format=yaml`` - in plugin ``ansible.builtin.default`` (https://github.com/ansible-collections/community.general/pull/9456). - minor_changes: - - bitwarden lookup plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9324). - - cgroup_memory_recap callback plugin - use f-strings instead of interpolations - or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). - - chef_databag lookup plugin - use f-strings instead of interpolations or - ``format`` (https://github.com/ansible-collections/community.general/pull/9324). - - chroot connection plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). - - chroot connection plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9322). - - cobbler inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). - - cobbler inventory plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9323).
- - collection_version lookup plugin - use f-strings instead of interpolations - or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). - - consul_kv lookup plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9324). - - context_demo callback plugin - use f-strings instead of interpolations or - ``format`` (https://github.com/ansible-collections/community.general/pull/9321). - - counter_enabled callback plugin - use f-strings instead of interpolations - or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). - - credstash lookup plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9324). - - cyberarkpassword lookup plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). - - cyberarkpassword lookup plugin - use f-strings instead of interpolations - or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). - - dense callback plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9321). - - dependent lookup plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9324). - - dig lookup plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). - - dig lookup plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9324). - - diy callback plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9321). - - dnstxt lookup plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). - - dnstxt lookup plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9324). - - doas become plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9319). - - dsv lookup plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9324). - - dzdo become plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9319). - - elastic callback plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9321). - - etcd lookup plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9324). - - etcd3 lookup plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). - - etcd3 lookup plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9324). - - filetree lookup plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9324). - - from_csv filter plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). 
- - from_ini filter plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). - - funcd connection plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9322). - - github_app_access_token lookup plugin - use f-strings instead of interpolations - or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). - - gitlab_instance_variable - add support for ``raw`` variables suboption (https://github.com/ansible-collections/community.general/pull/9425). - - gitlab_runners inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). - - gitlab_runners inventory plugin - use f-strings instead of interpolations - or ``format`` (https://github.com/ansible-collections/community.general/pull/9323). - - hiera lookup plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9324). - - icinga2 inventory plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9323). - - incus connection plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9322). - - iocage connection plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9322). - - iocage inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). - - iptables_state action plugin - use f-strings instead of interpolations or - ``format`` (https://github.com/ansible-collections/community.general/pull/9318). - - jabber callback plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9321). - - jail connection plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9322). - - keycloak - add an action group for Keycloak modules to allow ``module_defaults`` - to be set for Keycloak tasks (https://github.com/ansible-collections/community.general/pull/9284). - - keyring lookup plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9324). - - ksu become plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9319). - - lastpass lookup plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9324). - - linode inventory plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9323). - - lmdb_kv lookup plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). - - lmdb_kv lookup plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9324). 
- - locale_gen - invert the logic to determine ``ubuntu_mode``, making it look - first for ``/etc/locale.gen`` (set ``ubuntu_mode`` to ``False``) and only - then looking for ``/var/lib/locales/supported.d/`` (set ``ubuntu_mode`` - to ``True``) (https://github.com/ansible-collections/community.general/pull/9238, - https://github.com/ansible-collections/community.general/issues/9131, https://github.com/ansible-collections/community.general/issues/8487). - - 'locale_gen - new return value ``mechanism`` to better express the semantics - of the ``ubuntu_mode``, with the possible values being either ``glibc`` - (``ubuntu_mode=False``) or ``ubuntu_legacy`` (``ubuntu_mode=True``) (https://github.com/ansible-collections/community.general/pull/9238). - - ' - - log_plays callback plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9321). - - loganalytics callback plugin - use f-strings instead of interpolations or - ``format`` (https://github.com/ansible-collections/community.general/pull/9321). - - logdna callback plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9321). - - logentries callback plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). - - logentries callback plugin - use f-strings instead of interpolations or - ``format`` (https://github.com/ansible-collections/community.general/pull/9321). - - lxc connection plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9322). - - lxd connection plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9322). - - lxd inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). - - lxd inventory plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9323). - - machinectl become plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9319). - - mail callback plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9321). - - manageiq_alert_profiles - improve handling of parameter requirements (https://github.com/ansible-collections/community.general/pull/9449). - - manifold lookup plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). - - manifold lookup plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9324). - - memcached cache plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9320). - - merge_variables lookup plugin - use f-strings instead of interpolations - or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). - - nmap inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). - - nmap inventory plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9323). 
- - nrdp callback plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9321). - - onepassword lookup plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). - - onepassword lookup plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9324). - - onepassword_doc lookup plugin - use f-strings instead of interpolations - or ``format`` (https://github.com/ansible-collections/community.general/pull/9324). - - online inventory plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9323). - - opennebula inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). - - opennebula inventory plugin - use f-strings instead of interpolations or - ``format`` (https://github.com/ansible-collections/community.general/pull/9323). - - opentelemetry callback plugin - remove code handling Python versions prior - to 3.7 (https://github.com/ansible-collections/community.general/pull/9482). - - opentelemetry callback plugin - remove code handling Python versions prior - to 3.7 (https://github.com/ansible-collections/community.general/pull/9503). - - opentelemetry callback plugin - use f-strings instead of interpolations - or ``format`` (https://github.com/ansible-collections/community.general/pull/9321). - - pacemaker_cluster - remove unused code (https://github.com/ansible-collections/community.general/pull/9471). - - pacemaker_cluster - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/9471). - - passwordstore lookup plugin - use f-strings instead of interpolations or - ``format`` (https://github.com/ansible-collections/community.general/pull/9324). - - pbrun become plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9319). - - pfexec become plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9319). - - pmrun become plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9319). - - proxmox inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). - - proxmox inventory plugin - strip whitespace from ``user``, ``token_id``, - and ``token_secret`` (https://github.com/ansible-collections/community.general/issues/9227, - https://github.com/ansible-collections/community.general/pull/9228/). - - proxmox inventory plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9323). - - proxmox module utils - add method ``api_task_complete`` that can wait for - task completion and return error message (https://github.com/ansible-collections/community.general/pull/9256). - - proxmox_backup - refactor permission checking to improve code readability - and maintainability (https://github.com/ansible-collections/community.general/pull/9239). - - qubes connection plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9322). 
- - random_pet lookup plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9324). - - redis cache plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). - - redis cache plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9320). - - redis lookup plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9324). - - revbitspss lookup plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9324). - - saltstack connection plugin - use f-strings instead of interpolations or - ``format`` (https://github.com/ansible-collections/community.general/pull/9322). - - say callback plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9321). - - scaleway inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). - - scaleway inventory plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9323). - - selective callback plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9321). - - sesu become plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9319). - - shelvefile lookup plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9324). - - shutdown action plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). - - shutdown action plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9318). - - slack callback plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). - - slack callback plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9321). - - splunk callback plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9321). - - stackpath_compute inventory plugin - use f-strings instead of interpolations - or ``format`` (https://github.com/ansible-collections/community.general/pull/9323). - - sudosu become plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9319). - - timestamp callback plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9321). - - to_ini filter plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). - - tss lookup plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). - - tss lookup plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9324). - - unixy callback plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9321). 
- - virtualbox inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379). - - virtualbox inventory plugin - use f-strings instead of interpolations or - ``format`` (https://github.com/ansible-collections/community.general/pull/9323). - - xbps - add ``root`` and ``repository`` options to enable bootstrapping new - void installations (https://github.com/ansible-collections/community.general/pull/9174). - - xen_orchestra inventory plugin - use f-strings instead of interpolations - or ``format`` (https://github.com/ansible-collections/community.general/pull/9323). - - xfconf - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9226). - - xfconf_info - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9226). - - yaml callback plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9321). - - zone connection plugin - use f-strings instead of interpolations or ``format`` - (https://github.com/ansible-collections/community.general/pull/9322). - - zypper - add ``quiet`` option (https://github.com/ansible-collections/community.general/pull/9270). - - zypper - add ``simple_errors`` option (https://github.com/ansible-collections/community.general/pull/9270). - release_summary: Regular bugfix and feature release. - security_fixes: - - keycloak_authentication - API calls did not properly set the ``priority`` - during update resulting in incorrectly sorted authentication flows. This - apparently only affects Keycloak 25 or newer (https://github.com/ansible-collections/community.general/pull/9263). - fragments: - - 10.2.0.yml - - 9174-xbps-support-rootdir-and-repository.yml - - 9223-proxmox-backup-bugfixes.yml - - 9226-xfconf-version.yml - - 9228-fix-issue-header.yml - - 9234-fix-verify-bios-attributes-multi-system.yml - - 9238-locale-gen-rewrite.yml - - 9239-proxmox-backup-refactor.yml - - 9255-fix-handling-of-aliased-homebrew-packages.yml - - 9256-proxmox_disk-fix-async-method-of-resize_disk.yml - - 9263-kc_authentication-api-priority.yaml - - 9270-zypper-add-simple_errors.yaml - - 9277-proxmox_template-fix-the-wrong-path-called-on-proxmox_template.task_status.yaml - - 9284-add-keycloak-action-group.yml - - 9318-fstr-actionplugins.yml - - 9319-fstr-become-plugins.yml - - 9320-fstr-cache-plugins.yml - - 9321-fstr-callback-plugins.yml - - 9322-fstr-connection-plugins.yml - - 9323-fstr-inventory-plugins.yml - - 9324-fstr-lookup-plugins.yml - - 9334-qubes-conn.yml - - 9363-dig-nonameservers.yml - - 9379-refactor.yml - - 9387-pacemaker-cluster-cmd.yml - - 9425-gitlab-instance-raw-variable.yml - - 9432-deprecate-pure.yml - - 9443-slack-prepend_hash.yml - - 9449-manageiq-alert-profiles-reqs.yml - - 9451-facter-deprecation.yml - - 9456-yaml-callback-deprecation.yml - - 9482-opentelemetry-python-37.yml - - 9483-sensu-deprecation.yml - - 9487-atomic-deprecation.yml - - 9490-htpasswd-permissions.yml - - 9503-opentelemetry-remove-unused-code.yml - modules: - - description: Manages Android SDK packages. - name: android_sdk - namespace: '' - - description: Use the Modify-Increment LDAP V3 feature to increment an attribute - value. - name: ldap_inc - namespace: '' - - description: C(systemd)'s C(systemd-creds decrypt) plugin. - name: systemd_creds_decrypt - namespace: '' - - description: C(systemd)'s C(systemd-creds encrypt) plugin. 
- name: systemd_creds_encrypt - namespace: '' - plugins: - inventory: - - description: iocage inventory source. - name: iocage - namespace: null - release_date: '2024-12-31' +ancestor: 11.0.0 +releases: {} diff --git a/changelogs/config.yaml b/changelogs/config.yaml index 32ffe27f2b..578b8c3765 100644 --- a/changelogs/config.yaml +++ b/changelogs/config.yaml @@ -7,9 +7,9 @@ changelog_filename_template: ../CHANGELOG.rst changelog_filename_version_depth: 0 changes_file: changelog.yaml changes_format: combined +ignore_other_fragment_extensions: true keep_fragments: false mention_ancestor: true -flatmap: true new_plugins_after_name: removed_features notesdir: fragments output_formats: @@ -40,3 +40,4 @@ use_fqcn: true add_plugin_period: true changelog_nice_yaml: true changelog_sort: version +vcs: auto diff --git a/changelogs/fragments/10227-pacemaker-cluster-and-resource-enhancement.yml b/changelogs/fragments/10227-pacemaker-cluster-and-resource-enhancement.yml new file mode 100644 index 0000000000..d1cfee7816 --- /dev/null +++ b/changelogs/fragments/10227-pacemaker-cluster-and-resource-enhancement.yml @@ -0,0 +1,7 @@ +deprecated_features: + - pacemaker_cluster - the parameter ``state`` will become a required parameter in community.general 12.0.0 (https://github.com/ansible-collections/community.general/pull/10227). + +minor_changes: + - pacemaker_cluster - add ``state=maintenance`` for managing pacemaker maintenance mode (https://github.com/ansible-collections/community.general/issues/10200, https://github.com/ansible-collections/community.general/pull/10227). + - pacemaker_cluster - rename ``node`` to ``name`` and add ``node`` alias (https://github.com/ansible-collections/community.general/pull/10227). + - pacemaker_resource - enhance module by removing duplicative code (https://github.com/ansible-collections/community.general/pull/10227). diff --git a/changelogs/fragments/10231-keycloak-add-client-credentials-authentication.yml b/changelogs/fragments/10231-keycloak-add-client-credentials-authentication.yml new file mode 100644 index 0000000000..eec12e8669 --- /dev/null +++ b/changelogs/fragments/10231-keycloak-add-client-credentials-authentication.yml @@ -0,0 +1,2 @@ +minor_changes: + - keycloak - add support for ``grant_type=client_credentials`` to all keycloak modules, so that specifying ``auth_client_id`` and ``auth_client_secret`` is sufficient for authentication (https://github.com/ansible-collections/community.general/pull/10231). diff --git a/changelogs/fragments/10267-add-cloudflare-ptr-record-support.yml b/changelogs/fragments/10267-add-cloudflare-ptr-record-support.yml new file mode 100644 index 0000000000..29d71ca393 --- /dev/null +++ b/changelogs/fragments/10267-add-cloudflare-ptr-record-support.yml @@ -0,0 +1,2 @@ +minor_changes: + - cloudflare_dns - adds support for PTR records (https://github.com/ansible-collections/community.general/pull/10267). diff --git a/changelogs/fragments/10269-cloudflare-dns-refactor.yml b/changelogs/fragments/10269-cloudflare-dns-refactor.yml new file mode 100644 index 0000000000..9f91040d63 --- /dev/null +++ b/changelogs/fragments/10269-cloudflare-dns-refactor.yml @@ -0,0 +1,2 @@ +minor_changes: + - cloudflare_dns - simplify validations and refactor some code, no functional changes (https://github.com/ansible-collections/community.general/pull/10269). 
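A minimal usage sketch of the Keycloak client-credentials support added by fragment 10231 above: specifying only ``auth_client_id`` and ``auth_client_secret`` is now sufficient for authentication. The URL, realm, client, and secret variable below are illustrative placeholders, not values taken from the change itself.

- name: Manage a Keycloak client, authenticating via grant_type=client_credentials
  community.general.keycloak_client:
    auth_keycloak_url: https://keycloak.example.com   # placeholder endpoint
    auth_realm: master
    auth_client_id: automation-cli                    # with the secret below, no auth_username/auth_password needed
    auth_client_secret: "{{ vault_keycloak_client_secret }}"
    realm: example
    client_id: my-app
    state: present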
diff --git a/changelogs/fragments/10271--disable_lookups.yml b/changelogs/fragments/10271--disable_lookups.yml new file mode 100644 index 0000000000..d28e2ac833 --- /dev/null +++ b/changelogs/fragments/10271--disable_lookups.yml @@ -0,0 +1,3 @@ +bugfixes: + - "icinga2 inventory plugin - avoid using deprecated option when templating options (https://github.com/ansible-collections/community.general/pull/10271)." + - "linode inventory plugin - avoid using deprecated option when templating options (https://github.com/ansible-collections/community.general/pull/10271)." diff --git a/changelogs/fragments/10285-fstr-plugins.yml b/changelogs/fragments/10285-fstr-plugins.yml new file mode 100644 index 0000000000..6fff590fee --- /dev/null +++ b/changelogs/fragments/10285-fstr-plugins.yml @@ -0,0 +1,7 @@ +minor_changes: + - dense callback plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285). + - mail callback plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285). + - wsl connection plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285). + - jc filter plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285). + - iocage inventory plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285). + - xen_orchestra inventory plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285). diff --git a/changelogs/fragments/10311-xfconf-refactor.yml b/changelogs/fragments/10311-xfconf-refactor.yml new file mode 100644 index 0000000000..9d71bd17d8 --- /dev/null +++ b/changelogs/fragments/10311-xfconf-refactor.yml @@ -0,0 +1,2 @@ +minor_changes: + - xfconf - minor adjustments to the code (https://github.com/ansible-collections/community.general/pull/10311). diff --git a/changelogs/fragments/10323-nmcli-improvements.yml b/changelogs/fragments/10323-nmcli-improvements.yml new file mode 100644 index 0000000000..53436ea7d6 --- /dev/null +++ b/changelogs/fragments/10323-nmcli-improvements.yml @@ -0,0 +1,2 @@ +minor_changes: + - nmcli - simplify validations and refactor some code, no functional changes (https://github.com/ansible-collections/community.general/pull/10323). diff --git a/changelogs/fragments/10328-redundant-brackets.yml b/changelogs/fragments/10328-redundant-brackets.yml new file mode 100644 index 0000000000..f8f74a336c --- /dev/null +++ b/changelogs/fragments/10328-redundant-brackets.yml @@ -0,0 +1,32 @@ +minor_changes: + - logstash callback plugin - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - keycloak module utils - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - python_runner module utils - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - cloudflare_dns - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - crypttab - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - datadog_monitor - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - gitlab_deploy_key - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - gitlab_group_access_token - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - gitlab_hook - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - gitlab_project_access_token - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - gitlab_runner - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - ipa_group - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - jenkins_build - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - jenkins_build_info - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - nmcli - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - oneandone_firewall_policy - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - oneandone_load_balancer - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - oneandone_monitoring_policy - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - onepassword_info - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - osx_defaults - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - ovh_ip_loadbalancing_backend - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - packet_device - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - pagerduty - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - pingdom - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - rhevm - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - rocketchat - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - sensu_silence - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). 
+ - sl_vm - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - urpmi - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - xattr - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - xml - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). diff --git a/changelogs/fragments/10329-catapult-deprecation.yml b/changelogs/fragments/10329-catapult-deprecation.yml new file mode 100644 index 0000000000..5e5209edda --- /dev/null +++ b/changelogs/fragments/10329-catapult-deprecation.yml @@ -0,0 +1,2 @@ +deprecated_features: + - catapult - module is deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/issues/10318, https://github.com/ansible-collections/community.general/pull/10329). diff --git a/changelogs/fragments/10339-github_app_access_token.yml b/changelogs/fragments/10339-github_app_access_token.yml new file mode 100644 index 0000000000..00cd71f559 --- /dev/null +++ b/changelogs/fragments/10339-github_app_access_token.yml @@ -0,0 +1,2 @@ +bugfixes: + - github_release - support multiple types of GitHub tokens; no longer failing when ``ghs_`` token type is provided (https://github.com/ansible-collections/community.general/issues/10338, https://github.com/ansible-collections/community.general/pull/10339). \ No newline at end of file diff --git a/changelogs/fragments/10349-incus_connection-error-handling.yml b/changelogs/fragments/10349-incus_connection-error-handling.yml new file mode 100644 index 0000000000..b35da354d2 --- /dev/null +++ b/changelogs/fragments/10349-incus_connection-error-handling.yml @@ -0,0 +1,2 @@ +bugfixes: + - incus connection plugin - fix error handling to return more useful Ansible errors to the user (https://github.com/ansible-collections/community.general/issues/10344, https://github.com/ansible-collections/community.general/pull/10349). diff --git a/changelogs/fragments/10359-dependent.yml b/changelogs/fragments/10359-dependent.yml new file mode 100644 index 0000000000..e48a6142e8 --- /dev/null +++ b/changelogs/fragments/10359-dependent.yml @@ -0,0 +1,2 @@ +bugfixes: + - "dependent lookup plugin - avoid deprecated ansible-core 2.19 functionality (https://github.com/ansible-collections/community.general/pull/10359)." diff --git a/changelogs/fragments/10417-sysrc-refactor.yml b/changelogs/fragments/10417-sysrc-refactor.yml new file mode 100644 index 0000000000..b1b5db632b --- /dev/null +++ b/changelogs/fragments/10417-sysrc-refactor.yml @@ -0,0 +1,4 @@ +minor_changes: + - sysrc - adjustments to the code (https://github.com/ansible-collections/community.general/pull/10417). +bugfixes: + - sysrc - fixes parsing with multi-line variables (https://github.com/ansible-collections/community.general/issues/10394, https://github.com/ansible-collections/community.general/pull/10417). 
\ No newline at end of file diff --git a/changelogs/fragments/10442-apk-fix-empty-names.yml b/changelogs/fragments/10442-apk-fix-empty-names.yml new file mode 100644 index 0000000000..24d68b52df --- /dev/null +++ b/changelogs/fragments/10442-apk-fix-empty-names.yml @@ -0,0 +1,3 @@ +bugfixes: + - apk - handle empty name strings properly + (https://github.com/ansible-collections/community.general/issues/10441, https://github.com/ansible-collections/community.general/pull/10442). \ No newline at end of file diff --git a/changelogs/fragments/10445-cronvar-reject-empty-values.yml b/changelogs/fragments/10445-cronvar-reject-empty-values.yml new file mode 100644 index 0000000000..1bf39619cc --- /dev/null +++ b/changelogs/fragments/10445-cronvar-reject-empty-values.yml @@ -0,0 +1,2 @@ +bugfixes: + - "cronvar - handle empty strings on ``value`` properly (https://github.com/ansible-collections/community.general/issues/10439, https://github.com/ansible-collections/community.general/pull/10445)." diff --git a/changelogs/fragments/10458-listen_port_facts-prevent-type-error.yml b/changelogs/fragments/10458-listen_port_facts-prevent-type-error.yml new file mode 100644 index 0000000000..70af0932b3 --- /dev/null +++ b/changelogs/fragments/10458-listen_port_facts-prevent-type-error.yml @@ -0,0 +1,2 @@ +bugfixes: + - "listen_port_facts - avoid crash when required commands are missing (https://github.com/ansible-collections/community.general/issues/10457, https://github.com/ansible-collections/community.general/pull/10458)." \ No newline at end of file diff --git a/changelogs/fragments/10459-deprecations.yml b/changelogs/fragments/10459-deprecations.yml new file mode 100644 index 0000000000..4b3f317454 --- /dev/null +++ b/changelogs/fragments/10459-deprecations.yml @@ -0,0 +1,6 @@ +bugfixes: + - "apache2_module - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)." + - "htpasswd - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)." + - "syspatch - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)." + - "sysupgrade - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)." + - "zypper_repository - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)." diff --git a/changelogs/fragments/10461-cronvar-non-existent-dir-crash-fix.yml b/changelogs/fragments/10461-cronvar-non-existent-dir-crash-fix.yml new file mode 100644 index 0000000000..c4b77299f5 --- /dev/null +++ b/changelogs/fragments/10461-cronvar-non-existent-dir-crash-fix.yml @@ -0,0 +1,2 @@ +bugfixes: + - "cronvar - fix crash on missing ``cron_file`` parent directories (https://github.com/ansible-collections/community.general/issues/10460, https://github.com/ansible-collections/community.general/pull/10461)." diff --git a/changelogs/fragments/10491-irc.yml b/changelogs/fragments/10491-irc.yml new file mode 100644 index 0000000000..74867e71a7 --- /dev/null +++ b/changelogs/fragments/10491-irc.yml @@ -0,0 +1,2 @@ +bugfixes: + - "irc - pass hostname to ``wrap_socket()`` if ``use_tls=true`` and ``validate_certs=true`` (https://github.com/ansible-collections/community.general/issues/10472, https://github.com/ansible-collections/community.general/pull/10491)." 
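A usage sketch for the ``irc`` TLS fix in fragment 10491 above: with ``use_tls=true`` and ``validate_certs=true``, the module now passes the server hostname through to ``wrap_socket()``, so SNI and certificate hostname verification work as expected. The server, nick, and channel values below are placeholders.

- name: Send a notification to an IRC channel over validated TLS
  community.general.irc:
    server: irc.example.org   # placeholder server
    port: 6697
    use_tls: true
    validate_certs: true
    nick: ansible-bot
    channel: '#deployments'
    msg: Deployment finished successfully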
diff --git a/changelogs/fragments/10494-rfdn-1.yml b/changelogs/fragments/10494-rfdn-1.yml new file mode 100644 index 0000000000..09a0c442b0 --- /dev/null +++ b/changelogs/fragments/10494-rfdn-1.yml @@ -0,0 +1,27 @@ +minor_changes: + - aerospike_migrations - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - airbrake_deployment - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - bigpanda - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - bootc_manage - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - bower - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - btrfs_subvolume - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - bundler - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - campfire - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - cargo - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - catapult - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - cisco_webex - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - consul_kv - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - consul_policy - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - copr - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - datadog_downtime - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - datadog_monitor - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - dconf - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - dimensiondata_network - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - dimensiondata_vlan - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - dnf_config_manager - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - dnsmadeeasy - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - dpkg_divert - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - easy_install - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - elasticsearch_plugin - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). 
+ - facter - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - filesystem - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). diff --git a/changelogs/fragments/10505-rfdn-2.yml b/changelogs/fragments/10505-rfdn-2.yml new file mode 100644 index 0000000000..89aeab9356 --- /dev/null +++ b/changelogs/fragments/10505-rfdn-2.yml @@ -0,0 +1,39 @@ +minor_changes: + - gem - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - git_config_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - github_deploy_key - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - github_repo - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - github_webhook - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - github_webhook_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_branch - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_group_access_token - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_group_variable - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_hook - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_instance_variable - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_issue - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_label - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_merge_request - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_milestone - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_project - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_project_access_token - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_project_variable - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - grove - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - hg - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - homebrew - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). 
+ - homebrew_cask - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - homebrew_tap - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - honeybadger_deployment - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - htpasswd - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - icinga2_host - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - influxdb_user - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - ini_file - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - ipa_dnsrecord - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - ipa_dnszone - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - ipa_service - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - ipbase_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - ipwcli_dns - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - irc - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - jabber - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - jenkins_credential - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - jenkins_job - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - jenkins_script - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). diff --git a/changelogs/fragments/10507-rfdn-3.yml b/changelogs/fragments/10507-rfdn-3.yml new file mode 100644 index 0000000000..fae9d118bc --- /dev/null +++ b/changelogs/fragments/10507-rfdn-3.yml @@ -0,0 +1,35 @@ +minor_changes: + - keycloak_authz_authorization_scope - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - keycloak_authz_permission - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - keycloak_role - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - keycloak_userprofile - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - keyring - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - kibana_plugin - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). 
+ - layman - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - ldap_attrs - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - ldap_inc - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - librato_annotation - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - lldp - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - logentries - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - lxca_cmms - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - lxca_nodes - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - macports - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - mail - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - manageiq_alerts - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - manageiq_group - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - manageiq_policies - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - manageiq_policies_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - manageiq_tags - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - manageiq_tenant - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - matrix - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - mattermost - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - maven_artifact - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - memset_dns_reload - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - memset_zone - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - memset_zone_record - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - mqtt - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - mssql_db - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - mssql_script - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). 
+ - netcup_dns - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - newrelic_deployment - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - nsupdate - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). diff --git a/changelogs/fragments/10512-rfdn-4.yml b/changelogs/fragments/10512-rfdn-4.yml new file mode 100644 index 0000000000..6d8f9e7d77 --- /dev/null +++ b/changelogs/fragments/10512-rfdn-4.yml @@ -0,0 +1,42 @@ +minor_changes: + - oci_vcn - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - one_image_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - one_template - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - one_vnet - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - onepassword_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - oneview_fc_network_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - opendj_backendprop - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - ovh_monthly_billing - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pagerduty - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pagerduty_change - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pagerduty_user - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pam_limits - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pear - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pkgng - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pnpm - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - portage - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pritunl_org - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pritunl_org_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pritunl_user - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pritunl_user_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pubnub_blocks - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). 
+ - pushbullet - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pushover - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - redis_data - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - redis_data_incr - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - riak - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - rocketchat - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - rollbar_deployment - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - say - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - scaleway_database_backup - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - sendgrid - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - sensu_silence - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - sorcery - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - ssh_config - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - statusio_maintenance - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - svr4pkg - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - swdepot - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - syslogger - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - sysrc - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - systemd_creds_decrypt - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - systemd_creds_encrypt - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). diff --git a/changelogs/fragments/10513-rfdn-5.yml b/changelogs/fragments/10513-rfdn-5.yml new file mode 100644 index 0000000000..d930d7345c --- /dev/null +++ b/changelogs/fragments/10513-rfdn-5.yml @@ -0,0 +1,18 @@ +minor_changes: + - taiga_issue - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - twilio - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - utm_aaa_group - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - utm_ca_host_key_cert - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). 
+ - utm_dns_host - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - utm_network_interface_address - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - utm_proxy_auth_profile - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - utm_proxy_exception - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - utm_proxy_frontend - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - utm_proxy_location - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - vertica_configuration - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - vertica_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - vertica_role - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - xbps - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - yarn - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - zypper - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - zypper_repository - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). diff --git a/changelogs/fragments/10531-wsl-paramiko.yml b/changelogs/fragments/10531-wsl-paramiko.yml new file mode 100644 index 0000000000..08257d6c78 --- /dev/null +++ b/changelogs/fragments/10531-wsl-paramiko.yml @@ -0,0 +1,3 @@ +bugfixes: + - "wsl connection plugin - avoid deprecated ansible-core paramiko import helper, import paramiko directly instead + (https://github.com/ansible-collections/community.general/issues/10515, https://github.com/ansible-collections/community.general/pull/10531)." diff --git a/changelogs/fragments/9499-typetalk-deprecation.yml b/changelogs/fragments/9499-typetalk-deprecation.yml new file mode 100644 index 0000000000..8323bbe959 --- /dev/null +++ b/changelogs/fragments/9499-typetalk-deprecation.yml @@ -0,0 +1,2 @@ +deprecated_features: + - typetalk - module is deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/pull/9499). diff --git a/changelogs/fragments/logstash.yml b/changelogs/fragments/logstash.yml new file mode 100644 index 0000000000..1c7ec89b7d --- /dev/null +++ b/changelogs/fragments/logstash.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - logstash callback plugin - remove reference to Python 2 library (https://github.com/ansible-collections/community.general/pull/10345). 
diff --git a/docs/docsite/extra-docs.yml b/docs/docsite/extra-docs.yml index f73d0fe012..4594ab4c2d 100644 --- a/docs/docsite/extra-docs.yml +++ b/docs/docsite/extra-docs.yml @@ -8,9 +8,10 @@ sections: toctree: - filter_guide - test_guide - - title: Cloud Guides + - title: Technology Guides toctree: - guide_alicloud + - guide_iocage - guide_online - guide_packet - guide_scaleway @@ -20,3 +21,4 @@ sections: - guide_vardict - guide_cmdrunner - guide_modulehelper + - guide_uthelper diff --git a/docs/docsite/helper/keep_keys/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst.j2 b/docs/docsite/helper/keep_keys/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst.j2 index 77281549ba..64ac1ff0c2 100644 --- a/docs/docsite/helper/keep_keys/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst.j2 +++ b/docs/docsite/helper/keep_keys/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst.j2 @@ -36,7 +36,7 @@ gives result: {{ tests.0.result | to_yaml(indent=2) | indent(5) }} - + .. versionadded:: 9.1.0 * The results of the below examples 1-5 are all the same: diff --git a/docs/docsite/helper/remove_keys/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst.j2 b/docs/docsite/helper/remove_keys/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst.j2 index 62b25c344c..6c201d5b4e 100644 --- a/docs/docsite/helper/remove_keys/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst.j2 +++ b/docs/docsite/helper/remove_keys/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst.j2 @@ -36,7 +36,7 @@ gives result: {{ tests.0.result | to_yaml(indent=2) | indent(5) }} - + .. versionadded:: 9.1.0 * The results of the below examples 1-5 are all the same: diff --git a/docs/docsite/helper/replace_keys/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst.j2 b/docs/docsite/helper/replace_keys/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst.j2 index fb0af32f2f..0c0ba8f0be 100644 --- a/docs/docsite/helper/replace_keys/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst.j2 +++ b/docs/docsite/helper/replace_keys/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst.j2 @@ -37,7 +37,7 @@ gives result: {{ tests.0.result | to_yaml(indent=2) | indent(5) }} - + .. versionadded:: 9.1.0 * The results of the below examples 1-3 are all the same: diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst index 488cb2ce7d..3549d29ba7 100644 --- a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst +++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst @@ -44,7 +44,7 @@ gives - {k0_x0: A0, k1_x1: B0} - {k0_x0: A1, k1_x1: B1} - + .. versionadded:: 9.1.0 * The results of the below examples 1-5 are all the same: diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst index 03d4710f3a..4ac87ab79c 100644 --- a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst +++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst @@ -46,7 +46,7 @@ gives - k2_x2: [C1] k3_x3: bar - + .. 
versionadded:: 9.1.0 * The results of the below examples 1-5 are all the same: diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst index ba1bcad502..d0eb202bfe 100644 --- a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst +++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst @@ -53,7 +53,7 @@ gives k2_x2: [C1] k3_x3: bar - + .. versionadded:: 9.1.0 * The results of the below examples 1-3 are all the same: diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst index 42737c44b7..64a82536d8 100644 --- a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst +++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst @@ -4,7 +4,7 @@ SPDX-License-Identifier: GPL-3.0-or-later .. _ansible_collections.community.general.docsite.filter_guide.filter_guide_abstract_informations.lists_of_dicts: - + Lists of dictionaries ^^^^^^^^^^^^^^^^^^^^^ diff --git a/docs/docsite/rst/filter_guide.rst b/docs/docsite/rst/filter_guide.rst index 1c6468ddec..da8a90af3c 100644 --- a/docs/docsite/rst/filter_guide.rst +++ b/docs/docsite/rst/filter_guide.rst @@ -8,7 +8,7 @@ community.general Filter Guide ============================== -The :ref:`community.general collection ` offers several useful filter plugins. +The :anscollection:`community.general collection ` offers several useful filter plugins. .. toctree:: :maxdepth: 2 diff --git a/docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst b/docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst index 3059b00321..e5b5bb7e36 100644 --- a/docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst +++ b/docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst @@ -26,8 +26,8 @@ You can use the :ansplugin:`community.general.dict_kv filter `_ +* `man iocage `_ +* `Jails and Containers `_ + +.. note:: + The output of the examples is YAML formatted. See the option :ansopt:`ansible.builtin.default#callback:result_format`. + +.. toctree:: + :caption: Table of Contents + :maxdepth: 1 + + guide_iocage_inventory_basics + guide_iocage_inventory_dhcp + guide_iocage_inventory_hooks + guide_iocage_inventory_properties + guide_iocage_inventory_tags + guide_iocage_inventory_aliases diff --git a/docs/docsite/rst/guide_iocage_inventory_aliases.rst b/docs/docsite/rst/guide_iocage_inventory_aliases.rst new file mode 100644 index 0000000000..431403d733 --- /dev/null +++ b/docs/docsite/rst/guide_iocage_inventory_aliases.rst @@ -0,0 +1,200 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_aliases: + +Aliases +------- + +Quoting :ref:`inventory_aliases`: + + The ``inventory_hostname`` is the unique identifier for a host in Ansible, this can be an IP or a hostname, but also just an 'alias' or short name for the host. + +As root at the iocage host, stop and destroy all jails: + +.. 
code-block:: console + + shell> iocage stop ALL + * Stopping srv_1 + + Executing prestop OK + + Stopping services OK + + Tearing down VNET OK + + Removing devfs_ruleset: 1000 OK + + Removing jail process OK + + Executing poststop OK + * Stopping srv_2 + + Executing prestop OK + + Stopping services OK + + Tearing down VNET OK + + Removing devfs_ruleset: 1001 OK + + Removing jail process OK + + Executing poststop OK + * Stopping srv_3 + + Executing prestop OK + + Stopping services OK + + Tearing down VNET OK + + Removing devfs_ruleset: 1002 OK + + Removing jail process OK + + Executing poststop OK + ansible_client is not running! + + shell> iocage destroy -f srv_1 srv_2 srv_3 + Destroying srv_1 + Destroying srv_2 + Destroying srv_3 + +Create three VNET jails with a DHCP interface from the template *ansible_client*. Use the option ``--count``: + +.. code-block:: console + + shell> iocage create --short --template ansible_client --count 3 bpf=1 dhcp=1 vnet=1 + 1c11de2d successfully created! + 9d94cc9e successfully created! + 052b9557 successfully created! + +The names are random. Start the jails: + +.. code-block:: console + + shell> iocage start ALL + No default gateway found for ipv6. + * Starting 052b9557 + + Started OK + + Using devfs_ruleset: 1000 (iocage generated default) + + Configuring VNET OK + + Using IP options: vnet + + Starting services OK + + Executing poststart OK + + DHCP Address: 10.1.0.137/24 + No default gateway found for ipv6. + * Starting 1c11de2d + + Started OK + + Using devfs_ruleset: 1001 (iocage generated default) + + Configuring VNET OK + + Using IP options: vnet + + Starting services OK + + Executing poststart OK + + DHCP Address: 10.1.0.146/24 + No default gateway found for ipv6. + * Starting 9d94cc9e + + Started OK + + Using devfs_ruleset: 1002 (iocage generated default) + + Configuring VNET OK + + Using IP options: vnet + + Starting services OK + + Executing poststart OK + + DHCP Address: 10.1.0.115/24 + Please convert back to a jail before trying to start ansible_client + +List the jails: + +.. code-block:: console + + shell> iocage list -l + +-----+----------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL | + +=====+==========+======+=======+======+=================+====================+=====+================+==========+ + | 207 | 052b9557 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.137 | - | ansible_client | no | + +-----+----------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | 208 | 1c11de2d | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.146 | - | ansible_client | no | + +-----+----------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | 209 | 9d94cc9e | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.115 | - | ansible_client | no | + +-----+----------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + +Set notes. The tag *alias* will be used to create inventory aliases: + +.. 
code-block:: console + + shell> iocage set notes="vmm=iocage_02 project=foo alias=srv_1" 052b9557 + notes: none -> vmm=iocage_02 project=foo alias=srv_1 + shell> iocage set notes="vmm=iocage_02 project=foo alias=srv_2" 1c11de2d + notes: none -> vmm=iocage_02 project=foo alias=srv_2 + shell> iocage set notes="vmm=iocage_02 project=bar alias=srv_3" 9d94cc9e + notes: none -> vmm=iocage_02 project=bar alias=srv_3 + +Update the inventory configuration. Set the option +:ansopt:`community.general.iocage#inventory:inventory_hostname_tag` to :ansval:`alias`. This tag keeps the +value of the alias. The option :ansopt:`community.general.iocage#inventory:get_properties` must be +enabled. For example, ``hosts/02_iocage.yml`` contains: + +.. code-block:: yaml + + plugin: community.general.iocage + host: 10.1.0.73 + user: admin + get_properties: true + inventory_hostname_tag: alias + hooks_results: + - /var/db/dhclient-hook.address.epair0b + compose: + ansible_host: (iocage_hooks.0 == '-') | ternary(iocage_ip4, iocage_hooks.0) + iocage_tags: dict(iocage_properties.notes | split | map('split', '=')) + keyed_groups: + - prefix: vmm + key: iocage_tags.vmm + - prefix: project + key: iocage_tags.project + +Display tags and groups. Create a playbook ``pb-test-groups.yml`` with the following content: + +.. code-block:: yaml+jinja + + - hosts: all + remote_user: admin + + vars: + + ansible_python_interpreter: auto_silent + + tasks: + + - debug: + var: iocage_tags + + - debug: + msg: | + {% for group in groups %} + {{ group }}: {{ groups[group] }} + {% endfor %} + run_once: true + +Run the playbook: + +.. code-block:: console + + shell> ansible-playbook -i hosts/02_iocage.yml pb-test-groups.yml + + PLAY [all] ********************************************************************************************************** + + TASK [debug] ******************************************************************************************************** + ok: [srv_1] => + iocage_tags: + alias: srv_1 + project: foo + vmm: iocage_02 + ok: [srv_2] => + iocage_tags: + alias: srv_2 + project: foo + vmm: iocage_02 + ok: [srv_3] => + iocage_tags: + alias: srv_3 + project: bar + vmm: iocage_02 + + TASK [debug] ******************************************************************************************************** + ok: [srv_1] => + msg: |- + all: ['srv_1', 'srv_2', 'srv_3'] + ungrouped: [] + vmm_iocage_02: ['srv_1', 'srv_2', 'srv_3'] + project_foo: ['srv_1', 'srv_2'] + project_bar: ['srv_3'] + + PLAY RECAP ********************************************************************************************************** + srv_1 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 + srv_2 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 + srv_3 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 diff --git a/docs/docsite/rst/guide_iocage_inventory_basics.rst b/docs/docsite/rst/guide_iocage_inventory_basics.rst new file mode 100644 index 0000000000..f198edc4f4 --- /dev/null +++ b/docs/docsite/rst/guide_iocage_inventory_basics.rst @@ -0,0 +1,128 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_basics: + +Basics +------ + +As root at the iocage host, create three VNET jails with a DHCP interface from the template +*ansible_client*: + +.. 
code-block:: console + + shell> iocage create --template ansible_client --name srv_1 bpf=1 dhcp=1 vnet=1 + srv_1 successfully created! + shell> iocage create --template ansible_client --name srv_2 bpf=1 dhcp=1 vnet=1 + srv_2 successfully created! + shell> iocage create --template ansible_client --name srv_3 bpf=1 dhcp=1 vnet=1 + srv_3 successfully created! + +See: `Configuring a VNET Jail `_. + +As admin at the controller, list the jails: + +.. code-block:: console + + shell> ssh admin@10.1.0.73 iocage list -l + +------+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL | + +======+=======+======+=======+======+=================+====================+=====+================+==========+ + | None | srv_1 | off | down | jail | 14.2-RELEASE-p3 | DHCP (not running) | - | ansible_client | no | + +------+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | None | srv_2 | off | down | jail | 14.2-RELEASE-p3 | DHCP (not running) | - | ansible_client | no | + +------+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | None | srv_3 | off | down | jail | 14.2-RELEASE-p3 | DHCP (not running) | - | ansible_client | no | + +------+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + +Create the inventory file ``hosts/02_iocage.yml`` + +.. code-block:: yaml + + plugin: community.general.iocage + host: 10.1.0.73 + user: admin + +Display the inventory: + +.. code-block:: console + + shell> ansible-inventory -i hosts/02_iocage.yml --list --yaml + all: + children: + ungrouped: + hosts: + srv_1: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_ip4: '-' + iocage_ip4_dict: + ip4: [] + msg: DHCP (not running) + iocage_ip6: '-' + iocage_jid: None + iocage_release: 14.2-RELEASE-p3 + iocage_state: down + iocage_template: ansible_client + iocage_type: jail + srv_2: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_ip4: '-' + iocage_ip4_dict: + ip4: [] + msg: DHCP (not running) + iocage_ip6: '-' + iocage_jid: None + iocage_release: 14.2-RELEASE-p3 + iocage_state: down + iocage_template: ansible_client + iocage_type: jail + srv_3: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_ip4: '-' + iocage_ip4_dict: + ip4: [] + msg: DHCP (not running) + iocage_ip6: '-' + iocage_jid: None + iocage_release: 14.2-RELEASE-p3 + iocage_state: down + iocage_template: ansible_client + iocage_type: jail + +Optionally, create shared IP jails: + +.. code-block:: console + + shell> iocage create --template ansible_client --name srv_1 ip4_addr="em0|10.1.0.101/24" + srv_1 successfully created! + shell> iocage create --template ansible_client --name srv_2 ip4_addr="em0|10.1.0.102/24" + srv_2 successfully created! + shell> iocage create --template ansible_client --name srv_3 ip4_addr="em0|10.1.0.103/24" + srv_3 successfully created! 
+ shell> iocage list -l + +------+-------+------+-------+------+-----------------+-------------------+-----+----------------+----------+ + | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL | + +======+=======+======+=======+======+=================+===================+=====+================+==========+ + | None | srv_1 | off | down | jail | 14.2-RELEASE-p3 | em0|10.1.0.101/24 | - | ansible_client | no | + +------+-------+------+-------+------+-----------------+-------------------+-----+----------------+----------+ + | None | srv_2 | off | down | jail | 14.2-RELEASE-p3 | em0|10.1.0.102/24 | - | ansible_client | no | + +------+-------+------+-------+------+-----------------+-------------------+-----+----------------+----------+ + | None | srv_3 | off | down | jail | 14.2-RELEASE-p3 | em0|10.1.0.103/24 | - | ansible_client | no | + +------+-------+------+-------+------+-----------------+-------------------+-----+----------------+----------+ + +See: `Configuring a Shared IP Jail `_ + +If iocage needs environment variable(s), use the option :ansopt:`community.general.iocage#inventory:env`. For example, + +.. code-block:: yaml + + plugin: community.general.iocage + host: 10.1.0.73 + user: admin + env: + CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1 diff --git a/docs/docsite/rst/guide_iocage_inventory_dhcp.rst b/docs/docsite/rst/guide_iocage_inventory_dhcp.rst new file mode 100644 index 0000000000..3c37366ca6 --- /dev/null +++ b/docs/docsite/rst/guide_iocage_inventory_dhcp.rst @@ -0,0 +1,175 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_dhcp: + +DHCP +---- + +As root at the iocage host, start the jails: + +.. code-block:: console + + shell> iocage start ALL + No default gateway found for ipv6. + * Starting srv_1 + + Started OK + + Using devfs_ruleset: 1000 (iocage generated default) + + Configuring VNET OK + + Using IP options: vnet + + Starting services OK + + Executing poststart OK + + DHCP Address: 10.1.0.183/24 + No default gateway found for ipv6. + * Starting srv_2 + + Started OK + + Using devfs_ruleset: 1001 (iocage generated default) + + Configuring VNET OK + + Using IP options: vnet + + Starting services OK + + Executing poststart OK + + DHCP Address: 10.1.0.204/24 + No default gateway found for ipv6. + * Starting srv_3 + + Started OK + + Using devfs_ruleset: 1002 (iocage generated default) + + Configuring VNET OK + + Using IP options: vnet + + Starting services OK + + Executing poststart OK + + DHCP Address: 10.1.0.169/24 + Please convert back to a jail before trying to start ansible_client + +List the jails: + +.. 
code-block:: console + + shell> iocage list -l + +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL | + +=====+=======+======+=======+======+=================+====================+=====+================+==========+ + | 204 | srv_1 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.183 | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | 205 | srv_2 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.204 | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | 206 | srv_3 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.169 | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + +As admin at the controller, list the jails. The IP4 tab says "... address requires root": + +.. code-block:: console + + shell> ssh admin@10.1.0.73 iocage list -l + +-----+-------+------+-------+------+-----------------+-----------------------------------------+-----+----------------+----------+ + | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL | + +=====+=======+======+=======+======+=================+=========================================+=====+================+==========+ + | 204 | srv_1 | off | up | jail | 14.2-RELEASE-p3 | DHCP (running -- address requires root) | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+-----------------------------------------+-----+----------------+----------+ + | 205 | srv_2 | off | up | jail | 14.2-RELEASE-p3 | DHCP (running -- address requires root) | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+-----------------------------------------+-----+----------------+----------+ + | 206 | srv_3 | off | up | jail | 14.2-RELEASE-p3 | DHCP (running -- address requires root) | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+-----------------------------------------+-----+----------------+----------+ + +Use sudo if enabled: + +.. code-block:: console + + shell> ssh admin@10.1.0.73 sudo iocage list -l + +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL | + +=====+=======+======+=======+======+=================+====================+=====+================+==========+ + | 204 | srv_1 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.183 | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | 205 | srv_2 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.204 | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | 206 | srv_3 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.169 | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + +Create the inventory file ``hosts/02_iocage.yml``. Use the option +:ansopt:`community.general.iocage#inventory:sudo`: + +.. 
code-block:: yaml + + plugin: community.general.iocage + host: 10.1.0.73 + user: admin + sudo: true + +Display the inventory: + +.. code-block:: console + + shell> ansible-inventory -i hosts/02_iocage.yml --list --yaml + all: + children: + ungrouped: + hosts: + srv_1: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_ip4: 10.1.0.183 + iocage_ip4_dict: + ip4: + - ifc: epair0b + ip: 10.1.0.183 + mask: '-' + msg: '' + iocage_ip6: '-' + iocage_jid: '204' + iocage_release: 14.2-RELEASE-p3 + iocage_state: up + iocage_template: ansible_client + iocage_type: jail + srv_2: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_ip4: 10.1.0.204 + iocage_ip4_dict: + ip4: + - ifc: epair0b + ip: 10.1.0.204 + mask: '-' + msg: '' + iocage_ip6: '-' + iocage_jid: '205' + iocage_release: 14.2-RELEASE-p3 + iocage_state: up + iocage_template: ansible_client + iocage_type: jail + srv_3: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_ip4: 10.1.0.169 + iocage_ip4_dict: + ip4: + - ifc: epair0b + ip: 10.1.0.169 + mask: '-' + msg: '' + iocage_ip6: '-' + iocage_jid: '206' + iocage_release: 14.2-RELEASE-p3 + iocage_state: up + iocage_template: ansible_client + iocage_type: jail + +Note: If the option :ansopt:`community.general.iocage#inventory:env` is used and :ansopt:`community.general.iocage#inventory:sudo` is enabled, enable also :ansopt:`community.general.iocage#inventory:sudo_preserve_env`. For example, + +.. code-block:: yaml + + plugin: community.general.iocage + host: 10.1.0.73 + user: admin + env: + CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1 + sudo: true + sudo_preserve_env: true + +In this case, make sure the sudo tag ``SETENV`` is used: + +.. code-block:: console + + shell> ssh admin@10.1.0.73 sudo cat /usr/local/etc/sudoers | grep admin + admin ALL=(ALL) NOPASSWD:SETENV: ALL diff --git a/docs/docsite/rst/guide_iocage_inventory_hooks.rst b/docs/docsite/rst/guide_iocage_inventory_hooks.rst new file mode 100644 index 0000000000..45364fc798 --- /dev/null +++ b/docs/docsite/rst/guide_iocage_inventory_hooks.rst @@ -0,0 +1,187 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_hooks: + +Hooks +----- + +The iocage utility internally opens a console to a jail to get the jail's DHCP address. This +requires root. If you run the command ``iocage list -l`` as unprivileged user, you'll see the +message ``DHCP (running -- address requires root)``. If you are not granted the root privilege, use +``/etc/dhclient-exit-hooks``. For example, in the jail *srv_1*, create the file +``/zroot/iocage/jails/srv_1/root/etc/dhclient-exit-hooks`` + +.. code-block:: shell + + case "$reason" in + "BOUND"|"REBIND"|"REBOOT"|"RENEW") + echo $new_ip_address > /var/db/dhclient-hook.address.$interface + ;; + esac + +where ``/zroot/iocage`` is the activated pool. + +.. 
code-block:: console + + shell> zfs list | grep /zroot/iocage + zroot/iocage 4.69G 446G 5.08M /zroot/iocage + zroot/iocage/download 927M 446G 384K /zroot/iocage/download + zroot/iocage/download/14.1-RELEASE 465M 446G 465M /zroot/iocage/download/14.1-RELEASE + zroot/iocage/download/14.2-RELEASE 462M 446G 462M /zroot/iocage/download/14.2-RELEASE + zroot/iocage/images 384K 446G 384K /zroot/iocage/images + zroot/iocage/jails 189M 446G 480K /zroot/iocage/jails + zroot/iocage/jails/srv_1 62.9M 446G 464K /zroot/iocage/jails/srv_1 + zroot/iocage/jails/srv_1/root 62.4M 446G 3.53G /zroot/iocage/jails/srv_1/root + zroot/iocage/jails/srv_2 62.8M 446G 464K /zroot/iocage/jails/srv_2 + zroot/iocage/jails/srv_2/root 62.3M 446G 3.53G /zroot/iocage/jails/srv_2/root + zroot/iocage/jails/srv_3 62.8M 446G 464K /zroot/iocage/jails/srv_3 + zroot/iocage/jails/srv_3/root 62.3M 446G 3.53G /zroot/iocage/jails/srv_3/root + zroot/iocage/log 688K 446G 688K /zroot/iocage/log + zroot/iocage/releases 2.93G 446G 384K /zroot/iocage/releases + zroot/iocage/releases/14.2-RELEASE 2.93G 446G 384K /zroot/iocage/releases/14.2-RELEASE + zroot/iocage/releases/14.2-RELEASE/root 2.93G 446G 2.88G /zroot/iocage/releases/14.2-RELEASE/root + zroot/iocage/templates 682M 446G 416K /zroot/iocage/templates + zroot/iocage/templates/ansible_client 681M 446G 432K /zroot/iocage/templates/ansible_client + zroot/iocage/templates/ansible_client/root 681M 446G 3.53G /zroot/iocage/templates/ansible_client/root + +See: `man dhclient-script `_ + +Create the inventory configuration. Use the option :ansopt:`community.general.iocage#inventory:hooks_results` instead of :ansopt:`community.general.iocage#inventory:sudo`: + +.. code-block:: console + + shell> cat hosts/02_iocage.yml + +.. code-block:: yaml + + plugin: community.general.iocage + host: 10.1.0.73 + user: admin + hooks_results: + - /var/db/dhclient-hook.address.epair0b + +.. note:: + + The option :ansopt:`community.general.iocage#inventory:hooks_results` expects the poolname to be mounted to ``/poolname``. For example, if you + activate the pool iocage, this plugin expects to find the :ansopt:`community.general.iocage#inventory:hooks_results` items in the path + /iocage/iocage/jails//root. If you mount the poolname to a different path, the easiest + remedy is to create a symlink. + +As admin at the controller, display the inventory: + +.. code-block:: console + + shell> ansible-inventory -i hosts/02_iocage.yml --list --yaml + all: + children: + ungrouped: + hosts: + srv_1: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_hooks: + - 10.1.0.183 + iocage_ip4: '-' + iocage_ip4_dict: + ip4: [] + msg: DHCP (running -- address requires root) + iocage_ip6: '-' + iocage_jid: '204' + iocage_release: 14.2-RELEASE-p3 + iocage_state: up + iocage_template: ansible_client + iocage_type: jail + srv_2: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_hooks: + - 10.1.0.204 + iocage_ip4: '-' + iocage_ip4_dict: + ip4: [] + msg: DHCP (running -- address requires root) + iocage_ip6: '-' + iocage_jid: '205' + iocage_release: 14.2-RELEASE-p3 + iocage_state: up + iocage_template: ansible_client + iocage_type: jail + srv_3: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_hooks: + - 10.1.0.169 + iocage_ip4: '-' + iocage_ip4_dict: + ip4: [] + msg: DHCP (running -- address requires root) + iocage_ip6: '-' + iocage_jid: '206' + iocage_release: 14.2-RELEASE-p3 + iocage_state: up + iocage_template: ansible_client + iocage_type: jail + +Compose the variable ``ansible_host``. 
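+The expression relies on the ``ternary`` filter: when the first hook result is ``-`` (the hook file was not
+found), the value of ``iocage_ip4`` is used; otherwise the address reported by the hook wins. A minimal sketch
+of the logic, reusing an address from the listings above in the comments:
+
+.. code-block:: yaml+jinja
+
+   # iocage_hooks: ['10.1.0.183']  ->  ansible_host: '10.1.0.183'
+   # iocage_hooks: ['-']           ->  ansible_host: the value of iocage_ip4
+   ansible_host: (iocage_hooks.0 == '-') | ternary(iocage_ip4, iocage_hooks.0)
+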
+For example, ``hosts/02_iocage.yml`` could look like:
+
+.. code-block:: yaml+jinja
+
+   plugin: community.general.iocage
+   host: 10.1.0.73
+   user: admin
+   hooks_results:
+     - /var/db/dhclient-hook.address.epair0b
+   compose:
+     ansible_host: (iocage_hooks.0 == '-') | ternary(iocage_ip4, iocage_hooks.0)
+
+Test the jails. Create a playbook ``pb-test-uname.yml``:
+
+.. code-block:: yaml
+
+   - hosts: all
+     remote_user: admin
+
+     vars:
+
+       ansible_python_interpreter: auto_silent
+
+     tasks:
+
+       - command: uname -a
+         register: out
+
+       - debug:
+           var: out.stdout
+
+See: :ref:`working_with_bsd`
+
+Run the playbook:
+
+.. code-block:: console
+
+   shell> ansible-playbook -i hosts/02_iocage.yml pb-test-uname.yml
+
+   PLAY [all] **********************************************************************************************************
+
+   TASK [command] ******************************************************************************************************
+   changed: [srv_3]
+   changed: [srv_1]
+   changed: [srv_2]
+
+   TASK [debug] ********************************************************************************************************
+   ok: [srv_1] =>
+     out.stdout: FreeBSD srv-1 14.2-RELEASE-p1 FreeBSD 14.2-RELEASE-p1 GENERIC amd64
+   ok: [srv_3] =>
+     out.stdout: FreeBSD srv-3 14.2-RELEASE-p1 FreeBSD 14.2-RELEASE-p1 GENERIC amd64
+   ok: [srv_2] =>
+     out.stdout: FreeBSD srv-2 14.2-RELEASE-p1 FreeBSD 14.2-RELEASE-p1 GENERIC amd64
+
+   PLAY RECAP **********************************************************************************************************
+   srv_1 : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
+   srv_2 : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
+   srv_3 : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
+
+Note: This playbook and the inventory configuration also work for *Shared IP Jails*.
diff --git a/docs/docsite/rst/guide_iocage_inventory_properties.rst b/docs/docsite/rst/guide_iocage_inventory_properties.rst
new file mode 100644
index 0000000000..d044f2e7f2
--- /dev/null
+++ b/docs/docsite/rst/guide_iocage_inventory_properties.rst
@@ -0,0 +1,201 @@
+..
+  Copyright (c) Ansible Project
+  GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+  SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_properties:
+
+Properties
+----------
+
+Optionally, in the inventory file ``hosts/02_iocage.yml``, get the iocage properties. Enable
+:ansopt:`community.general.iocage#inventory:get_properties`:
+
+.. code-block:: yaml+jinja
+
+   plugin: community.general.iocage
+   host: 10.1.0.73
+   user: admin
+   get_properties: true
+   hooks_results:
+     - /var/db/dhclient-hook.address.epair0b
+   compose:
+     ansible_host: (iocage_hooks.0 == '-') | ternary(iocage_ip4, iocage_hooks.0)
+
+Display the properties. Create the playbook ``pb-test-properties.yml``:
+
+.. code-block:: yaml
+
+   - hosts: all
+     remote_user: admin
+
+     vars:
+
+       ansible_python_interpreter: auto_silent
+
+     tasks:
+
+       - debug:
+           var: iocage_properties
+
+Run the playbook. Limit the inventory to *srv_3*:
+
+..
code-block:: console + + shell> ansible-playbook -i hosts/02_iocage.yml -l srv_3 pb-test-properties.yml + + PLAY [all] ********************************************************************************************************** + + TASK [debug] ******************************************************************************************************** + ok: [srv_3] => + iocage_properties: + CONFIG_VERSION: '33' + allow_chflags: '0' + allow_mlock: '0' + allow_mount: '1' + allow_mount_devfs: '0' + allow_mount_fdescfs: '0' + allow_mount_fusefs: '0' + allow_mount_linprocfs: '0' + allow_mount_linsysfs: '0' + allow_mount_nullfs: '0' + allow_mount_procfs: '0' + allow_mount_tmpfs: '0' + allow_mount_zfs: '0' + allow_nfsd: '0' + allow_quotas: '0' + allow_raw_sockets: '0' + allow_set_hostname: '1' + allow_socket_af: '0' + allow_sysvipc: '0' + allow_tun: '0' + allow_vmm: '0' + assign_localhost: '0' + available: readonly + basejail: '0' + boot: '0' + bpf: '1' + children_max: '0' + cloned_release: 14.2-RELEASE + comment: none + compression: 'on' + compressratio: readonly + coredumpsize: 'off' + count: '1' + cpuset: 'off' + cputime: 'off' + datasize: 'off' + dedup: 'off' + defaultrouter: auto + defaultrouter6: auto + depends: none + devfs_ruleset: '4' + dhcp: '1' + enforce_statfs: '2' + exec_clean: '1' + exec_created: /usr/bin/true + exec_fib: '0' + exec_jail_user: root + exec_poststart: /usr/bin/true + exec_poststop: /usr/bin/true + exec_prestart: /usr/bin/true + exec_prestop: /usr/bin/true + exec_start: /bin/sh /etc/rc + exec_stop: /bin/sh /etc/rc.shutdown + exec_system_jail_user: '0' + exec_system_user: root + exec_timeout: '60' + host_domainname: none + host_hostname: srv-3 + host_hostuuid: srv_3 + host_time: '1' + hostid: ea2ba7d1-4fcd-f13f-82e4-8b32c0a03403 + hostid_strict_check: '0' + interfaces: vnet0:bridge0 + ip4: new + ip4_addr: none + ip4_saddrsel: '1' + ip6: new + ip6_addr: none + ip6_saddrsel: '1' + ip_hostname: '0' + jail_zfs: '0' + jail_zfs_dataset: iocage/jails/srv_3/data + jail_zfs_mountpoint: none + last_started: '2025-06-11 04:29:23' + localhost_ip: none + login_flags: -f root + mac_prefix: 02a098 + maxproc: 'off' + memorylocked: 'off' + memoryuse: 'off' + min_dyn_devfs_ruleset: '1000' + mount_devfs: '1' + mount_fdescfs: '1' + mount_linprocfs: '0' + mount_procfs: '0' + mountpoint: readonly + msgqqueued: 'off' + msgqsize: 'off' + nat: '0' + nat_backend: ipfw + nat_forwards: none + nat_interface: none + nat_prefix: '172.16' + nmsgq: 'off' + notes: none + nsem: 'off' + nsemop: 'off' + nshm: 'off' + nthr: 'off' + openfiles: 'off' + origin: readonly + owner: root + pcpu: 'off' + plugin_name: none + plugin_repository: none + priority: '99' + pseudoterminals: 'off' + quota: none + readbps: 'off' + readiops: 'off' + release: 14.2-RELEASE-p3 + reservation: none + resolver: /etc/resolv.conf + rlimits: 'off' + rtsold: '0' + securelevel: '2' + shmsize: 'off' + source_template: ansible_client + stacksize: 'off' + state: up + stop_timeout: '30' + swapuse: 'off' + sync_state: none + sync_target: none + sync_tgt_zpool: none + sysvmsg: new + sysvsem: new + sysvshm: new + template: '0' + type: jail + used: readonly + vmemoryuse: 'off' + vnet: '1' + vnet0_mac: 02a0983da05d 02a0983da05e + vnet0_mtu: auto + vnet1_mac: none + vnet1_mtu: auto + vnet2_mac: none + vnet2_mtu: auto + vnet3_mac: none + vnet3_mtu: auto + vnet_default_interface: auto + vnet_default_mtu: '1500' + vnet_interfaces: none + wallclock: 'off' + writebps: 'off' + writeiops: 'off' + + PLAY RECAP 
********************************************************************************************************** + srv_3 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 diff --git a/docs/docsite/rst/guide_iocage_inventory_tags.rst b/docs/docsite/rst/guide_iocage_inventory_tags.rst new file mode 100644 index 0000000000..afb645198c --- /dev/null +++ b/docs/docsite/rst/guide_iocage_inventory_tags.rst @@ -0,0 +1,117 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_tags: + +Tags +---- + +Quoting `man iocage `_ + +.. code-block:: text + + PROPERTIES + ... + notes="any string" + Custom notes for miscellaneous tagging. + Default: none + Source: local + +We will use the format `notes="tag1=value1 tag2=value2 ..."`. + +.. note:: + + The iocage tags have nothing to do with the :ref:`tags`. + +As root at the iocage host, set notes. For example, + +.. code-block:: console + + shell> iocage set notes="vmm=iocage_02 project=foo" srv_1 + notes: none -> vmm=iocage_02 project=foo + shell> iocage set notes="vmm=iocage_02 project=foo" srv_2 + notes: none -> vmm=iocage_02 project=foo + shell> iocage set notes="vmm=iocage_02 project=bar" srv_3 + notes: none -> vmm=iocage_02 project=bar + +Update the inventory configuration. Compose a dictionary *iocage_tags* and create groups. The option +:ansopt:`community.general.iocage#inventory:get_properties` must be enabled. +For example, ``hosts/02_iocage.yml`` could look like: + +.. code-block:: yaml + + plugin: community.general.iocage + host: 10.1.0.73 + user: admin + get_properties: true + hooks_results: + - /var/db/dhclient-hook.address.epair0b + compose: + ansible_host: (iocage_hooks.0 == '-') | ternary(iocage_ip4, iocage_hooks.0) + iocage_tags: dict(iocage_properties.notes | split | map('split', '=')) + keyed_groups: + - prefix: vmm + key: iocage_tags.vmm + - prefix: project + key: iocage_tags.project + +Display tags and groups. Create a playbook ``pb-test-groups.yml``: + +.. code-block:: yaml+jinja + + - hosts: all + remote_user: admin + + vars: + + ansible_python_interpreter: auto_silent + + tasks: + + - debug: + var: iocage_tags + + - debug: + msg: | + {% for group in groups %} + {{ group }}: {{ groups[group] }} + {% endfor %} + run_once: true + +Run the playbook: + +.. 
code-block:: console
+
+   shell> ansible-playbook -i hosts/02_iocage.yml pb-test-groups.yml
+
+   PLAY [all] **********************************************************************************************************
+
+   TASK [debug] ********************************************************************************************************
+   ok: [srv_1] =>
+     iocage_tags:
+       project: foo
+       vmm: iocage_02
+   ok: [srv_2] =>
+     iocage_tags:
+       project: foo
+       vmm: iocage_02
+   ok: [srv_3] =>
+     iocage_tags:
+       project: bar
+       vmm: iocage_02
+
+   TASK [debug] ********************************************************************************************************
+   ok: [srv_1] =>
+     msg: |-
+       all: ['srv_1', 'srv_2', 'srv_3']
+       ungrouped: []
+       vmm_iocage_02: ['srv_1', 'srv_2', 'srv_3']
+       project_foo: ['srv_1', 'srv_2']
+       project_bar: ['srv_3']
+
+   PLAY RECAP **********************************************************************************************************
+   srv_1 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
+   srv_2 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
+   srv_3 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
diff --git a/docs/docsite/rst/guide_modulehelper.rst b/docs/docsite/rst/guide_modulehelper.rst
index e3c7a124cf..12657f4479 100644
--- a/docs/docsite/rst/guide_modulehelper.rst
+++ b/docs/docsite/rst/guide_modulehelper.rst
@@ -38,7 +38,6 @@ But bear in mind that it does not showcase all of MH's features:
             ),
             supports_check_mode=True,
         )
-        use_old_vardict = False
 
         def __run__(self):
             self.vars.original_message = ''
@@ -76,13 +75,14 @@ section above, but there are more elements that will take part in it.
     from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper
 
     class MyTest(ModuleHelper):
+        # behavior for module parameters ONLY, see below for further information
         output_params = ()
        change_params = ()
         diff_params = ()
-        facts_name = None
         facts_params = ()
-        use_old_vardict = True
-        mute_vardict_deprecation = False
+
+        facts_name = None  # used if generating facts, from parameters or otherwise
+
         module = dict(
             argument_spec=dict(...),
             # ...
@@ -202,27 +202,14 @@ By using ``self.vars``, you get a central mechanism to access the parameters but
 As described in :ref:`ansible_collections.community.general.docsite.guide_vardict`, variables in ``VarDict`` have metadata associated to them.
 One of the attributes in that metadata marks the variable for output, and MH makes use of that to generate the module's return values.
 
-.. important::
+.. note::
 
-   The ``VarDict`` feature described was introduced in community.general 7.1.0, but there was a first
-   implementation of it embedded within ``ModuleHelper``.
-   That older implementation is now deprecated and will be removed in community.general 11.0.0.
-   After community.general 7.1.0, MH modules generate a deprecation message about *using the old VarDict*.
-   There are two ways to prevent that from happening:
+   The ``VarDict`` class was introduced in community.general 7.1.0, as part of ``ModuleHelper`` itself.
+   However, it has been factored out to become a utility on its own, described in :ref:`ansible_collections.community.general.docsite.guide_vardict`,
+   and the older implementation was removed in community.general 11.0.0.
 
-   #. Set ``mute_vardict_deprecation = True`` and the deprecation will be silenced. If the module still uses the old ``VarDict``,
-      it will not be able to update to community.general 11.0.0 (Spring 2026) upon its release.
-   #. Set ``use_old_vardict = False`` to make the MH module use the new ``VarDict`` immediatelly.
-      The new ``VarDict`` and its use is documented and this is the recommended way to handle this.
-
-      .. code-block:: python
-
-          class MyTest(ModuleHelper):
-              use_old_vardict = False
-              mute_vardict_deprecation = True
-              ...
-
-   These two settings are mutually exclusive, but that is not enforced and the behavior when setting both is not specified.
+   Some code might still refer to the class variables ``use_old_vardict`` and ``mute_vardict_deprecation``, used for the transition to the new
+   implementation, but from community.general 11.0.0 onwards they are no longer used and can be safely removed from the code.
 
 Contrary to new variables created in ``VarDict``, module parameters are not set for output by default.
 If you want to include some module parameters in the output, list them in the ``output_params`` class variable.
@@ -233,6 +220,11 @@ If you want to include some module parameters in the output, list them in the ``
         output_params = ('state', 'name')
         ...
 
+.. important::
+
+   The variable names listed in ``output_params`` **must be module parameters**, that is, parameters listed in the module's ``argument_spec``.
+   Names not found in ``argument_spec`` are silently ignored.
+
 Another neat feature provided by MH by using ``VarDict`` is the automatic tracking of changes when setting the metadata ``change=True``.
 Again, to enable this feature for module parameters, you must list them in the ``change_params`` class variable.
@@ -243,6 +235,11 @@ Again, to enable this feature for module parameters, you must list them in the ``
         change_params = ('value', )
         ...
 
+.. important::
+
+   The variable names listed in ``change_params`` **must be module parameters**, that is, parameters listed in the module's ``argument_spec``.
+   Names not found in ``argument_spec`` are silently ignored.
+
 .. seealso::
 
    See more about this in
@@ -260,6 +257,11 @@ With that, MH will automatically generate the diff output for variables that hav
         # example from community.general.gio_mime
         self.vars.set_meta("handler", initial_value=gio_mime_get(self.runner, self.vars.mime_type), diff=True, change=True)
 
+.. important::
+
+   The variable names listed in ``diff_params`` **must be module parameters**, that is, parameters listed in the module's ``argument_spec``.
+   Names not found in ``argument_spec`` are silently ignored.
+
 Moreover, if a module is set to return *facts* instead of return values, then again use the metadata ``fact=True``
 and ``fact_params`` for module parameters.
 Additionally, you must specify ``facts_name``, as in:
@@ -283,6 +285,11 @@ That generates an Ansible fact like:
         debug:
           msg: Volume fact is {{ ansible_facts.volume_facts.volume }}
 
+.. important::
+
+   The variable names listed in ``fact_params`` **must be module parameters**, that is, parameters listed in the module's ``argument_spec``.
+   Names not found in ``argument_spec`` are silently ignored.
+
 .. important::
 
    If ``facts_name`` is not set, the module does not generate any facts.
@@ -384,7 +391,6 @@ By using ``StateModuleHelper`` you can make your code like the excerpt from the
         module = dict(
             ...
         )
-        use_old_vardict = False
 
         def __init_module__(self):
             self.runner = gconftool2_runner(self.module, check_rc=True)
@@ -468,6 +474,11 @@ Additionally, MH will also delegate:
 
 - ``diff_mode`` to ``self.module._diff``
 - ``verbosity`` to ``self.module._verbosity``
 
+Starting in community.general 10.3.0, MH will also delegate the method ``debug`` to ``self.module``.
+If any existing module already has a ``debug`` attribute defined, a warning message will be generated, +requesting it to be renamed. Upon the release of community.general 12.0.0, the delegation will be +preemptive and will override any existing method or property in the subclasses. + Decorators """""""""" diff --git a/docs/docsite/rst/guide_packet.rst b/docs/docsite/rst/guide_packet.rst index 9de5e3f614..95b38dddd0 100644 --- a/docs/docsite/rst/guide_packet.rst +++ b/docs/docsite/rst/guide_packet.rst @@ -67,16 +67,16 @@ The following code block is a simple playbook that creates one `Type 0 - hostnames: myserver - operating_system: ubuntu_16_04 - plan: baremetal_0 - facility: sjc1 + - community.general.packet_device: + project_id: + hostnames: myserver + operating_system: ubuntu_16_04 + plan: baremetal_0 + facility: sjc1 After running ``ansible-playbook playbook_create.yml``, you should have a server provisioned on Packet. You can verify through a CLI or in the `Packet portal `__. @@ -110,10 +110,10 @@ If your playbook acts on existing Packet devices, you can only pass the ``hostna hosts: localhost tasks: - - community.general.packet_device: - project_id: - hostnames: myserver - state: rebooted + - community.general.packet_device: + project_id: + hostnames: myserver + state: rebooted You can also identify specific Packet devices with the ``device_ids`` parameter. The device's UUID can be found in the `Packet Portal `_ or by using a `CLI `_. The following playbook removes a Packet device using the ``device_ids`` field: @@ -125,10 +125,10 @@ You can also identify specific Packet devices with the ``device_ids`` parameter. hosts: localhost tasks: - - community.general.packet_device: - project_id: - device_ids: - state: absent + - community.general.packet_device: + project_id: + device_ids: + state: absent More Complex Playbooks @@ -153,43 +153,43 @@ The following playbook will create an SSH key, 3 Packet servers, and then wait u hosts: localhost tasks: - - community.general.packet_sshkey: - key_file: ./id_rsa.pub - label: new + - community.general.packet_sshkey: + key_file: ./id_rsa.pub + label: new - - community.general.packet_device: - hostnames: [coreos-one, coreos-two, coreos-three] - operating_system: coreos_beta - plan: baremetal_0 - facility: ewr1 - project_id: - wait_for_public_IPv: 4 - user_data: | - #cloud-config - coreos: - etcd2: - discovery: https://discovery.etcd.io/ - advertise-client-urls: http://$private_ipv4:2379,http://$private_ipv4:4001 - initial-advertise-peer-urls: http://$private_ipv4:2380 - listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 - listen-peer-urls: http://$private_ipv4:2380 - fleet: - public-ip: $private_ipv4 - units: - - name: etcd2.service - command: start - - name: fleet.service - command: start - register: newhosts + - community.general.packet_device: + hostnames: [coreos-one, coreos-two, coreos-three] + operating_system: coreos_beta + plan: baremetal_0 + facility: ewr1 + project_id: + wait_for_public_IPv: 4 + user_data: | + # cloud-config + coreos: + etcd2: + discovery: https://discovery.etcd.io/ + advertise-client-urls: http://$private_ipv4:2379,http://$private_ipv4:4001 + initial-advertise-peer-urls: http://$private_ipv4:2380 + listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 + listen-peer-urls: http://$private_ipv4:2380 + fleet: + public-ip: $private_ipv4 + units: + - name: etcd2.service + command: start + - name: fleet.service + command: start + register: newhosts - - name: wait for ssh - ansible.builtin.wait_for: - delay: 1 - 
host: "{{ item.public_ipv4 }}" - port: 22 - state: started - timeout: 500 - loop: "{{ newhosts.results[0].devices }}" + - name: wait for ssh + ansible.builtin.wait_for: + delay: 1 + host: "{{ item.public_ipv4 }}" + port: 22 + state: started + timeout: 500 + loop: "{{ newhosts.results[0].devices }}" As with most Ansible modules, the default states of the Packet modules are idempotent, meaning the resources in your project will remain the same after re-runs of a playbook. Thus, we can keep the ``packet_sshkey`` module call in our playbook. If the public key is already in your Packet account, the call will have no effect. diff --git a/docs/docsite/rst/guide_uthelper.rst b/docs/docsite/rst/guide_uthelper.rst new file mode 100644 index 0000000000..657ced66cf --- /dev/null +++ b/docs/docsite/rst/guide_uthelper.rst @@ -0,0 +1,394 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_uthelper: + +UTHelper Guide +============== + +Introduction +^^^^^^^^^^^^ + +``UTHelper`` was written to reduce the boilerplate code used in unit tests for modules. +It was originally written to handle tests of modules that run external commands using ``AnsibleModule.run_command()``. +At the time of writing (Feb 2025) that remains the only type of tests you can use +``UTHelper`` for, but it aims to provide support for other types of interactions. + +Until now, there are many different ways to implement unit tests that validate a module based on the execution of external commands. See some examples: + +* `test_apk.py `_ - A very simple one +* `test_bootc_manage.py `_ - + This one has more test cases, but do notice how the code is repeated amongst them. +* `test_modprobe.py `_ - + This one has 15 tests in it, but to achieve that it declares 8 classes repeating quite a lot of code. + +As you can notice, there is no consistency in the way these tests are executed - +they all do the same thing eventually, but each one is written in a very distinct way. + +``UTHelper`` aims to: + +* provide a consistent idiom to define unit tests +* reduce the code to a bare minimal, and +* define tests as data instead +* allow the test cases definition to be expressed not only as a Python data structure but also as YAML content + +Quickstart +"""""""""" + +To use UTHelper, your test module will need only a bare minimal of code: + +.. code-block:: python + + # tests/unit/plugin/modules/test_ansible_module.py + from ansible_collections.community.general.plugins.modules import ansible_module + from .uthelper import UTHelper, RunCommandMock + + + UTHelper.from_module(ansible_module, __name__, mocks=[RunCommandMock]) + +Then, in the test specification file, you have: + +.. code-block:: yaml + + # tests/unit/plugin/modules/test_ansible_module.yaml + test_cases: + - id: test_ansible_module + flags: + diff: true + input: + state: present + name: Roger the Shrubber + output: + shrubbery: + looks: nice + price: not too expensive + changed: true + diff: + before: + shrubbery: null + after: + shrubbery: + looks: nice + price: not too expensive + mocks: + run_command: + - command: [/testbin/shrubber, --version] + rc: 0 + out: "2.80.0\n" + err: '' + - command: [/testbin/shrubber, --make-shrubbery] + rc: 0 + out: 'Shrubbery created' + err: '' + +.. 
+
+   If you prefer to pick a different YAML file for the test cases, or if you prefer to define them in plain Python,
+   you can use the convenience methods ``UTHelper.from_file()`` and ``UTHelper.from_spec()``, respectively.
+   See more details below.
+
+
+Using ``UTHelper``
+^^^^^^^^^^^^^^^^^^
+
+Test Module
+"""""""""""
+
+``UTHelper`` is **strictly for unit tests**. To use it, you import the ``.uthelper.UTHelper`` class.
+As mentioned in different parts of this guide, there are three different mechanisms to load the test cases.
+
+.. seealso::
+
+   See the UTHelper class reference below for API details on the three different mechanisms.
+
+
+The easiest and most recommended way of using ``UTHelper`` is literally the example shown above.
+See a real world example at
+`test_gconftool2.py `_.
+
+The ``from_module()`` method will pick up the filename of the test module (in the example above, ``tests/unit/plugins/modules/test_gconftool2.py``)
+and it will search for ``tests/unit/plugins/modules/test_gconftool2.yaml`` (or ``.yml`` if that is not found).
+In that file it will expect to find the test specification expressed in YAML format, conforming to the structure described below.
+
+If you prefer to read the test specification from a different file path, use ``from_file()`` passing the file handle for the YAML file.
+
+And, if for any reason you prefer or need to pass the data structure rather than dealing with YAML files, use the ``from_spec()`` method.
+A real world example for that can be found at
+`test_snap.py `_.
+
+
+Test Specification
+""""""""""""""""""
+
+The structure of the test specification data is described below.
+
+Top level
+---------
+
+At the top level there are two accepted keys:
+
+- ``anchors: dict``
+  Optional. Placeholder for you to define YAML anchors that can be repeated in the test cases.
+  Its contents are never accessed directly by ``UTHelper``.
+- ``test_cases: list``
+  Mandatory. List of test cases, see below for definition.
+
+Test cases
+----------
+
+You write the test cases with five elements:
+
+- ``id: str``
+  Mandatory. Used to identify the test case.
+
+- ``flags: dict``
+  Optional. Flags controlling the behavior of the test case. All flags are optional. Accepted flags:
+
+  * ``check: bool``: set to ``true`` if the module is to be executed in **check mode**.
+  * ``diff: bool``: set to ``true`` if the module is to be executed in **diff mode**.
+  * ``skip: str``: set the test case to be skipped, providing the message for ``pytest.skip()``.
+  * ``xfail: str``: set the test case to expect failure, providing the message for ``pytest.xfail()``.
+
+- ``input: dict``
+  Optional. Parameters for the Ansible module; it can be empty.
+
+- ``output: dict``
+  Optional. Expected return values from the Ansible module.
+  All RV names used here are expected to be found in the module output, but not all RVs in the output must be here.
+  It can include special RVs such as ``changed`` and ``diff``.
+  It can be empty.
+
+- ``mocks: dict``
+  Optional. Mocked interactions, ``run_command`` being the only one supported for now.
+  Each key in this dictionary refers to one subclass of ``TestCaseMock`` and its
+  structure is dictated by the ``TestCaseMock`` subclass implementation.
+  All keys are expected to be named using snake case, as in ``run_command``.
+  The ``TestCaseMock`` subclass is responsible for defining the name used in the test specification.
+  The structure for that specification is dependent on the implementing class.
+
+Example using YAML
+------------------
+
+We recommend using ``UTHelper`` with the test specification read from a YAML file.
+The example below shows what one actually looks like (excerpt from ``test_opkg.yaml``):
+
+.. code-block:: yaml
+
+    ---
+    anchors:
+      environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: false}
+    test_cases:
+      - id: install_zlibdev
+        input:
+          name: zlib-dev
+          state: present
+        output:
+          msg: installed 1 package(s)
+        mocks:
+          run_command:
+            - command: [/testbin/opkg, --version]
+              environ: *env-def
+              rc: 0
+              out: ''
+              err: ''
+            - command: [/testbin/opkg, list-installed, zlib-dev]
+              environ: *env-def
+              rc: 0
+              out: ''
+              err: ''
+            - command: [/testbin/opkg, install, zlib-dev]
+              environ: *env-def
+              rc: 0
+              out: |
+                Installing zlib-dev (1.2.11-6) to root...
+                Downloading https://downloads.openwrt.org/releases/22.03.0/packages/mips_24kc/base/zlib-dev_1.2.11-6_mips_24kc.ipk
+                Installing zlib (1.2.11-6) to root...
+                Downloading https://downloads.openwrt.org/releases/22.03.0/packages/mips_24kc/base/zlib_1.2.11-6_mips_24kc.ipk
+                Configuring zlib.
+                Configuring zlib-dev.
+              err: ''
+            - command: [/testbin/opkg, list-installed, zlib-dev]
+              environ: *env-def
+              rc: 0
+              out: |
+                zlib-dev - 1.2.11-6
+              err: ''
+      - id: install_zlibdev_present
+        input:
+          name: zlib-dev
+          state: present
+        output:
+          msg: package(s) already present
+        mocks:
+          run_command:
+            - command: [/testbin/opkg, --version]
+              environ: *env-def
+              rc: 0
+              out: ''
+              err: ''
+            - command: [/testbin/opkg, list-installed, zlib-dev]
+              environ: *env-def
+              rc: 0
+              out: |
+                zlib-dev - 1.2.11-6
+              err: ''
+
+TestCaseMocks Specifications
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``TestCaseMock`` subclass is free to define the expected data structure.
+
+RunCommandMock Specification
+""""""""""""""""""""""""""""
+
+``RunCommandMock`` mocks are specified with the key ``run_command``, which expects a ``list`` whose elements follow this structure:
+
+- ``command: Union[list, str]``
+  Mandatory. The command that is expected to be executed by the module. It corresponds to the parameter ``args`` of the ``AnsibleModule.run_command()`` call.
+  It can be either a list or a string, though the list form is generally recommended.
+- ``environ: dict``
+  Mandatory. All other parameters passed to the ``AnsibleModule.run_command()`` call.
+  Most commonly used are ``environ_update`` and ``check_rc``.
+  Must include all parameters the Ansible module uses in the ``AnsibleModule.run_command()`` call, otherwise the test will fail.
+- ``rc: int``
+  Mandatory. The return code for the command execution.
+  As is usual in shell scripting, a value of ``0`` means success, whereas any other number is an error code.
+- ``out: str``
+  Mandatory. The *stdout* result of the command execution, as one single string containing zero or more lines.
+- ``err: str``
+  Mandatory. The *stderr* result of the command execution, as one single string containing zero or more lines.
+
+
+``UTHelper`` Reference
+^^^^^^^^^^^^^^^^^^^^^^
+
+.. py:module:: .uthelper
+
+  .. py:class:: UTHelper
+
+    A class to encapsulate unit tests.
+
+    .. py:staticmethod:: from_spec(ansible_module, test_module, test_spec, mocks=None)
+
+      Creates an ``UTHelper`` instance from a given test specification.
+
+      :param ansible_module: The Ansible module to be tested.
+      :type ansible_module: module
+      :param test_module: The test module.
+      :type test_module: module
+      :param test_spec: The test specification.
+      :type test_spec: dict
+      :param mocks: List of ``TestCaseMocks`` to be used during testing. Currently only ``RunCommandMock`` exists.
+      :type mocks: list or None
+      :return: An ``UTHelper`` instance.
+      :rtype: UTHelper
+
+      Example usage of ``from_spec()``:
+
+      .. code-block:: python
+
+        import sys
+
+        from ansible_collections.community.general.plugins.modules import ansible_module
+        from .uthelper import UTHelper, RunCommandMock
+
+        TEST_SPEC = dict(
+            test_cases=[
+                ...
+            ]
+        )
+
+        helper = UTHelper.from_spec(ansible_module, sys.modules[__name__], TEST_SPEC, mocks=[RunCommandMock])
+
+    .. py:staticmethod:: from_file(ansible_module, test_module, test_spec_filehandle, mocks=None)
+
+      Creates an ``UTHelper`` instance from a test specification file.
+
+      :param ansible_module: The Ansible module to be tested.
+      :type ansible_module: module
+      :param test_module: The test module.
+      :type test_module: module
+      :param test_spec_filehandle: A file handle to a stream providing the test specification in YAML format.
+      :type test_spec_filehandle: file
+      :param mocks: List of ``TestCaseMocks`` to be used during testing. Currently only ``RunCommandMock`` exists.
+      :type mocks: list or None
+      :return: An ``UTHelper`` instance.
+      :rtype: UTHelper
+
+      Example usage of ``from_file()``:
+
+      .. code-block:: python
+
+        import sys
+
+        from ansible_collections.community.general.plugins.modules import ansible_module
+        from .uthelper import UTHelper, RunCommandMock
+
+        with open("test_spec.yaml", "r") as test_spec_filehandle:
+            helper = UTHelper.from_file(ansible_module, sys.modules[__name__], test_spec_filehandle, mocks=[RunCommandMock])
+
+    .. py:staticmethod:: from_module(ansible_module, test_module_name, mocks=None)
+
+      Creates an ``UTHelper`` instance from a given Ansible module and test module.
+
+      :param ansible_module: The Ansible module to be tested.
+      :type ansible_module: module
+      :param test_module_name: The name of the test module. Passing ``__name__`` works.
+      :type test_module_name: str
+      :param mocks: List of ``TestCaseMocks`` to be used during testing. Currently only ``RunCommandMock`` exists.
+      :type mocks: list or None
+      :return: An ``UTHelper`` instance.
+      :rtype: UTHelper
+
+      Example usage of ``from_module()``:
+
+      .. code-block:: python
+
+        from ansible_collections.community.general.plugins.modules import ansible_module
+        from .uthelper import UTHelper, RunCommandMock
+
+        # Example usage
+        helper = UTHelper.from_module(ansible_module, __name__, mocks=[RunCommandMock])
+
+
+Creating TestCaseMocks
+^^^^^^^^^^^^^^^^^^^^^^
+
+To create a new ``TestCaseMock`` you must extend that class and implement the relevant parts:
+
+.. code-block:: python
+
+    class ShrubberyMock(TestCaseMock):
+        # this name is mandatory, it is the name used in the test specification
+        name = "shrubbery"
+
+        def setup(self, mocker):
+            # perform setup, commonly using mocker to patch some other piece of code
+            ...
+
+        def check(self, test_case, results):
+            # verify the test execution met the expectations of the test case,
+            # for example that the function was called as many times as it should have been
+            ...
+
+        def fixtures(self):
+            # return a dict mapping names to pytest fixtures that should be used for the test case;
+            # for example, RunCommandMock creates a fixture that patches AnsibleModule.get_bin_path
+            ...
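+
+Once the class exists, it is passed to ``UTHelper`` next to the built-in mocks, and test cases can
+then provide data under its ``name`` key. A minimal sketch, assuming the hypothetical
+``ShrubberyMock`` from above:
+
+.. code-block:: python
+
+    from ansible_collections.community.general.plugins.modules import ansible_module
+    from .uthelper import UTHelper, RunCommandMock
+
+    # ShrubberyMock is the hypothetical custom mock defined above
+    UTHelper.from_module(ansible_module, __name__, mocks=[RunCommandMock, ShrubberyMock])
+
+With that in place, a test case may declare a ``shrubbery`` entry in its ``mocks`` element,
+whose structure is whatever ``ShrubberyMock`` decides to expect.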
+
+Caveats
+^^^^^^^
+
+Known issues/opportunities for improvement:
+
+* Only one ``UTHelper`` per test module: ``UTHelper`` injects a test function with a fixed name into the module's namespace,
+  so placing a second ``UTHelper`` instance in the module overwrites the function created by the first one.
+* The order of elements in a module's namespace is not consistent across executions in Python 3.5, so adding more tests to the test module
+  might make ``UTHelper`` add its function before or after the other test functions.
+  In the community.general collection the CI process uses ``pytest-xdist`` to parallelize and distribute the tests,
+  and it requires the order of the tests to be consistent.
+
+.. versionadded:: 7.5.0
diff --git a/docs/docsite/rst/guide_vardict.rst b/docs/docsite/rst/guide_vardict.rst
index f65b09055b..1beef0c57f 100644
--- a/docs/docsite/rst/guide_vardict.rst
+++ b/docs/docsite/rst/guide_vardict.rst
@@ -51,7 +51,7 @@ And by the time the module is about to exit:
 
 That makes the return value of the module:
 
-.. code-block:: javascript
+.. code-block:: json
 
 {
 "abc": 123,
diff --git a/docs/docsite/rst/test_guide.rst b/docs/docsite/rst/test_guide.rst
index 7a261c7552..a1f5723df4 100644
--- a/docs/docsite/rst/test_guide.rst
+++ b/docs/docsite/rst/test_guide.rst
@@ -8,7 +8,7 @@ community.general Test (Plugin) Guide
 =====================================
 
-The :ref:`community.general collection ` offers currently one test plugin.
+The :anscollection:`community.general collection ` currently offers one test plugin.
 
 .. contents:: Topics
diff --git a/galaxy.yml b/galaxy.yml
index 4ff0768938..a39ffcc7e5 100644
--- a/galaxy.yml
+++ b/galaxy.yml
@@ -5,7 +5,7 @@
 namespace: community
 name: general
-version: 10.2.0
+version: 11.2.0
 readme: README.md
 authors:
   - Ansible (https://github.com/ansible)
diff --git a/meta/runtime.yml b/meta/runtime.yml
index 6e96683059..4efdc68688 100644
--- a/meta/runtime.yml
+++ b/meta/runtime.yml
@@ -3,7 +3,7 @@
 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
 # SPDX-License-Identifier: GPL-3.0-or-later
 
-requires_ansible: '>=2.15.0'
+requires_ansible: '>=2.16.0'
 action_groups:
   consul:
     - consul_agent_check
@@ -15,23 +15,9 @@ action_groups:
     - consul_session
     - consul_token
   proxmox:
-    - proxmox
-    - proxmox_backup
-    - proxmox_disk
-    - proxmox_domain_info
-    - proxmox_group_info
-    - proxmox_kvm
-    - proxmox_nic
-    - proxmox_node_info
-    - proxmox_pool
-    - proxmox_pool_member
-    - proxmox_snap
-    - proxmox_storage_contents_info
-    - proxmox_storage_info
-    - proxmox_tasks_info
-    - proxmox_template
-    - proxmox_user_info
-    - proxmox_vm_info
+    - metadata:
+        extend_group:
+          - community.proxmox.proxmox
  keycloak:
    - keycloak_authentication
    - keycloak_authentication_required_actions
@@ -85,18 +71,28 @@ plugin_routing:
         = yes' option.
     yaml:
       deprecation:
-        removal_version: 13.0.0
-        warning_text: The plugin has been superseded by the the option `result_format=yaml` in callback plugin ansible.builtin.default from ansible-core 2.13 onwards.
+        removal_version: 12.0.0
+        warning_text: >-
+          The plugin has been superseded by the option `result_format=yaml` in callback plugin ansible.builtin.default from ansible-core 2.13 onwards.
   connection:
     docker:
       redirect: community.docker.docker
     oc:
       redirect: community.okd.oc
+    proxmox_pct_remote:
+      redirect: community.proxmox.proxmox_pct_remote
+      deprecation:
+        removal_version: 15.0.0
+        warning_text: The proxmox content has been moved to community.proxmox.
lookup:
    gcp_storage_file:
      redirect: community.google.gcp_storage_file
    hashi_vault:
      redirect: community.hashi_vault.hashi_vault
+    manifold:
+      tombstone:
+        removal_version: 11.0.0
+        warning_text: Company was acquired in 2021 and the service ceased afterwards.
    nios:
      redirect: infoblox.nios_modules.nios_lookup
    nios_next_ip:
@@ -104,176 +100,68 @@ plugin_routing:
    nios_next_network:
      redirect: infoblox.nios_modules.nios_next_network
  modules:
-    atomic_container:
-      deprecation:
-        removal_version: 13.0.0
-        warning_text: Poject Atomic was sunset by the end of 2019.
-    atomic_host:
-      deprecation:
-        removal_version: 13.0.0
-        warning_text: Poject Atomic was sunset by the end of 2019.
-    atomic_image:
-      deprecation:
-        removal_version: 13.0.0
-        warning_text: Poject Atomic was sunset by the end of 2019.
-    consul_acl:
-      tombstone:
-        removal_version: 10.0.0
-        warning_text: Use community.general.consul_token and/or community.general.consul_policy instead.
-    facter:
-      deprecation:
-        removal_version: 12.0.0
-        warning_text: Use community.general.facter_facts instead.
-    hipchat:
-      deprecation:
-        removal_version: 11.0.0
-        warning_text: The hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020.
-    rax_cbs_attachments:
-      tombstone:
-        removal_version: 9.0.0
-        warning_text: This module relied on the deprecated package pyrax.
-    rax_cbs:
-      tombstone:
-        removal_version: 9.0.0
-        warning_text: This module relied on the deprecated package pyrax.
-    rax_cdb_database:
-      tombstone:
-        removal_version: 9.0.0
-        warning_text: This module relied on the deprecated package pyrax.
-    rax_cdb_user:
-      tombstone:
-        removal_version: 9.0.0
-        warning_text: This module relied on the deprecated package pyrax.
-    rax_cdb:
-      tombstone:
-        removal_version: 9.0.0
-        warning_text: This module relied on the deprecated package pyrax.
-    rax_clb_nodes:
-      tombstone:
-        removal_version: 9.0.0
-        warning_text: This module relied on the deprecated package pyrax.
-    rax_clb_ssl:
-      tombstone:
-        removal_version: 9.0.0
-        warning_text: This module relied on the deprecated package pyrax.
-    rax_clb:
-      tombstone:
-        removal_version: 9.0.0
-        warning_text: This module relied on the deprecated package pyrax.
-    rax_dns_record:
-      tombstone:
-        removal_version: 9.0.0
-        warning_text: This module relied on the deprecated package pyrax.
-    rax_dns:
-      tombstone:
-        removal_version: 9.0.0
-        warning_text: This module relied on the deprecated package pyrax.
-    rax_facts:
-      tombstone:
-        removal_version: 9.0.0
-        warning_text: This module relied on the deprecated package pyrax.
-    rax_files_objects:
-      tombstone:
-        removal_version: 9.0.0
-        warning_text: This module relied on the deprecated package pyrax.
-    rax_files:
-      tombstone:
-        removal_version: 9.0.0
-        warning_text: This module relied on the deprecated package pyrax.
-    rax_identity:
-      tombstone:
-        removal_version: 9.0.0
-        warning_text: This module relied on the deprecated package pyrax.
-    rax_keypair:
-      tombstone:
-        removal_version: 9.0.0
-        warning_text: This module relied on the deprecated package pyrax.
-    rax_meta:
-      tombstone:
-        removal_version: 9.0.0
-        warning_text: This module relied on the deprecated package pyrax.
-    rax_mon_alarm:
-      tombstone:
-        removal_version: 9.0.0
-        warning_text: This module relied on the deprecated package pyrax.
-    rax:
-      tombstone:
-        removal_version: 9.0.0
-        warning_text: This module relied on the deprecated package pyrax.
-    rax_mon_check:
-      tombstone:
-        removal_version: 9.0.0
-        warning_text: This module relied on the deprecated package pyrax.
- rax_mon_entity: - tombstone: - removal_version: 9.0.0 - warning_text: This module relied on the deprecated package pyrax. - rax_mon_notification_plan: - tombstone: - removal_version: 9.0.0 - warning_text: This module relied on the deprecated package pyrax. - rax_mon_notification: - tombstone: - removal_version: 9.0.0 - warning_text: This module relied on the deprecated package pyrax. - rax_network: - tombstone: - removal_version: 9.0.0 - warning_text: This module relied on the deprecated package pyrax. - rax_queue: - tombstone: - removal_version: 9.0.0 - warning_text: This module relied on the deprecated package pyrax. - rax_scaling_group: - tombstone: - removal_version: 9.0.0 - warning_text: This module relied on the deprecated package pyrax. - rax_scaling_policy: - tombstone: - removal_version: 9.0.0 - warning_text: This module relied on the deprecated package pyrax. - rhn_channel: - tombstone: - removal_version: 10.0.0 - warning_text: RHN is EOL, please contact the community.general maintainers - if still using this; see the module documentation for more details. - rhn_register: - tombstone: - removal_version: 10.0.0 - warning_text: RHN is EOL, please contact the community.general maintainers - if still using this; see the module documentation for more details. - sensu_check: - deprecation: - removal_version: 13.0.0 - warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. - sensu_client: - deprecation: - removal_version: 13.0.0 - warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. - sensu_handler: - deprecation: - removal_version: 13.0.0 - warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. - sensu_silence: - deprecation: - removal_version: 13.0.0 - warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. - sensu_subscription: - deprecation: - removal_version: 13.0.0 - warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. - stackdriver: - tombstone: - removal_version: 9.0.0 - warning_text: This module relied on HTTPS APIs that do not exist anymore, - and any new development in the direction of providing an alternative should - happen in the context of the google.cloud collection. ali_instance_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.ali_instance_info instead. + atomic_container: + deprecation: + removal_version: 13.0.0 + warning_text: Project Atomic was sunset by the end of 2019. + atomic_host: + deprecation: + removal_version: 13.0.0 + warning_text: Project Atomic was sunset by the end of 2019. + atomic_image: + deprecation: + removal_version: 13.0.0 + warning_text: Project Atomic was sunset by the end of 2019. + catapult: + deprecation: + removal_version: 13.0.0 + warning_text: DNS fails to resolve the API endpoint used by the module since Oct 2024. See https://github.com/ansible-collections/community.general/issues/10318 for details. cisco_spark: redirect: community.general.cisco_webex + clc_alert_policy: + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. + clc_blueprint_package: + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. + clc_firewall_policy: + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. 
+ clc_group: + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. + clc_loadbalancer: + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. + clc_modify_server: + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. + clc_publicip: + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. + clc_server: + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. + clc_server_snapshot: + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. + consul_acl: + tombstone: + removal_version: 10.0.0 + warning_text: Use community.general.consul_token and/or community.general.consul_policy instead. docker_compose: redirect: community.docker.docker_compose docker_config: @@ -328,6 +216,10 @@ plugin_routing: redirect: community.docker.docker_volume docker_volume_info: redirect: community.docker.docker_volume_info + facter: + deprecation: + removal_version: 12.0.0 + warning_text: Use community.general.facter_facts instead. flowdock: tombstone: removal_version: 9.0.0 @@ -421,6 +313,10 @@ plugin_routing: redirect: community.hrobot.firewall hetzner_firewall_info: redirect: community.hrobot.firewall_info + hipchat: + tombstone: + removal_version: 11.0.0 + warning_text: The hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020. hpilo_facts: tombstone: removal_version: 3.0.0 @@ -742,6 +638,116 @@ plugin_routing: redirect: community.postgresql.postgresql_user postgresql_user_obj_stat_info: redirect: community.postgresql.postgresql_user_obj_stat_info + profitbricks: + tombstone: + removal_version: 11.0.0 + warning_text: Supporting library is unsupported since 2021. + profitbricks_datacenter: + tombstone: + removal_version: 11.0.0 + warning_text: Supporting library is unsupported since 2021. + profitbricks_nic: + tombstone: + removal_version: 11.0.0 + warning_text: Supporting library is unsupported since 2021. + profitbricks_volume: + tombstone: + removal_version: 11.0.0 + warning_text: Supporting library is unsupported since 2021. + profitbricks_volume_attachments: + tombstone: + removal_version: 11.0.0 + warning_text: Supporting library is unsupported since 2021. + proxmox: + redirect: community.proxmox.proxmox + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_backup: + redirect: community.proxmox.proxmox_backup + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_backup_info: + redirect: community.proxmox.proxmox_backup_info + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_disk: + redirect: community.proxmox.proxmox_disk + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_domain_info: + redirect: community.proxmox.proxmox_domain_info + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_group_info: + redirect: community.proxmox.proxmox_group_info + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. 
+ proxmox_kvm: + redirect: community.proxmox.proxmox_kvm + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_nic: + redirect: community.proxmox.proxmox_nic + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_node_info: + redirect: community.proxmox.proxmox_node_info + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_pool: + redirect: community.proxmox.proxmox_pool + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_pool_member: + redirect: community.proxmox.proxmox_pool_member + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_snap: + redirect: community.proxmox.proxmox_snap + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_storage_contents_info: + redirect: community.proxmox.proxmox_storage_contents_info + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_storage_info: + redirect: community.proxmox.proxmox_storage_info + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_tasks_info: + redirect: community.proxmox.proxmox_tasks_info + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_template: + redirect: community.proxmox.proxmox_template + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_user_info: + redirect: community.proxmox.proxmox_user_info + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_vm_info: + redirect: community.proxmox.proxmox_vm_info + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. purefa_facts: tombstone: removal_version: 3.0.0 @@ -754,10 +760,122 @@ plugin_routing: tombstone: removal_version: 3.0.0 warning_text: Use community.general.python_requirements_info instead. + rax_cbs_attachments: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_cbs: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_cdb_database: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_cdb_user: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_cdb: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_clb_nodes: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_clb_ssl: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_clb: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_dns_record: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. 
+ rax_dns: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_facts: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_files_objects: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_files: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_identity: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_keypair: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_meta: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_mon_alarm: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_mon_check: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_mon_entity: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_mon_notification_plan: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_mon_notification: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_network: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_queue: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_scaling_group: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_scaling_policy: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. redfish_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.redfish_info instead. + rhn_channel: + tombstone: + removal_version: 10.0.0 + warning_text: RHN is EOL. + rhn_register: + tombstone: + removal_version: 10.0.0 + warning_text: RHN is EOL. sapcar_extract: redirect: community.sap_libs.sapcar_extract sap_task_list_execute: @@ -790,6 +908,26 @@ plugin_routing: tombstone: removal_version: 3.0.0 warning_text: Use community.general.scaleway_volume_info instead. + sensu_check: + deprecation: + removal_version: 13.0.0 + warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. + sensu_client: + deprecation: + removal_version: 13.0.0 + warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. + sensu_handler: + deprecation: + removal_version: 13.0.0 + warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. + sensu_silence: + deprecation: + removal_version: 13.0.0 + warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. + sensu_subscription: + deprecation: + removal_version: 13.0.0 + warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. sf_account_manager: tombstone: removal_version: 2.0.0 @@ -814,6 +952,16 @@ plugin_routing: tombstone: removal_version: 3.0.0 warning_text: Use community.general.smartos_image_info instead. 
+    stackdriver:
+      tombstone:
+        removal_version: 9.0.0
+        warning_text: This module relied on HTTPS APIs that do not exist anymore,
+          and any new development in the direction of providing an alternative should
+          happen in the context of the google.cloud collection.
+    typetalk:
+      deprecation:
+        removal_version: 13.0.0
+        warning_text: The typetalk service will be discontinued in Dec 2025.
     vertica_facts:
       tombstone:
         removal_version: 3.0.0
@@ -862,6 +1010,11 @@ plugin_routing:
       redirect: infoblox.nios_modules.nios
     postgresql:
       redirect: community.postgresql.postgresql
+    proxmox:
+      redirect: community.proxmox.proxmox
+      deprecation:
+        removal_version: 15.0.0
+        warning_text: The proxmox content has been moved to community.proxmox.
     purestorage:
       deprecation:
         removal_version: 12.0.0
@@ -890,6 +1043,11 @@ plugin_routing:
       redirect: infoblox.nios_modules.api
     postgresql:
       redirect: community.postgresql.postgresql
+    proxmox:
+      redirect: community.proxmox.proxmox
+      deprecation:
+        removal_version: 15.0.0
+        warning_text: The proxmox content has been moved to community.proxmox.
     pure:
       deprecation:
         removal_version: 12.0.0
@@ -907,8 +1065,17 @@ plugin_routing:
       redirect: community.docker.docker_machine
     docker_swarm:
       redirect: community.docker.docker_swarm
+    proxmox:
+      redirect: community.proxmox.proxmox
+      deprecation:
+        removal_version: 15.0.0
+        warning_text: The proxmox content has been moved to community.proxmox.
     kubevirt:
       redirect: community.kubevirt.kubevirt
+    stackpath_compute:
+      tombstone:
+        removal_version: 11.0.0
+        warning_text: The company and the service were sunset in June 2024.
   filter:
     path_join:
       # The ansible.builtin.path_join filter has been added in ansible-base 2.10.
diff --git a/noxfile.py b/noxfile.py
new file mode 100644
index 0000000000..9b2f92a9e1
--- /dev/null
+++ b/noxfile.py
@@ -0,0 +1,38 @@
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+# SPDX-FileCopyrightText: 2025 Felix Fontein 
+
+# /// script
+# dependencies = ["nox>=2025.02.09", "antsibull-nox"]
+# ///
+
+import sys
+
+import nox
+
+
+try:
+    import antsibull_nox
+except ImportError:
+    print("You need to install antsibull-nox in the same Python environment as nox.")
+    sys.exit(1)
+
+
+antsibull_nox.load_antsibull_nox_toml()
+
+
+@nox.session(name="aliases", python=False, default=True)
+def aliases(session: nox.Session) -> None:
+    session.run("python", "tests/sanity/extra/aliases.py")
+
+
+@nox.session(name="botmeta", default=True)
+def botmeta(session: nox.Session) -> None:
+    session.install("PyYAML", "voluptuous")
+    session.run("python", "tests/sanity/extra/botmeta.py")
+
+
+# Allow running the noxfile with `python noxfile.py`, `pipx run noxfile.py`, or similar.
+# Requires nox >= 2025.02.09 +if __name__ == "__main__": + nox.main() diff --git a/plugins/action/iptables_state.py b/plugins/action/iptables_state.py index 39ee85d778..595d0ece66 100644 --- a/plugins/action/iptables_state.py +++ b/plugins/action/iptables_state.py @@ -3,8 +3,7 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import time diff --git a/plugins/action/shutdown.py b/plugins/action/shutdown.py index e5c2d15a5c..d5db878812 100644 --- a/plugins/action/shutdown.py +++ b/plugins/action/shutdown.py @@ -5,9 +5,8 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) +from __future__ import annotations -__metaclass__ = type from ansible.errors import AnsibleError, AnsibleConnectionFailure from ansible.module_utils.common.text.converters import to_native, to_text diff --git a/plugins/become/doas.py b/plugins/become/doas.py index 9011fa69e9..b7b82a0f80 100644 --- a/plugins/become/doas.py +++ b/plugins/become/doas.py @@ -2,8 +2,7 @@ # Copyright (c) 2018, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" name: doas @@ -73,7 +72,7 @@ options: prompt_l10n: description: - List of localized strings to match for prompt detection. - - If empty we will use the built in one. + - If empty the plugin uses the built-in one. type: list elements: string default: [] diff --git a/plugins/become/dzdo.py b/plugins/become/dzdo.py index 70e2e0d777..d890bede09 100644 --- a/plugins/become/dzdo.py +++ b/plugins/become/dzdo.py @@ -2,8 +2,7 @@ # Copyright (c) 2018, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" name: dzdo diff --git a/plugins/become/ksu.py b/plugins/become/ksu.py index 88a29e7362..be56fd6128 100644 --- a/plugins/become/ksu.py +++ b/plugins/become/ksu.py @@ -2,8 +2,7 @@ # Copyright (c) 2018, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" name: ksu @@ -74,7 +73,7 @@ options: prompt_l10n: description: - List of localized strings to match for prompt detection. - - If empty we will use the built in one. + - If empty the plugin uses the built-in one. 
type: list elements: string default: [] diff --git a/plugins/become/machinectl.py b/plugins/become/machinectl.py index 1dd80bc80f..4f608939f1 100644 --- a/plugins/become/machinectl.py +++ b/plugins/become/machinectl.py @@ -2,8 +2,7 @@ # Copyright (c) 2018, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" name: machinectl @@ -72,10 +71,10 @@ options: - section: machinectl_become_plugin key: password notes: - - When not using this plugin with user V(root), it only works correctly with a polkit rule which will alter the behaviour - of machinectl. This rule must alter the prompt behaviour to ask directly for the user credentials, if the user is allowed - to perform the action (take a look at the examples section). If such a rule is not present the plugin only work if it - is used in context with the root user, because then no further prompt will be shown by machinectl. + - When not using this plugin with user V(root), it only works correctly with a polkit rule which alters the behaviour + of C(machinectl). This rule must alter the prompt behaviour to ask directly for the user credentials, if the user is allowed + to perform the action (take a look at the examples section). If such a rule is not present the plugin only works if it + is used in context with the root user, because then no further prompt is shown by C(machinectl). """ EXAMPLES = r""" diff --git a/plugins/become/pbrun.py b/plugins/become/pbrun.py index 56f3b2c315..92a49fe349 100644 --- a/plugins/become/pbrun.py +++ b/plugins/become/pbrun.py @@ -2,8 +2,7 @@ # Copyright (c) 2018, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" name: pbrun diff --git a/plugins/become/pfexec.py b/plugins/become/pfexec.py index 62d22bdb61..9faf1ffc63 100644 --- a/plugins/become/pfexec.py +++ b/plugins/become/pfexec.py @@ -2,12 +2,11 @@ # Copyright (c) 2018, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" name: pfexec -short_description: profile based execution +short_description: Profile based execution description: - This become plugins allows your remote/login user to execute commands as another user using the C(pfexec) utility. author: Ansible Core Team @@ -15,8 +14,8 @@ options: become_user: description: - User you 'become' to execute the task. - - This plugin ignores this setting as pfexec uses its own C(exec_attr) to figure this out, but it is supplied here - for Ansible to make decisions needed for the task execution, like file permissions. + - This plugin ignores this setting as pfexec uses its own C(exec_attr) to figure this out, but it is supplied here for + Ansible to make decisions needed for the task execution, like file permissions. 
type: string default: root ini: diff --git a/plugins/become/pmrun.py b/plugins/become/pmrun.py index 64820ecde5..a2432d92ee 100644 --- a/plugins/become/pmrun.py +++ b/plugins/become/pmrun.py @@ -2,8 +2,7 @@ # Copyright (c) 2018, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" name: pmrun diff --git a/plugins/become/run0.py b/plugins/become/run0.py index 0c0d6bfffb..dce7c22448 100644 --- a/plugins/become/run0.py +++ b/plugins/become/run0.py @@ -3,9 +3,8 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type DOCUMENTATION = r""" name: run0 @@ -62,7 +61,7 @@ options: - name: ANSIBLE_RUN0_FLAGS type: string notes: - - This plugin will only work when a C(polkit) rule is in place. + - This plugin only works when a C(polkit) rule is in place. """ EXAMPLES = r""" diff --git a/plugins/become/sesu.py b/plugins/become/sesu.py index 6fe64e41f8..cf921e2e47 100644 --- a/plugins/become/sesu.py +++ b/plugins/become/sesu.py @@ -2,8 +2,7 @@ # Copyright (c) 2018, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" name: sesu diff --git a/plugins/become/sudosu.py b/plugins/become/sudosu.py index fe85c9ee91..509b2725df 100644 --- a/plugins/become/sudosu.py +++ b/plugins/become/sudosu.py @@ -2,8 +2,7 @@ # Copyright (c) 2021, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" name: sudosu diff --git a/plugins/cache/memcached.py b/plugins/cache/memcached.py index 94cc7058d8..9c4fbec595 100644 --- a/plugins/cache/memcached.py +++ b/plugins/cache/memcached.py @@ -4,8 +4,7 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" author: Unknown (!UNKNOWN) diff --git a/plugins/cache/pickle.py b/plugins/cache/pickle.py index 60b1ea74e0..1e9ffcb264 100644 --- a/plugins/cache/pickle.py +++ b/plugins/cache/pickle.py @@ -5,8 +5,7 @@ # SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" name: pickle @@ -18,7 +17,7 @@ options: _uri: required: true description: - - Path in which the cache plugin will save the files. + - Path in which the cache plugin saves the files. 
env: - name: ANSIBLE_CACHE_PLUGIN_CONNECTION ini: @@ -57,6 +56,7 @@ class CacheModule(BaseFileCacheModule): """ A caching module backed by pickle files. """ + _persistent = False # prevent unnecessary JSON serialization and key munging def _load(self, filepath): # Pickle is a binary format diff --git a/plugins/cache/redis.py b/plugins/cache/redis.py index 30d5364032..41f69d659f 100644 --- a/plugins/cache/redis.py +++ b/plugins/cache/redis.py @@ -3,8 +3,7 @@ # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" author: Unknown (!UNKNOWN) diff --git a/plugins/cache/yaml.py b/plugins/cache/yaml.py index 88cdad2acb..8bf61f6898 100644 --- a/plugins/cache/yaml.py +++ b/plugins/cache/yaml.py @@ -5,8 +5,7 @@ # SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" name: yaml @@ -18,7 +17,7 @@ options: _uri: required: true description: - - Path in which the cache plugin will save the files. + - Path in which the cache plugin saves the files. env: - name: ANSIBLE_CACHE_PLUGIN_CONNECTION ini: diff --git a/plugins/callback/cgroup_memory_recap.py b/plugins/callback/cgroup_memory_recap.py index 079d1ccd08..b4099eae49 100644 --- a/plugins/callback/cgroup_memory_recap.py +++ b/plugins/callback/cgroup_memory_recap.py @@ -4,8 +4,7 @@ # SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" author: Unknown (!UNKNOWN) diff --git a/plugins/callback/context_demo.py b/plugins/callback/context_demo.py index 96acd2f947..e846aa2786 100644 --- a/plugins/callback/context_demo.py +++ b/plugins/callback/context_demo.py @@ -4,14 +4,13 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" author: Unknown (!UNKNOWN) name: context_demo type: aggregate -short_description: demo callback that adds play/task context +short_description: Demo callback that adds play/task context description: - Displays some play and task context along with normal output. - This is mostly for demo purposes. diff --git a/plugins/callback/counter_enabled.py b/plugins/callback/counter_enabled.py index 845a7823e0..2377d46585 100644 --- a/plugins/callback/counter_enabled.py +++ b/plugins/callback/counter_enabled.py @@ -6,18 +6,17 @@ Counter enabled Ansible callback plugin (See DOCUMENTATION for more information) ''' -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" author: Unknown (!UNKNOWN) name: counter_enabled type: stdout -short_description: adds counters to the output items (tasks and hosts/task) +short_description: Adds counters to the output items (tasks and hosts/task) description: - Use this callback when you need a kind of progress bar on a large environments. 
-  - You will know how many tasks has the playbook to run, and which one is actually running.
-  - You will know how many hosts may run a task, and which of them is actually running.
+  - You can see how many tasks the playbook has to run, and which one is actually running.
+  - You can see how many hosts may run a task, and which of them is actually running.
 extends_documentation_fragment:
   - default_callback
 requirements:
diff --git a/plugins/callback/default_without_diff.py b/plugins/callback/default_without_diff.py
index 8f300d8e4f..3ea55100bf 100644
--- a/plugins/callback/default_without_diff.py
+++ b/plugins/callback/default_without_diff.py
@@ -4,8 +4,7 @@
 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
 # SPDX-License-Identifier: GPL-3.0-or-later
 
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
 
 DOCUMENTATION = r"""
 name: default_without_diff
@@ -29,7 +28,7 @@ ansible_config: |
   stdout_callback = community.general.default_without_diff
 
 # Enable callback with environment variables:
-environment_variable: |
+environment_variable: |-
   ANSIBLE_STDOUT_CALLBACK=community.general.default_without_diff
 """
diff --git a/plugins/callback/dense.py b/plugins/callback/dense.py
index cf1130e3d1..1fd68b5d60 100644
--- a/plugins/callback/dense.py
+++ b/plugins/callback/dense.py
@@ -4,17 +4,16 @@
 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
 # SPDX-License-Identifier: GPL-3.0-or-later
 
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
 
 DOCUMENTATION = r"""
 name: dense
 type: stdout
-short_description: minimal stdout output
+short_description: Minimal stdout output
 extends_documentation_fragment:
   - default_callback
 description:
-  - When in verbose mode it will act the same as the default callback.
+  - When in verbose mode it acts the same as the default callback.
author: - Dag Wieers (@dagwieers) requirements: @@ -264,12 +263,8 @@ class CallbackModule(CallbackModule_default): sys.stdout.write(colors[self.hosts[name]['state']] + name + vt100.reset) sys.stdout.flush() -# if result._result.get('diff', False): -# sys.stdout.write('\n' + vt100.linewrap) sys.stdout.write(vt100.linewrap) -# self.keep = True - def _display_task_banner(self): if not self.shown_title: self.shown_title = True @@ -313,12 +308,12 @@ class CallbackModule(CallbackModule_default): delegated_vars = result._result.get('_ansible_delegated_vars', None) if delegated_vars: - sys.stdout.write(f"{vt100.reset + result._host.get_name()}>{colors[status]}{delegated_vars['ansible_host']}") + sys.stdout.write(f"{vt100.reset}{result._host.get_name()}>{colors[status]}{delegated_vars['ansible_host']}") else: sys.stdout.write(result._host.get_name()) sys.stdout.write(f": {dump}\n") - sys.stdout.write(vt100.reset + vt100.save + vt100.clearline) + sys.stdout.write(f"{vt100.reset}{vt100.save}{vt100.clearline}") sys.stdout.flush() if status == 'changed': diff --git a/plugins/callback/diy.py b/plugins/callback/diy.py index 5e46563aa4..f84789d010 100644 --- a/plugins/callback/diy.py +++ b/plugins/callback/diy.py @@ -4,8 +4,7 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" name: diy @@ -24,15 +23,15 @@ notes: that is available using the other various execution contexts, such as playbook, play, task, and so on so forth. - Options being set by their respective variable input can only be set using the variable if the variable was set in a context that is available to the respective callback. Use the C(ansible_callback_diy) dictionary to see what is available to a - callback. Additionally, C(ansible_callback_diy.top_level_var_names) will output the top level variable names available + callback. Additionally, C(ansible_callback_diy.top_level_var_names) outputs the top level variable names available to the callback. - Each option value is rendered as a template before being evaluated. This allows for the dynamic usage of an option. For - example, C("{{ 'yellow' if ansible_callback_diy.result.is_changed else 'bright green' }}"). - - 'B(Condition) for all C(msg) options: if value C(is None or omit), then the option is not being used. B(Effect): use - of the C(default) callback plugin for output.' - - 'B(Condition) for all C(msg) options: if value C(is not None and not omit and length is not greater than 0), then the + example, V("{{ 'yellow' if ansible_callback_diy.result.is_changed else 'bright green' }}"). + - 'B(Condition) for all C(msg) options: if value V(is None or omit), then the option is not being used. B(Effect): use of + the C(default) callback plugin for output.' + - 'B(Condition) for all C(msg) options: if value V(is not None and not omit and length is not greater than 0), then the option is being used without output. B(Effect): suppress output.' - - 'B(Condition) for all C(msg) options: if value C(is not None and not omit and length is greater than 0), then the option + - 'B(Condition) for all C(msg) options: if value V(is not None and not omit and length is greater than 0), then the option is being used with output. B(Effect): render value as template and output.' 
- 'Valid color values: V(black), V(bright gray), V(blue), V(white), V(green), V(bright blue), V(cyan), V(bright green), V(red), V(bright cyan), V(purple), V(bright red), V(yellow), V(bright purple), V(dark gray), V(bright yellow), V(magenta), @@ -786,6 +785,12 @@ from ansible.vars.manager import VariableManager from ansible.plugins.callback.default import CallbackModule as Default from ansible.module_utils.common.text.converters import to_text +try: + from ansible.template import trust_as_template # noqa: F401, pylint: disable=unused-import + SUPPORTS_DATA_TAGGING = True +except ImportError: + SUPPORTS_DATA_TAGGING = False + class DummyStdout(object): def flush(self): @@ -839,7 +844,10 @@ class CallbackModule(Default): return _ret def _using_diy(self, spec): - return (spec['msg'] is not None) and (spec['msg'] != spec['vars']['omit']) + sentinel = object() + omit = spec['vars'].get('omit', sentinel) + # With Data Tagging, omit is sentinel + return (spec['msg'] is not None) and (spec['msg'] != omit or omit is sentinel) def _parent_has_callback(self): return hasattr(super(CallbackModule, self), sys._getframe(1).f_code.co_name) @@ -895,7 +903,7 @@ class CallbackModule(Default): ) _ret.update(_all) - _ret.update(_ret.get(self.DIY_NS, {self.DIY_NS: CallbackDIYDict()})) + _ret.update(_ret.get(self.DIY_NS, {self.DIY_NS: {} if SUPPORTS_DATA_TAGGING else CallbackDIYDict()})) _ret[self.DIY_NS].update({'playbook': {}}) _playbook_attributes = ['entries', 'file_name', 'basedir'] diff --git a/plugins/callback/elastic.py b/plugins/callback/elastic.py index 6866e52712..a4b0974f0b 100644 --- a/plugins/callback/elastic.py +++ b/plugins/callback/elastic.py @@ -2,8 +2,7 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" author: Victor Martinez (@v1v) @@ -88,6 +87,7 @@ from contextlib import closing from os.path import basename from ansible.errors import AnsibleError, AnsibleRuntimeError +from ansible.module_utils.ansible_release import __version__ as ansible_version from ansible.module_utils.six import raise_from from ansible.plugins.callback import CallbackBase @@ -141,7 +141,6 @@ class HostData: class ElasticSource(object): def __init__(self, display): self.ansible_playbook = "" - self.ansible_version = None self.session = str(uuid.uuid4()) self.host = socket.gethostname() try: @@ -184,9 +183,6 @@ class ElasticSource(object): task = tasks_data[task_uuid] - if self.ansible_version is None and result._task_fields['args'].get('_ansible_version'): - self.ansible_version = result._task_fields['args'].get('_ansible_version') - task.add_host(HostData(host_uuid, host_name, status, result)) def generate_distributed_traces(self, tasks_data, status, end_time, traceparent, apm_service_name, @@ -210,8 +206,7 @@ class ElasticSource(object): else: apm_cli.begin_transaction("Session", start=parent_start_time) # Populate trace metadata attributes - if self.ansible_version is not None: - label(ansible_version=self.ansible_version) + label(ansible_version=ansible_version) label(ansible_session=self.session, ansible_host_name=self.host, ansible_host_user=self.user) if self.ip_address is not None: label(ansible_host_ip=self.ip_address) diff --git a/plugins/callback/jabber.py b/plugins/callback/jabber.py index 8f9d7cd833..c5a0881e14 100644 --- a/plugins/callback/jabber.py +++ 
b/plugins/callback/jabber.py
@@ -4,14 +4,13 @@
 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
 # SPDX-License-Identifier: GPL-3.0-or-later
 
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
 
 DOCUMENTATION = r"""
 author: Unknown (!UNKNOWN)
 name: jabber
 type: notification
-short_description: post task events to a Jabber server
+short_description: Post task events to a Jabber server
 description:
   - The chatty part of ChatOps with a Hipchat server as a target.
   - This callback plugin sends status updates to a HipChat channel during playbook execution.
@@ -37,7 +36,7 @@ options:
     env:
       - name: JABBER_PASS
   to:
-    description: Chat identifier that will receive the message.
+    description: Chat identifier that receives the message.
     type: str
     required: true
     env:
diff --git a/plugins/callback/log_plays.py b/plugins/callback/log_plays.py
index ed1ed39a72..3de6c0bec0 100644
--- a/plugins/callback/log_plays.py
+++ b/plugins/callback/log_plays.py
@@ -4,14 +4,13 @@
 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
 # SPDX-License-Identifier: GPL-3.0-or-later
 
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
 
 DOCUMENTATION = r"""
 author: Unknown (!UNKNOWN)
 name: log_plays
 type: notification
-short_description: write playbook output to log file
+short_description: Write playbook output to log file
 description:
   - This callback writes playbook output to a file per host in the C(/var/log/ansible/hosts) directory.
 requirements:
@@ -20,7 +19,7 @@ requirements:
 options:
   log_folder:
     default: /var/log/ansible/hosts
-    description: The folder where log files will be created.
+    description: The folder where log files are created.
     type: str
     env:
       - name: ANSIBLE_LOG_FOLDER
diff --git a/plugins/callback/loganalytics.py b/plugins/callback/loganalytics.py
index fa891bd10c..bd6b89fde1 100644
--- a/plugins/callback/loganalytics.py
+++ b/plugins/callback/loganalytics.py
@@ -3,8 +3,7 @@
 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
 # SPDX-License-Identifier: GPL-3.0-or-later
 
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
 
 DOCUMENTATION = r"""
 name: loganalytics
@@ -12,7 +11,7 @@ type: notification
 short_description: Posts task results to Azure Log Analytics
 author: "Cyrus Li (@zhcli) "
 description:
-  - This callback plugin will post task results in JSON formatted to an Azure Log Analytics workspace.
+  - This callback plugin posts task results in JSON format to an Azure Log Analytics workspace.
  - Credits to authors of splunk callback plugin.
version_added: "2.4.0"
 requirements:
@@ -63,6 +62,7 @@ import getpass
 
 from os.path import basename
 
+from ansible.module_utils.ansible_release import __version__ as ansible_version
 from ansible.module_utils.urls import open_url
 from ansible.parsing.ajson import AnsibleJSONEncoder
 from ansible.plugins.callback import CallbackBase
@@ -76,7 +76,6 @@ class AzureLogAnalyticsSource(object):
     def __init__(self):
         self.ansible_check_mode = False
         self.ansible_playbook = ""
-        self.ansible_version = ""
         self.session = str(uuid.uuid4())
         self.host = socket.gethostname()
         self.user = getpass.getuser()
@@ -103,10 +102,6 @@ class AzureLogAnalyticsSource(object):
         if result._task_fields['args'].get('_ansible_check_mode') is True:
             self.ansible_check_mode = True
 
-        if result._task_fields['args'].get('_ansible_version'):
-            self.ansible_version = \
-                result._task_fields['args'].get('_ansible_version')
-
         if result._task._role:
             ansible_role = str(result._task._role)
         else:
@@ -120,7 +115,7 @@ class AzureLogAnalyticsSource(object):
         data['host'] = self.host
         data['user'] = self.user
         data['runtime'] = runtime
-        data['ansible_version'] = self.ansible_version
+        data['ansible_version'] = ansible_version
         data['ansible_check_mode'] = self.ansible_check_mode
         data['ansible_host'] = result._host.name
         data['ansible_playbook'] = self.ansible_playbook
diff --git a/plugins/callback/logdna.py b/plugins/callback/logdna.py
index 35c5b86c1e..9ceb6547b2 100644
--- a/plugins/callback/logdna.py
+++ b/plugins/callback/logdna.py
@@ -3,8 +3,7 @@
 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
 # SPDX-License-Identifier: GPL-3.0-or-later
 
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
 
 DOCUMENTATION = r"""
 author: Unknown (!UNKNOWN)
@@ -12,7 +11,7 @@ name: logdna
 type: notification
 short_description: Sends playbook logs to LogDNA
 description:
-  - This callback will report logs from playbook actions, tasks, and events to LogDNA (U(https://app.logdna.com)).
+  - This callback reports logs from playbook actions, tasks, and events to LogDNA (U(https://app.logdna.com)).
 requirements:
   - LogDNA Python Library (U(https://github.com/logdna/python))
   - whitelisting in configuration
diff --git a/plugins/callback/logentries.py b/plugins/callback/logentries.py
index 0b3e2baaf0..796398d6b6 100644
--- a/plugins/callback/logentries.py
+++ b/plugins/callback/logentries.py
@@ -3,8 +3,7 @@
 # Copyright (c) 2017 Ansible Project
 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
 # SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
 
 DOCUMENTATION = r"""
 author: Unknown (!UNKNOWN)
@@ -12,7 +11,7 @@ name: logentries
 type: notification
 short_description: Sends events to Logentries
 description:
-  - This callback plugin will generate JSON objects and send them to Logentries using TCP for auditing/debugging purposes.
+  - This callback plugin generates JSON objects and sends them to Logentries using TCP for auditing/debugging purposes.
requirements: - whitelisting in configuration - certifi (Python library) diff --git a/plugins/callback/logstash.py b/plugins/callback/logstash.py index 088a84bf78..8b5acc6b9f 100644 --- a/plugins/callback/logstash.py +++ b/plugins/callback/logstash.py @@ -4,8 +4,7 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" author: Yevhen Khmelenko (@ujenmr) @@ -13,7 +12,7 @@ name: logstash type: notification short_description: Sends events to Logstash description: - - This callback will report facts and task events to Logstash U(https://www.elastic.co/products/logstash). + - This callback reports facts and task events to Logstash U(https://www.elastic.co/products/logstash). requirements: - whitelisting in configuration - logstash (Python library) @@ -128,9 +127,7 @@ class CallbackModule(CallbackBase): if not HAS_LOGSTASH: self.disabled = True - self._display.warning("The required python-logstash/python3-logstash is not installed. " - "pip install python-logstash for Python 2" - "pip install python3-logstash for Python 3") + self._display.warning("The required python3-logstash is not installed.") self.start_time = now() @@ -183,7 +180,7 @@ class CallbackModule(CallbackBase): data['status'] = "OK" data['ansible_playbook'] = playbook._file_name - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.info( "START PLAYBOOK | %s", data['ansible_playbook'], extra=data ) @@ -208,7 +205,7 @@ class CallbackModule(CallbackBase): data['ansible_playbook_duration'] = runtime.total_seconds() data['ansible_result'] = json.dumps(summarize_stat) # deprecated field - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.info( "FINISH PLAYBOOK | %s", json.dumps(summarize_stat), extra=data ) @@ -227,7 +224,7 @@ class CallbackModule(CallbackBase): data['ansible_play_id'] = self.play_id data['ansible_play_name'] = self.play_name - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.info("START PLAY | %s", self.play_name, extra=data) else: self.logger.info("ansible play", extra=data) @@ -252,7 +249,7 @@ class CallbackModule(CallbackBase): data['ansible_task'] = task_name data['ansible_facts'] = self._dump_results(result._result) - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.info( "SETUP FACTS | %s", self._dump_results(result._result), extra=data ) @@ -273,7 +270,7 @@ class CallbackModule(CallbackBase): data['ansible_task_id'] = self.task_id data['ansible_result'] = self._dump_results(result._result) - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.info( "TASK OK | %s | RESULT | %s", task_name, self._dump_results(result._result), extra=data @@ -294,7 +291,7 @@ class CallbackModule(CallbackBase): data['ansible_task_id'] = self.task_id data['ansible_result'] = self._dump_results(result._result) - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.info("TASK SKIPPED | %s", task_name, extra=data) else: self.logger.info("ansible skipped", extra=data) @@ -308,7 +305,7 @@ class CallbackModule(CallbackBase): data['ansible_play_name'] = self.play_name data['imported_file'] = imported_file - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": 
self.logger.info("IMPORT | %s", imported_file, extra=data) else: self.logger.info("ansible import", extra=data) @@ -322,7 +319,7 @@ class CallbackModule(CallbackBase): data['ansible_play_name'] = self.play_name data['imported_file'] = missing_file - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.info("NOT IMPORTED | %s", missing_file, extra=data) else: self.logger.info("ansible import", extra=data) @@ -346,7 +343,7 @@ class CallbackModule(CallbackBase): data['ansible_result'] = self._dump_results(result._result) self.errors += 1 - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.error( "TASK FAILED | %s | HOST | %s | RESULT | %s", task_name, self.hostname, @@ -369,7 +366,7 @@ class CallbackModule(CallbackBase): data['ansible_result'] = self._dump_results(result._result) self.errors += 1 - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.error( "UNREACHABLE | %s | HOST | %s | RESULT | %s", task_name, self.hostname, @@ -392,7 +389,7 @@ class CallbackModule(CallbackBase): data['ansible_result'] = self._dump_results(result._result) self.errors += 1 - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.error( "ASYNC FAILED | %s | HOST | %s | RESULT | %s", task_name, self.hostname, diff --git a/plugins/callback/mail.py b/plugins/callback/mail.py index 7571993ea4..d05982cd61 100644 --- a/plugins/callback/mail.py +++ b/plugins/callback/mail.py @@ -4,15 +4,14 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" name: mail type: notification short_description: Sends failure events through email description: - - This callback will report failures through email. + - This callback reports failures through email. 
author: - Dag Wieers (@dagwieers) requirements: @@ -213,7 +212,8 @@ class CallbackModule(CallbackBase): if self.itembody: body += self.itembody elif result._result.get('failed_when_result') is True: - fail_cond = self.indent('failed_when:\n- ' + '\n- '.join(result._task.failed_when)) + fail_cond_list = '\n- '.join(result._task.failed_when) + fail_cond = self.indent(f"failed_when:\n- {fail_cond_list}") body += f"due to the following condition:\n\n{fail_cond}\n\n" elif result._result.get('msg'): body += self.body_blob(result._result['msg'], 'message') diff --git a/plugins/callback/nrdp.py b/plugins/callback/nrdp.py index fa5d7cfd05..375876973a 100644 --- a/plugins/callback/nrdp.py +++ b/plugins/callback/nrdp.py @@ -4,8 +4,7 @@ # SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" name: nrdp diff --git a/plugins/callback/null.py b/plugins/callback/null.py index 0cc722f63b..0527c1c467 100644 --- a/plugins/callback/null.py +++ b/plugins/callback/null.py @@ -4,8 +4,7 @@ # SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" author: Unknown (!UNKNOWN) @@ -13,7 +12,7 @@ name: 'null' type: stdout requirements: - set as main display callback -short_description: do not display stuff to screen +short_description: Do not display stuff to screen description: - This callback prevents outputting events to screen. """ diff --git a/plugins/callback/opentelemetry.py b/plugins/callback/opentelemetry.py index 38388e8270..80f24924b9 100644 --- a/plugins/callback/opentelemetry.py +++ b/plugins/callback/opentelemetry.py @@ -3,8 +3,7 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" author: Victor Martinez (@v1v) @@ -36,8 +35,8 @@ options: - Whether to enable this callback only if the given environment variable exists and it is set to V(true). - This is handy when you use Configuration as Code and want to send distributed traces if running in the CI rather when running Ansible locally. - - For such, it evaluates the given O(enable_from_environment) value as environment variable and if set to true this - plugin will be enabled. + - For such, it evaluates the given O(enable_from_environment) value as environment variable and if set to V(true) this + plugin is enabled. 
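The mail.py hunk earlier hoists the `'\n- '.join(...)` out of the f-string. That is not just style: before Python 3.12 (PEP 701), a backslash inside an f-string *expression* is a SyntaxError, so the joined string has to be built first. A small illustration with hypothetical conditions:

```python
# Why the join is hoisted out of the f-string: prior to Python 3.12,
# backslashes are not allowed inside f-string expressions, so writing
# the join inline inside the braces would be a SyntaxError. Backslashes
# in the literal part (the "\n- " below) have always been fine.
conds = ["rc != 0", "'error' in stdout"]  # hypothetical failed_when list
fail_cond_list = '\n- '.join(conds)
fail_cond = f"failed_when:\n- {fail_cond_list}"
print(fail_cond)
```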
env:
  - name: ANSIBLE_OPENTELEMETRY_ENABLE_FROM_ENVIRONMENT
ini:
@@ -144,6 +143,7 @@ from collections import OrderedDict
from os.path import basename
from ansible.errors import AnsibleError
+from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.module_utils.six import raise_from
from ansible.module_utils.six.moves.urllib.parse import urlparse
from ansible.plugins.callback import CallbackBase
@@ -213,7 +213,6 @@ class HostData:
class OpenTelemetrySource(object):
    def __init__(self, display):
        self.ansible_playbook = ""
-        self.ansible_version = None
        self.session = str(uuid.uuid4())
        self.host = socket.gethostname()
        try:
@@ -261,9 +260,6 @@ class OpenTelemetrySource(object):
        task = tasks_data[task_uuid]
-        if self.ansible_version is None and hasattr(result, '_task_fields') and result._task_fields['args'].get('_ansible_version'):
-            self.ansible_version = result._task_fields['args'].get('_ansible_version')
-
        task.dump = dump
        task.add_host(HostData(host_uuid, host_name, status, result))
@@ -311,8 +307,7 @@ class OpenTelemetrySource(object):
                              start_time=parent_start_time, kind=SpanKind.SERVER) as parent:
            parent.set_status(status)
            # Populate trace metadata attributes
-            if self.ansible_version is not None:
-                parent.set_attribute("ansible.version", self.ansible_version)
+            parent.set_attribute("ansible.version", ansible_version)
            parent.set_attribute("ansible.session", self.session)
            parent.set_attribute("ansible.host.name", self.host)
            if self.ip_address is not None:
diff --git a/plugins/callback/print_task.py b/plugins/callback/print_task.py
new file mode 100644
index 0000000000..809baddb95
--- /dev/null
+++ b/plugins/callback/print_task.py
@@ -0,0 +1,64 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2025, Max Mitschke
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+name: print_task
+type: aggregate
+short_description: Prints playbook task snippet to job output
+description:
+  - This plugin prints the currently executing playbook task to the job output.
+version_added: 10.7.0
+requirements:
+  - enable in configuration
+"""
+
+EXAMPLES = r"""
+ansible.cfg: |-
+  # Enable plugin
+  [defaults]
+  callbacks_enabled=community.general.print_task
+"""
+
+from yaml import load, dump
+
+try:
+    from yaml import CSafeDumper as SafeDumper
+    from yaml import CSafeLoader as SafeLoader
+except ImportError:
+    from yaml import SafeDumper, SafeLoader
+
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+    """
+    This callback module prints the currently executing playbook task to the job output.
+ """ + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'aggregate' + CALLBACK_NAME = 'community.general.print_task' + + CALLBACK_NEEDS_ENABLED = True + + def __init__(self): + super(CallbackModule, self).__init__() + self._printed_message = False + + def _print_task(self, task): + if hasattr(task, '_ds'): + task_snippet = load(str([task._ds.copy()]), Loader=SafeLoader) + task_yaml = dump(task_snippet, sort_keys=False, Dumper=SafeDumper) + self._display.display(f"\n{task_yaml}\n") + self._printed_message = True + + def v2_playbook_on_task_start(self, task, is_conditional): + self._printed_message = False + + def v2_runner_on_start(self, host, task): + if not self._printed_message: + self._print_task(task) diff --git a/plugins/callback/say.py b/plugins/callback/say.py index 94f49cc822..8a4e93f353 100644 --- a/plugins/callback/say.py +++ b/plugins/callback/say.py @@ -5,8 +5,7 @@ # SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" author: Unknown (!UNKNOWN) @@ -15,9 +14,9 @@ type: notification requirements: - whitelisting in configuration - the C(/usr/bin/say) command line program (standard on macOS) or C(espeak) command line program -short_description: notify using software speech synthesizer +short_description: Notify using software speech synthesizer description: - - This plugin will use the C(say) or C(espeak) program to "speak" about play events. + - This plugin uses C(say) or C(espeak) to "speak" about play events. """ import platform diff --git a/plugins/callback/selective.py b/plugins/callback/selective.py index 27ac63658c..53d40671bc 100644 --- a/plugins/callback/selective.py +++ b/plugins/callback/selective.py @@ -4,8 +4,7 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" author: Unknown (!UNKNOWN) @@ -13,7 +12,7 @@ name: selective type: stdout requirements: - set as main display callback -short_description: only print certain tasks +short_description: Only print certain tasks description: - This callback only prints tasks that have been tagged with C(print_action) or that have failed. This allows operators to focus on the tasks that provide value only. diff --git a/plugins/callback/slack.py b/plugins/callback/slack.py index fda430b778..8bb081a541 100644 --- a/plugins/callback/slack.py +++ b/plugins/callback/slack.py @@ -5,8 +5,7 @@ # SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" author: Unknown (!UNKNOWN) @@ -19,6 +18,11 @@ short_description: Sends play events to a Slack channel description: - This is an ansible callback plugin that sends status updates to a Slack channel during playbook execution. options: + http_agent: + description: + - HTTP user agent to use for requests to Slack. + type: string + version_added: "10.5.0" webhook_url: required: true description: Slack Webhook URL. 
@@ -107,7 +111,7 @@ class CallbackModule(CallbackBase): self.username = self.get_option('username') self.show_invocation = (self._display.verbosity > 1) self.validate_certs = self.get_option('validate_certs') - + self.http_agent = self.get_option('http_agent') if self.webhook_url is None: self.disabled = True self._display.warning('Slack Webhook URL was not provided. The ' @@ -133,8 +137,13 @@ class CallbackModule(CallbackBase): self._display.debug(data) self._display.debug(self.webhook_url) try: - response = open_url(self.webhook_url, data=data, validate_certs=self.validate_certs, - headers=headers) + response = open_url( + self.webhook_url, + data=data, + validate_certs=self.validate_certs, + headers=headers, + http_agent=self.http_agent, + ) return response.read() except Exception as e: self._display.warning(f'Could not submit message to Slack: {e}') diff --git a/plugins/callback/splunk.py b/plugins/callback/splunk.py index 05cca87a69..c385050d67 100644 --- a/plugins/callback/splunk.py +++ b/plugins/callback/splunk.py @@ -3,8 +3,7 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" name: splunk @@ -12,7 +11,7 @@ type: notification short_description: Sends task result events to Splunk HTTP Event Collector author: "Stuart Hirst (!UNKNOWN) " description: - - This callback plugin will send task results as JSON formatted events to a Splunk HTTP collector. + - This callback plugin sends task results as JSON formatted events to a Splunk HTTP collector. - The companion Splunk Monitoring & Diagnostics App is available here U(https://splunkbase.splunk.com/app/4023/). - Credit to "Ryan Currah (@ryancurrah)" for original source upon which this is based. 
requirements: @@ -92,6 +91,7 @@ import getpass from os.path import basename +from ansible.module_utils.ansible_release import __version__ as ansible_version from ansible.module_utils.urls import open_url from ansible.parsing.ajson import AnsibleJSONEncoder from ansible.plugins.callback import CallbackBase @@ -105,7 +105,6 @@ class SplunkHTTPCollectorSource(object): def __init__(self): self.ansible_check_mode = False self.ansible_playbook = "" - self.ansible_version = "" self.session = str(uuid.uuid4()) self.host = socket.gethostname() self.ip_address = socket.gethostbyname(socket.gethostname()) @@ -115,10 +114,6 @@ class SplunkHTTPCollectorSource(object): if result._task_fields['args'].get('_ansible_check_mode') is True: self.ansible_check_mode = True - if result._task_fields['args'].get('_ansible_version'): - self.ansible_version = \ - result._task_fields['args'].get('_ansible_version') - if result._task._role: ansible_role = str(result._task._role) else: @@ -144,7 +139,7 @@ class SplunkHTTPCollectorSource(object): data['ip_address'] = self.ip_address data['user'] = self.user data['runtime'] = runtime - data['ansible_version'] = self.ansible_version + data['ansible_version'] = ansible_version data['ansible_check_mode'] = self.ansible_check_mode data['ansible_host'] = result._host.name data['ansible_playbook'] = self.ansible_playbook diff --git a/plugins/callback/sumologic.py b/plugins/callback/sumologic.py index 108f324b29..7a762c30e8 100644 --- a/plugins/callback/sumologic.py +++ b/plugins/callback/sumologic.py @@ -3,8 +3,7 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" name: sumologic @@ -12,7 +11,7 @@ type: notification short_description: Sends task result events to Sumologic author: "Ryan Currah (@ryancurrah)" description: - - This callback plugin will send task results as JSON formatted events to a Sumologic HTTP collector source. + - This callback plugin sends task results as JSON formatted events to a Sumologic HTTP collector source. 
requirements: - Whitelisting this callback plugin - 'Create a HTTP collector source in Sumologic and specify a custom timestamp format of V(yyyy-MM-dd HH:mm:ss ZZZZ) and @@ -49,6 +48,7 @@ import getpass from os.path import basename +from ansible.module_utils.ansible_release import __version__ as ansible_version from ansible.module_utils.urls import open_url from ansible.parsing.ajson import AnsibleJSONEncoder from ansible.plugins.callback import CallbackBase @@ -62,7 +62,6 @@ class SumologicHTTPCollectorSource(object): def __init__(self): self.ansible_check_mode = False self.ansible_playbook = "" - self.ansible_version = "" self.session = str(uuid.uuid4()) self.host = socket.gethostname() self.ip_address = socket.gethostbyname(socket.gethostname()) @@ -72,10 +71,6 @@ class SumologicHTTPCollectorSource(object): if result._task_fields['args'].get('_ansible_check_mode') is True: self.ansible_check_mode = True - if result._task_fields['args'].get('_ansible_version'): - self.ansible_version = \ - result._task_fields['args'].get('_ansible_version') - if result._task._role: ansible_role = str(result._task._role) else: @@ -93,7 +88,7 @@ class SumologicHTTPCollectorSource(object): data['ip_address'] = self.ip_address data['user'] = self.user data['runtime'] = runtime - data['ansible_version'] = self.ansible_version + data['ansible_version'] = ansible_version data['ansible_check_mode'] = self.ansible_check_mode data['ansible_host'] = result._host.name data['ansible_playbook'] = self.ansible_playbook diff --git a/plugins/callback/syslog_json.py b/plugins/callback/syslog_json.py index d1797455ac..cab3973be1 100644 --- a/plugins/callback/syslog_json.py +++ b/plugins/callback/syslog_json.py @@ -4,8 +4,7 @@ # SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" author: Unknown (!UNKNOWN) @@ -13,12 +12,12 @@ name: syslog_json type: notification requirements: - whitelist in configuration -short_description: sends JSON events to syslog +short_description: Sends JSON events to syslog description: - This plugin logs ansible-playbook and ansible runs to a syslog server in JSON format. options: server: - description: Syslog server that will receive the event. + description: Syslog server that receives the event. type: str env: - name: SYSLOG_SERVER diff --git a/plugins/callback/tasks_only.py b/plugins/callback/tasks_only.py new file mode 100644 index 0000000000..f64c4c57db --- /dev/null +++ b/plugins/callback/tasks_only.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2025, Felix Fontein +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +author: Felix Fontein (@felixfontein) +name: tasks_only +type: stdout +version_added: 11.1.0 +short_description: Only show tasks +description: + - Removes play start and stats marker from P(ansible.builtin.default#callback)'s output. + - Can be used to generate output for documentation examples. + For this, the O(number_of_columns) option should be set to an explicit value. +extends_documentation_fragment: + - default_callback +options: + number_of_columns: + description: + - Sets the number of columns for Ansible's display. 
+ type: int + env: + - name: ANSIBLE_COLLECTIONS_TASKS_ONLY_NUMBER_OF_COLUMNS +""" + +EXAMPLES = r""" +--- +# Enable callback in ansible.cfg: +ansible_config: |- + [defaults] + stdout_callback = community.general.tasks_only + +--- +# Enable callback with environment variables: +environment_variable: |- + ANSIBLE_STDOUT_CALLBACK=community.general.tasks_only +""" + +from ansible.plugins.callback.default import CallbackModule as Default + + +class CallbackModule(Default): + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'stdout' + CALLBACK_NAME = 'community.general.tasks_only' + + def v2_playbook_on_play_start(self, play): + pass + + def v2_playbook_on_stats(self, stats): + pass + + def set_options(self, *args, **kwargs): + result = super(CallbackModule, self).set_options(*args, **kwargs) + self.number_of_columns = self.get_option("number_of_columns") + if self.number_of_columns is not None: + self._display.columns = self.number_of_columns + return result diff --git a/plugins/callback/timestamp.py b/plugins/callback/timestamp.py index 89249c6562..a43ddcbef9 100644 --- a/plugins/callback/timestamp.py +++ b/plugins/callback/timestamp.py @@ -5,9 +5,8 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type DOCUMENTATION = r""" name: timestamp diff --git a/plugins/callback/unixy.py b/plugins/callback/unixy.py index 8f80bf8f12..8fd8c10c94 100644 --- a/plugins/callback/unixy.py +++ b/plugins/callback/unixy.py @@ -5,14 +5,13 @@ # SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" name: unixy type: stdout author: Al Bowles (@akatch) -short_description: condensed Ansible output +short_description: Condensed Ansible output description: - Consolidated Ansible output in the style of LINUX/UNIX startup logs. extends_documentation_fragment: diff --git a/plugins/callback/yaml.py b/plugins/callback/yaml.py index 0484f80a34..f02840c9c6 100644 --- a/plugins/callback/yaml.py +++ b/plugins/callback/yaml.py @@ -4,8 +4,7 @@ # SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" author: Unknown (!UNKNOWN) @@ -13,8 +12,9 @@ name: yaml type: stdout short_description: YAML-ized Ansible screen output deprecated: - removed_in: 13.0.0 - why: Starting in ansible-core 2.13, the P(ansible.builtin.default#callback) callback has support for printing output in YAML format. + removed_in: 12.0.0 + why: Starting in ansible-core 2.13, the P(ansible.builtin.default#callback) callback has support for printing output in + YAML format. alternative: Use O(ansible.builtin.default#callback:result_format=yaml). description: - Ansible output that can be quite a bit easier to read than the default JSON formatting. 
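The tasks_only plugin above shows the lightest way to reshape stdout output: subclass the default stdout callback and turn the unwanted hooks into no-ops. A minimal sketch of the same pattern, with a hypothetical plugin name:

```python
# Minimal sketch of the subclass-and-silence pattern tasks_only uses;
# the CALLBACK_NAME is hypothetical.
from ansible.plugins.callback.default import CallbackModule as Default


class CallbackModule(Default):
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'my.namespace.quiet'  # hypothetical name

    def v2_playbook_on_stats(self, stats):
        # Suppress the final recap block; everything else stays default.
        pass
```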
@@ -37,9 +37,9 @@ import yaml import json import re import string +from collections.abc import Mapping, Sequence from ansible.module_utils.common.text.converters import to_text -from ansible.parsing.yaml.dumper import AnsibleDumper from ansible.plugins.callback import strip_internal_keys, module_response_deepcopy from ansible.plugins.callback.default import CallbackModule as Default @@ -53,29 +53,80 @@ def should_use_block(value): return False -class MyDumper(AnsibleDumper): - def represent_scalar(self, tag, value, style=None): - """Uses block style for multi-line strings""" - if style is None: - if should_use_block(value): - style = '|' - # we care more about readable than accuracy, so... - # ...no trailing space - value = value.rstrip() - # ...and non-printable characters - value = ''.join(x for x in value if x in string.printable or ord(x) >= 0xA0) - # ...tabs prevent blocks from expanding - value = value.expandtabs() - # ...and odd bits of whitespace - value = re.sub(r'[\x0b\x0c\r]', '', value) - # ...as does trailing space - value = re.sub(r' +\n', '\n', value) - else: - style = self.default_style - node = yaml.representer.ScalarNode(tag, value, style=style) - if self.alias_key is not None: - self.represented_objects[self.alias_key] = node - return node +def adjust_str_value_for_block(value): + # we care more about readable than accuracy, so... + # ...no trailing space + value = value.rstrip() + # ...and non-printable characters + value = ''.join(x for x in value if x in string.printable or ord(x) >= 0xA0) + # ...tabs prevent blocks from expanding + value = value.expandtabs() + # ...and odd bits of whitespace + value = re.sub(r'[\x0b\x0c\r]', '', value) + # ...as does trailing space + value = re.sub(r' +\n', '\n', value) + return value + + +def create_string_node(tag, value, style, default_style): + if style is None: + if should_use_block(value): + style = '|' + value = adjust_str_value_for_block(value) + else: + style = default_style + return yaml.representer.ScalarNode(tag, value, style=style) + + +try: + from ansible.module_utils.common.yaml import HAS_LIBYAML + # import below was added in https://github.com/ansible/ansible/pull/85039, + # first contained in ansible-core 2.19.0b2: + from ansible.utils.vars import transform_to_native_types + + if HAS_LIBYAML: + from yaml.cyaml import CSafeDumper as SafeDumper + else: + from yaml import SafeDumper + + class MyDumper(SafeDumper): + def represent_scalar(self, tag, value, style=None): + """Uses block style for multi-line strings""" + node = create_string_node(tag, value, style, self.default_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + return node + +except ImportError: + # In case transform_to_native_types cannot be imported, we either have ansible-core 2.19.0b1 + # (or some random commit from the devel or stable-2.19 branch after merging the DT changes + # and before transform_to_native_types was added), or we have a version without the DT changes. + + # Here we simply assume we have a version without the DT changes, and thus can continue as + # with ansible-core 2.18 and before. 
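The rewritten yaml.py keeps a single represent_scalar override and only swaps the base dumper class depending on what ansible-core provides. The override itself is plain PyYAML; a standalone sketch of the block-scalar behavior it implements:

```python
# Standalone sketch of the block-scalar behavior MyDumper implements:
# multi-line strings are emitted with '|' block style for readability.
import yaml


class BlockDumper(yaml.SafeDumper):
    def represent_scalar(self, tag, value, style=None):
        if style is None and isinstance(value, str) and '\n' in value:
            style = '|'  # force literal block style for multi-line strings
        return super().represent_scalar(tag, value, style=style)


print(yaml.dump({'stdout': 'line one\nline two\n'},
                Dumper=BlockDumper, default_flow_style=False))
# stdout: |
#   line one
#   line two
```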
+ + transform_to_native_types = None + + from ansible.parsing.yaml.dumper import AnsibleDumper + + class MyDumper(AnsibleDumper): # pylint: disable=inherit-non-class + def represent_scalar(self, tag, value, style=None): + """Uses block style for multi-line strings""" + node = create_string_node(tag, value, style, self.default_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + return node + + +def transform_recursively(value, transform): + # Since 2.19.0b7, this should no longer be needed: + # https://github.com/ansible/ansible/issues/85325 + # https://github.com/ansible/ansible/pull/85389 + if isinstance(value, Mapping): + return {transform(k): transform(v) for k, v in value.items()} + if isinstance(value, Sequence) and not isinstance(value, (str, bytes)): + return [transform(e) for e in value] + return transform(value) class CallbackModule(Default): @@ -132,6 +183,8 @@ class CallbackModule(Default): if abridged_result: dumped += '\n' + if transform_to_native_types is not None: + abridged_result = transform_recursively(abridged_result, lambda v: transform_to_native_types(v, redact=False)) dumped += to_text(yaml.dump(abridged_result, allow_unicode=True, width=1000, Dumper=MyDumper, default_flow_style=False)) # indent by a couple of spaces diff --git a/plugins/connection/chroot.py b/plugins/connection/chroot.py index 7c4000ec5c..842c3f05d3 100644 --- a/plugins/connection/chroot.py +++ b/plugins/connection/chroot.py @@ -7,8 +7,7 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" author: Maykel Moya (!UNKNOWN) diff --git a/plugins/connection/funcd.py b/plugins/connection/funcd.py index 31a9431ce1..ad01326aff 100644 --- a/plugins/connection/funcd.py +++ b/plugins/connection/funcd.py @@ -6,8 +6,7 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" author: Michael Scherer (@mscherer) diff --git a/plugins/connection/incus.py b/plugins/connection/incus.py index 9d5a3e7a57..4f73d05532 100644 --- a/plugins/connection/incus.py +++ b/plugins/connection/incus.py @@ -5,8 +5,7 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" author: Stéphane Graber (@stgraber) @@ -33,6 +32,15 @@ options: vars: - name: ansible_executable - name: ansible_incus_executable + incus_become_method: + description: + - Become command used to switch to a non-root user. + - Is only used when O(remote_user) is not V(root). + type: str + default: /bin/su + vars: + - name: incus_become_method + version_added: 10.4.0 remote: description: - The name of the Incus remote to use (per C(incus remote list)). @@ -41,6 +49,22 @@ options: default: local vars: - name: ansible_incus_remote + remote_user: + description: + - User to login/authenticate as. + - Can be set from the CLI with the C(--user) or C(-u) options. 
+ type: string + default: root + vars: + - name: ansible_user + env: + - name: ANSIBLE_REMOTE_USER + ini: + - section: defaults + key: remote_user + keyword: + - name: remote_user + version_added: 10.4.0 project: description: - The name of the Incus project to use (per C(incus project list)). @@ -65,7 +89,6 @@ class Connection(ConnectionBase): transport = "incus" has_pipelining = True - default_user = 'root' def __init__(self, play_context, new_stdin, *args, **kwargs): super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) @@ -80,10 +103,34 @@ class Connection(ConnectionBase): super(Connection, self)._connect() if not self._connected: - self._display.vvv("ESTABLISH Incus CONNECTION FOR USER: root", + self._display.vvv(f"ESTABLISH Incus CONNECTION FOR USER: {self.get_option('remote_user')}", host=self._instance()) self._connected = True + def _build_command(self, cmd) -> str: + """build the command to execute on the incus host""" + + exec_cmd = [ + self._incus_cmd, + "--project", self.get_option("project"), + "exec", + f"{self.get_option('remote')}:{self._instance()}", + "--"] + + if self.get_option("remote_user") != "root": + self._display.vvv( + f"INFO: Running as non-root user: {self.get_option('remote_user')}, \ + trying to run 'incus exec' with become method: {self.get_option('incus_become_method')}", + host=self._instance(), + ) + exec_cmd.extend( + [self.get_option("incus_become_method"), self.get_option("remote_user"), "-c"] + ) + + exec_cmd.extend([self.get_option("executable"), "-c", cmd]) + + return exec_cmd + def _instance(self): # Return only the leading part of the FQDN as the instance name # as Incus instance names cannot be a FQDN. @@ -96,13 +143,8 @@ class Connection(ConnectionBase): self._display.vvv(f"EXEC {cmd}", host=self._instance()) - local_cmd = [ - self._incus_cmd, - "--project", self.get_option("project"), - "exec", - f"{self.get_option('remote')}:{self._instance()}", - "--", - self._play_context.executable, "-c", cmd] + local_cmd = self._build_command(cmd) + self._display.vvvvv(f"EXEC {local_cmd}", host=self._instance()) local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru') @@ -113,14 +155,57 @@ class Connection(ConnectionBase): stdout = to_text(stdout) stderr = to_text(stderr) - if stderr == "Error: Instance is not running.\n": - raise AnsibleConnectionFailure(f"instance not running: {self._instance()}") + if stderr.startswith("Error: ") and stderr.rstrip().endswith( + ": Instance is not running" + ): + raise AnsibleConnectionFailure( + f"instance not running: {self._instance()} (remote={self.get_option('remote')}, project={self.get_option('project')})" + ) - if stderr == "Error: Instance not found\n": - raise AnsibleConnectionFailure(f"instance not found: {self._instance()}") + if stderr.startswith("Error: ") and stderr.rstrip().endswith( + ": Instance not found" + ): + raise AnsibleConnectionFailure( + f"instance not found: {self._instance()} (remote={self.get_option('remote')}, project={self.get_option('project')})" + ) + + if ( + stderr.startswith("Error: ") + and ": User does not have permission " in stderr + ): + raise AnsibleConnectionFailure( + f"instance access denied: {self._instance()} (remote={self.get_option('remote')}, project={self.get_option('project')})" + ) + + if ( + stderr.startswith("Error: ") + and ": User does not have entitlement " in stderr + ): + raise AnsibleConnectionFailure( + f"instance access denied: 
{self._instance()} (remote={self.get_option('remote')}, project={self.get_option('project')})" + ) return process.returncode, stdout, stderr + def _get_remote_uid_gid(self) -> tuple[int, int]: + """Get the user and group ID of 'remote_user' from the instance.""" + + rc, uid_out, err = self.exec_command("/bin/id -u") + if rc != 0: + raise AnsibleError( + f"Failed to get remote uid for user {self.get_option('remote_user')}: {err}" + ) + uid = uid_out.strip() + + rc, gid_out, err = self.exec_command("/bin/id -g") + if rc != 0: + raise AnsibleError( + f"Failed to get remote gid for user {self.get_option('remote_user')}: {err}" + ) + gid = gid_out.strip() + + return int(uid), int(gid) + def put_file(self, in_path, out_path): """ put a file from local to Incus """ super(Connection, self).put_file(in_path, out_path) @@ -131,12 +216,35 @@ class Connection(ConnectionBase): if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')): raise AnsibleFileNotFound(f"input path is not a file: {in_path}") - local_cmd = [ - self._incus_cmd, - "--project", self.get_option("project"), - "file", "push", "--quiet", - in_path, - f"{self.get_option('remote')}:{self._instance()}/{out_path}"] + if self.get_option("remote_user") != "root": + uid, gid = self._get_remote_uid_gid() + local_cmd = [ + self._incus_cmd, + "--project", + self.get_option("project"), + "file", + "push", + "--uid", + str(uid), + "--gid", + str(gid), + "--quiet", + in_path, + f"{self.get_option('remote')}:{self._instance()}/{out_path}", + ] + else: + local_cmd = [ + self._incus_cmd, + "--project", + self.get_option("project"), + "file", + "push", + "--quiet", + in_path, + f"{self.get_option('remote')}:{self._instance()}/{out_path}", + ] + + self._display.vvvvv(f"PUT {local_cmd}", host=self._instance()) local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] diff --git a/plugins/connection/iocage.py b/plugins/connection/iocage.py index 4d3f415194..35d5ab0658 100644 --- a/plugins/connection/iocage.py +++ b/plugins/connection/iocage.py @@ -7,8 +7,7 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" author: Stephan Lohse (!UNKNOWN) diff --git a/plugins/connection/jail.py b/plugins/connection/jail.py index 6e6c156330..6f06c96774 100644 --- a/plugins/connection/jail.py +++ b/plugins/connection/jail.py @@ -7,8 +7,7 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" author: Ansible Core Team diff --git a/plugins/connection/lxc.py b/plugins/connection/lxc.py index 0744136192..a9e46cf56f 100644 --- a/plugins/connection/lxc.py +++ b/plugins/connection/lxc.py @@ -4,8 +4,7 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" author: Joerg Thalheim (!UNKNOWN) diff --git a/plugins/connection/lxd.py b/plugins/connection/lxd.py index 1a071e1d8d..2cc774a1d4 100644 --- 
a/plugins/connection/lxd.py +++ b/plugins/connection/lxd.py @@ -4,8 +4,7 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" author: Matt Clay (@mattclay) @@ -33,6 +32,15 @@ options: vars: - name: ansible_executable - name: ansible_lxd_executable + lxd_become_method: + description: + - Become command used to switch to a non-root user. + - Is only used when O(remote_user) is not V(root). + type: str + default: /bin/su + vars: + - name: lxd_become_method + version_added: 10.4.0 remote: description: - Name of the LXD remote to use. @@ -41,6 +49,22 @@ options: vars: - name: ansible_lxd_remote version_added: 2.0.0 + remote_user: + description: + - User to login/authenticate as. + - Can be set from the CLI with the C(--user) or C(-u) options. + type: string + default: root + vars: + - name: ansible_user + env: + - name: ANSIBLE_REMOTE_USER + ini: + - section: defaults + key: remote_user + keyword: + - name: remote_user + version_added: 10.4.0 project: description: - Name of the LXD project to use. @@ -64,7 +88,6 @@ class Connection(ConnectionBase): transport = 'community.general.lxd' has_pipelining = True - default_user = 'root' def __init__(self, play_context, new_stdin, *args, **kwargs): super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) @@ -74,9 +97,6 @@ class Connection(ConnectionBase): except ValueError: raise AnsibleError("lxc command not found in PATH") - if self._play_context.remote_user is not None and self._play_context.remote_user != 'root': - self._display.warning('lxd does not support remote_user, using default: root') - def _host(self): """ translate remote_addr to lxd (short) hostname """ return self.get_option("remote_addr").split(".", 1)[0] @@ -86,25 +106,40 @@ class Connection(ConnectionBase): super(Connection, self)._connect() if not self._connected: - self._display.vvv("ESTABLISH LXD CONNECTION FOR USER: root", host=self._host()) + self._display.vvv(f"ESTABLISH LXD CONNECTION FOR USER: {self.get_option('remote_user')}", host=self._host()) self._connected = True + def _build_command(self, cmd) -> str: + """build the command to execute on the lxd host""" + + exec_cmd = [self._lxc_cmd] + + if self.get_option("project"): + exec_cmd.extend(["--project", self.get_option("project")]) + + exec_cmd.extend(["exec", f"{self.get_option('remote')}:{self._host()}", "--"]) + + if self.get_option("remote_user") != "root": + self._display.vvv( + f"INFO: Running as non-root user: {self.get_option('remote_user')}, \ + trying to run 'lxc exec' with become method: {self.get_option('lxd_become_method')}", + host=self._host(), + ) + exec_cmd.extend( + [self.get_option("lxd_become_method"), self.get_option("remote_user"), "-c"] + ) + + exec_cmd.extend([self.get_option("executable"), "-c", cmd]) + + return exec_cmd + def exec_command(self, cmd, in_data=None, sudoable=True): """ execute a command on the lxd host """ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) self._display.vvv(f"EXEC {cmd}", host=self._host()) - local_cmd = [self._lxc_cmd] - if self.get_option("project"): - local_cmd.extend(["--project", self.get_option("project")]) - local_cmd.extend([ - "exec", - f"{self.get_option('remote')}:{self._host()}", - "--", - self.get_option("executable"), "-c", cmd - ]) - + local_cmd = 
self._build_command(cmd) self._display.vvvvv(f"EXEC {local_cmd}", host=self._host()) local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] @@ -126,6 +161,25 @@ class Connection(ConnectionBase): return process.returncode, stdout, stderr + def _get_remote_uid_gid(self) -> tuple[int, int]: + """Get the user and group ID of 'remote_user' from the instance.""" + + rc, uid_out, err = self.exec_command("/bin/id -u") + if rc != 0: + raise AnsibleError( + f"Failed to get remote uid for user {self.get_option('remote_user')}: {err}" + ) + uid = uid_out.strip() + + rc, gid_out, err = self.exec_command("/bin/id -g") + if rc != 0: + raise AnsibleError( + f"Failed to get remote gid for user {self.get_option('remote_user')}: {err}" + ) + gid = gid_out.strip() + + return int(uid), int(gid) + def put_file(self, in_path, out_path): """ put a file from local to lxd """ super(Connection, self).put_file(in_path, out_path) @@ -138,11 +192,32 @@ class Connection(ConnectionBase): local_cmd = [self._lxc_cmd] if self.get_option("project"): local_cmd.extend(["--project", self.get_option("project")]) - local_cmd.extend([ - "file", "push", - in_path, - f"{self.get_option('remote')}:{self._host()}/{out_path}" - ]) + + if self.get_option("remote_user") != "root": + uid, gid = self._get_remote_uid_gid() + local_cmd.extend( + [ + "file", + "push", + "--uid", + str(uid), + "--gid", + str(gid), + in_path, + f"{self.get_option('remote')}:{self._host()}/{out_path}", + ] + ) + else: + local_cmd.extend( + [ + "file", + "push", + in_path, + f"{self.get_option('remote')}:{self._host()}/{out_path}", + ] + ) + + self._display.vvvvv(f"PUT {local_cmd}", host=self._host()) local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] diff --git a/plugins/connection/qubes.py b/plugins/connection/qubes.py index dee476308c..5a9963df2d 100644 --- a/plugins/connection/qubes.py +++ b/plugins/connection/qubes.py @@ -8,8 +8,7 @@ # # Written by: Kushal Das (https://github.com/kushaldas) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" diff --git a/plugins/connection/saltstack.py b/plugins/connection/saltstack.py index d9e5d3b1d9..f826741926 100644 --- a/plugins/connection/saltstack.py +++ b/plugins/connection/saltstack.py @@ -7,8 +7,7 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" author: Michael Scherer (@mscherer) diff --git a/plugins/connection/wsl.py b/plugins/connection/wsl.py new file mode 100644 index 0000000000..92ffec52b3 --- /dev/null +++ b/plugins/connection/wsl.py @@ -0,0 +1,795 @@ +# -*- coding: utf-8 -*- +# Derived from ansible/plugins/connection/proxmox_pct_remote.py (c) 2024 Nils Stein (@mietzen) +# Derived from ansible/plugins/connection/paramiko_ssh.py (c) 2012, Michael DeHaan +# Copyright (c) 2025 Rui Lopes (@rgl) +# Copyright (c) 2025 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +author: Rui Lopes (@rgl) +name: wsl +short_description: Run tasks in WSL distribution using wsl.exe CLI using SSH +requirements: + - paramiko +description: + - Run 
commands or put/fetch files to an existing WSL distribution using wsl.exe CLI using SSH. + - Uses the Python SSH implementation (Paramiko) to connect to the WSL host. +version_added: "10.6.0" +options: + remote_addr: + description: + - Address of the remote target. + default: inventory_hostname + type: string + vars: + - name: inventory_hostname + - name: ansible_host + - name: ansible_ssh_host + - name: ansible_paramiko_host + port: + description: Remote port to connect to. + type: int + default: 22 + ini: + - section: defaults + key: remote_port + - section: paramiko_connection + key: remote_port + env: + - name: ANSIBLE_REMOTE_PORT + - name: ANSIBLE_REMOTE_PARAMIKO_PORT + vars: + - name: ansible_port + - name: ansible_ssh_port + - name: ansible_paramiko_port + keyword: + - name: port + remote_user: + description: + - User to login/authenticate as. + - Can be set from the CLI with the C(--user) or C(-u) options. + type: string + vars: + - name: ansible_user + - name: ansible_ssh_user + - name: ansible_paramiko_user + env: + - name: ANSIBLE_REMOTE_USER + - name: ANSIBLE_PARAMIKO_REMOTE_USER + ini: + - section: defaults + key: remote_user + - section: paramiko_connection + key: remote_user + keyword: + - name: remote_user + password: + description: + - Secret used to either login the SSH server or as a passphrase for SSH keys that require it. + - Can be set from the CLI with the C(--ask-pass) option. + type: string + vars: + - name: ansible_password + - name: ansible_ssh_pass + - name: ansible_ssh_password + - name: ansible_paramiko_pass + - name: ansible_paramiko_password + use_rsa_sha2_algorithms: + description: + - Whether or not to enable RSA SHA2 algorithms for pubkeys and hostkeys. + - On paramiko versions older than 2.9, this only affects hostkeys. + - For behavior matching paramiko<2.9 set this to V(false). + vars: + - name: ansible_paramiko_use_rsa_sha2_algorithms + ini: + - {key: use_rsa_sha2_algorithms, section: paramiko_connection} + env: + - {name: ANSIBLE_PARAMIKO_USE_RSA_SHA2_ALGORITHMS} + default: true + type: boolean + host_key_auto_add: + description: "Automatically add host keys to C(~/.ssh/known_hosts)." + env: + - name: ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD + ini: + - key: host_key_auto_add + section: paramiko_connection + type: boolean + look_for_keys: + default: true + description: "Set to V(false) to disable searching for private key files in C(~/.ssh/)." + env: + - name: ANSIBLE_PARAMIKO_LOOK_FOR_KEYS + ini: + - {key: look_for_keys, section: paramiko_connection} + type: boolean + proxy_command: + default: "" + description: + - Proxy information for running the connection through a jumphost. + - This option is supported by paramiko version 1.9.0 or newer. + type: string + env: + - name: ANSIBLE_PARAMIKO_PROXY_COMMAND + ini: + - {key: proxy_command, section: paramiko_connection} + vars: + - name: ansible_paramiko_proxy_command + record_host_keys: + default: true + description: "Save the host keys to a file." + env: + - name: ANSIBLE_PARAMIKO_RECORD_HOST_KEYS + ini: + - section: paramiko_connection + key: record_host_keys + type: boolean + host_key_checking: + description: "Set this to V(false) if you want to avoid host key checking by the underlying tools Ansible uses to connect + to the host." 
+ type: boolean + default: true + env: + - name: ANSIBLE_HOST_KEY_CHECKING + - name: ANSIBLE_SSH_HOST_KEY_CHECKING + - name: ANSIBLE_PARAMIKO_HOST_KEY_CHECKING + ini: + - section: defaults + key: host_key_checking + - section: paramiko_connection + key: host_key_checking + vars: + - name: ansible_host_key_checking + - name: ansible_ssh_host_key_checking + - name: ansible_paramiko_host_key_checking + use_persistent_connections: + description: "Toggles the use of persistence for connections." + type: boolean + default: false + env: + - name: ANSIBLE_USE_PERSISTENT_CONNECTIONS + ini: + - section: defaults + key: use_persistent_connections + banner_timeout: + type: float + default: 30 + description: + - Configures, in seconds, the amount of time to wait for the SSH banner to be presented. + - This option is supported by paramiko version 1.15.0 or newer. + ini: + - section: paramiko_connection + key: banner_timeout + env: + - name: ANSIBLE_PARAMIKO_BANNER_TIMEOUT + timeout: + type: int + default: 10 + description: + - Number of seconds until the plugin gives up on failing to establish a TCP connection. + - This option is supported by paramiko version 2.2.0 or newer. + ini: + - section: defaults + key: timeout + - section: ssh_connection + key: timeout + - section: paramiko_connection + key: timeout + env: + - name: ANSIBLE_TIMEOUT + - name: ANSIBLE_SSH_TIMEOUT + - name: ANSIBLE_PARAMIKO_TIMEOUT + vars: + - name: ansible_ssh_timeout + - name: ansible_paramiko_timeout + cli: + - name: timeout + lock_file_timeout: + type: int + default: 60 + description: Number of seconds until the plugin gives up on trying to write a lock file when writing SSH known host keys. + vars: + - name: ansible_lock_file_timeout + env: + - name: ANSIBLE_LOCK_FILE_TIMEOUT + private_key_file: + description: + - Path to private key file to use for authentication. + type: path + ini: + - section: defaults + key: private_key_file + - section: paramiko_connection + key: private_key_file + env: + - name: ANSIBLE_PRIVATE_KEY_FILE + - name: ANSIBLE_PARAMIKO_PRIVATE_KEY_FILE + vars: + - name: ansible_private_key_file + - name: ansible_ssh_private_key_file + - name: ansible_paramiko_private_key_file + cli: + - name: private_key_file + option: "--private-key" + user_known_hosts_file: + description: + - Path to the user known hosts file. + - Used to verify the ssh hosts keys. + type: path + default: ~/.ssh/known_hosts + ini: + - section: paramiko_connection + key: user_known_hosts_file + vars: + - name: ansible_paramiko_user_known_hosts_file + wsl_distribution: + description: + - WSL distribution name. + type: string + required: true + vars: + - name: wsl_distribution + wsl_user: + description: + - WSL distribution user. + type: string + vars: + - name: wsl_user + become_user: + description: + - WSL distribution user. + type: string + default: root + vars: + - name: become_user + - name: ansible_become_user + become: + description: + - Whether to use the user defined by O(become_user). 
+ type: bool + default: false + vars: + - name: become + - name: ansible_become +""" + +EXAMPLES = r""" +# ------------------------ +# Inventory: inventory.yml +# ------------------------ +--- +all: + children: + wsl: + hosts: + example-wsl-ubuntu: + ansible_host: 10.0.0.10 + wsl_distribution: ubuntu + wsl_user: ubuntu + vars: + ansible_connection: community.general.wsl + ansible_user: vagrant +# ---------------------- +# Playbook: playbook.yml +# ---------------------- +--- +- name: WSL Example + hosts: wsl + gather_facts: true + become: true + tasks: + - name: Ping + ansible.builtin.ping: + - name: Id (with become false) + become: false + changed_when: false + args: + executable: /bin/bash + ansible.builtin.shell: | + exec 2>&1 + set -x + echo "$0" + pwd + id + - name: Id (with become true) + changed_when: false + args: + executable: /bin/bash + ansible.builtin.shell: | + exec 2>&1 + set -x + echo "$0" + pwd + id + - name: Reboot + ansible.builtin.reboot: + boot_time_command: systemctl show -p ActiveEnterTimestamp init.scope +""" + +import io +import os +import pathlib +import shlex +import socket +import tempfile +import traceback +import typing as t + +from ansible.errors import ( + AnsibleAuthenticationFailure, + AnsibleConnectionFailure, + AnsibleError, +) +from ansible_collections.community.general.plugins.module_utils._filelock import FileLock, LockTimeout +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion +from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text +from ansible.playbook.play_context import PlayContext +from ansible.plugins.connection import ConnectionBase +from ansible.utils.display import Display +from ansible.utils.path import makedirs_safe +from binascii import hexlify +from subprocess import list2cmdline + +try: + import paramiko + PARAMIKO_IMPORT_ERR = None +except ImportError: + paramiko = None + PARAMIKO_IMPORT_ERR = traceback.format_exc() + + +if t.TYPE_CHECKING and PARAMIKO_IMPORT_ERR is None: + from paramiko import MissingHostKeyPolicy + from paramiko.client import SSHClient + from paramiko.pkey import PKey +else: + MissingHostKeyPolicy: type = object + SSHClient: type = object + PKey: type = object + + +display = Display() + + +def authenticity_msg(hostname: str, ktype: str, fingerprint: str) -> str: + msg = f""" + paramiko: The authenticity of host '{hostname}' can't be established. + The {ktype} key fingerprint is {fingerprint}. + Are you sure you want to continue connecting (yes/no)? + """ + return msg + + +class MyAddPolicy(MissingHostKeyPolicy): + """ + Based on AutoAddPolicy in paramiko so we can determine when keys are added + + and also prompt for input. + + Policy for automatically adding the hostname and new host key to the + local L{HostKeys} object, and saving it. This is used by L{SSHClient}. 
+ """ + + def __init__(self, connection: Connection) -> None: + self.connection = connection + self._options = connection._options + + def missing_host_key(self, client: SSHClient, hostname: str, key: PKey) -> None: + + if all((self.connection.get_option('host_key_checking'), not self.connection.get_option('host_key_auto_add'))): + + fingerprint = hexlify(key.get_fingerprint()) + ktype = key.get_name() + + if self.connection.get_option('use_persistent_connections') or self.connection.force_persistence: + # don't print the prompt string since the user cannot respond + # to the question anyway + raise AnsibleError(authenticity_msg(hostname, ktype, fingerprint)[1:92]) + + inp = to_text( + display.prompt_until(authenticity_msg(hostname, ktype, fingerprint), private=False), + errors='surrogate_or_strict' + ) + + if inp.lower() not in ['yes', 'y', '']: + raise AnsibleError('host connection rejected by user') + + key._added_by_ansible_this_time = True + + # existing implementation below: + client._host_keys.add(hostname, key.get_name(), key) + + # host keys are actually saved in close() function below + # in order to control ordering. + + +class Connection(ConnectionBase): + """ SSH based connections (paramiko) to WSL """ + + transport = 'community.general.wsl' + _log_channel: str | None = None + + def __init__(self, play_context: PlayContext, new_stdin: io.TextIOWrapper | None = None, *args: t.Any, **kwargs: t.Any): + super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) + + def _set_log_channel(self, name: str) -> None: + """ Mimic paramiko.SSHClient.set_log_channel """ + self._log_channel = name + + def _parse_proxy_command(self, port: int = 22) -> dict[str, t.Any]: + proxy_command = self.get_option('proxy_command') or None + + sock_kwarg = {} + if proxy_command: + replacers: t.Dict[str, str] = { + '%h': self.get_option('remote_addr'), + '%p': str(port), + '%r': self.get_option('remote_user') + } + for find, replace in replacers.items(): + proxy_command = proxy_command.replace(find, replace) + try: + sock_kwarg = {'sock': paramiko.ProxyCommand(proxy_command)} + display.vvv(f'CONFIGURE PROXY COMMAND FOR CONNECTION: {proxy_command}', host=self.get_option('remote_addr')) + except AttributeError: + display.warning('Paramiko ProxyCommand support unavailable. ' + 'Please upgrade to Paramiko 1.9.0 or newer. 
' + 'Not using configured ProxyCommand') + + return sock_kwarg + + def _connect(self) -> Connection: + """ activates the connection object """ + + if PARAMIKO_IMPORT_ERR is not None: + raise AnsibleError(f'paramiko is not installed: {to_native(PARAMIKO_IMPORT_ERR)}') + + port = self.get_option('port') + display.vvv(f'ESTABLISH PARAMIKO SSH CONNECTION FOR USER: {self.get_option("remote_user")} on PORT {to_text(port)} TO {self.get_option("remote_addr")}', + host=self.get_option('remote_addr')) + + ssh = paramiko.SSHClient() + + # Set pubkey and hostkey algorithms to disable, the only manipulation allowed currently + # is keeping or omitting rsa-sha2 algorithms + # default_keys: t.Tuple[str] = () + paramiko_preferred_pubkeys = getattr(paramiko.Transport, '_preferred_pubkeys', ()) + paramiko_preferred_hostkeys = getattr(paramiko.Transport, '_preferred_keys', ()) + use_rsa_sha2_algorithms = self.get_option('use_rsa_sha2_algorithms') + disabled_algorithms: t.Dict[str, t.Iterable[str]] = {} + if not use_rsa_sha2_algorithms: + if paramiko_preferred_pubkeys: + disabled_algorithms['pubkeys'] = tuple(a for a in paramiko_preferred_pubkeys if 'rsa-sha2' in a) + if paramiko_preferred_hostkeys: + disabled_algorithms['keys'] = tuple(a for a in paramiko_preferred_hostkeys if 'rsa-sha2' in a) + + # override paramiko's default logger name + if self._log_channel is not None: + ssh.set_log_channel(self._log_channel) + + self.keyfile = os.path.expanduser(self.get_option('user_known_hosts_file')) + + if self.get_option('host_key_checking'): + for ssh_known_hosts in ('/etc/ssh/ssh_known_hosts', '/etc/openssh/ssh_known_hosts', self.keyfile): + try: + ssh.load_system_host_keys(ssh_known_hosts) + break + except IOError: + pass # file was not found, but not required to function + except paramiko.hostkeys.InvalidHostKey as e: + raise AnsibleConnectionFailure(f'Invalid host key: {to_text(e.line)}') + try: + ssh.load_system_host_keys() + except paramiko.hostkeys.InvalidHostKey as e: + raise AnsibleConnectionFailure(f'Invalid host key: {to_text(e.line)}') + + ssh_connect_kwargs = self._parse_proxy_command(port) + ssh.set_missing_host_key_policy(MyAddPolicy(self)) + conn_password = self.get_option('password') + allow_agent = True + + if conn_password is not None: + allow_agent = False + + try: + key_filename = None + if self.get_option('private_key_file'): + key_filename = os.path.expanduser(self.get_option('private_key_file')) + + # paramiko 2.2 introduced auth_timeout parameter + if LooseVersion(paramiko.__version__) >= LooseVersion('2.2.0'): + ssh_connect_kwargs['auth_timeout'] = self.get_option('timeout') + + # paramiko 1.15 introduced banner timeout parameter + if LooseVersion(paramiko.__version__) >= LooseVersion('1.15.0'): + ssh_connect_kwargs['banner_timeout'] = self.get_option('banner_timeout') + + ssh.connect( + self.get_option('remote_addr').lower(), + username=self.get_option('remote_user'), + allow_agent=allow_agent, + look_for_keys=self.get_option('look_for_keys'), + key_filename=key_filename, + password=conn_password, + timeout=self.get_option('timeout'), + port=port, + disabled_algorithms=disabled_algorithms, + **ssh_connect_kwargs, + ) + except paramiko.ssh_exception.BadHostKeyException as e: + raise AnsibleConnectionFailure(f'host key mismatch for {to_text(e.hostname)}') + except paramiko.ssh_exception.AuthenticationException as e: + msg = f'Failed to authenticate: {e}' + raise AnsibleAuthenticationFailure(msg) + except Exception as e: + msg = to_text(e) + if u'PID check failed' in msg: + raise 
AnsibleError('paramiko version issue, please upgrade paramiko on the machine running ansible')
+            elif u'Private key file is encrypted' in msg:
+                msg = (
+                    f'ssh {self.get_option("remote_user")}@{self.get_option("remote_addr")}:{port} : '
+                    f'{msg}\nTo connect as a different user, use -u <username>.'
+                )
+                raise AnsibleConnectionFailure(msg)
+            else:
+                raise AnsibleConnectionFailure(msg)
+        self.ssh = ssh
+        self._connected = True
+        return self
+
+    def _any_keys_added(self) -> bool:
+        for hostname, keys in self.ssh._host_keys.items():
+            for keytype, key in keys.items():
+                added_this_time = getattr(key, '_added_by_ansible_this_time', False)
+                if added_this_time:
+                    return True
+        return False
+
+    def _save_ssh_host_keys(self, filename: str) -> None:
+        """
+        not using the paramiko save_ssh_host_keys function as we want to add new SSH keys at the bottom so folks
+        don't complain about it :)
+        """
+
+        if not self._any_keys_added():
+            return
+
+        path = os.path.expanduser('~/.ssh')
+        makedirs_safe(path)
+
+        with open(filename, 'w') as f:
+            # previously known keys are written first, keys added this run last
+            for hostname, keys in self.ssh._host_keys.items():
+                for keytype, key in keys.items():
+                    added_this_time = getattr(key, '_added_by_ansible_this_time', False)
+                    if not added_this_time:
+                        f.write(f'{hostname} {keytype} {key.get_base64()}\n')
+
+            for hostname, keys in self.ssh._host_keys.items():
+                for keytype, key in keys.items():
+                    added_this_time = getattr(key, '_added_by_ansible_this_time', False)
+                    if added_this_time:
+                        f.write(f'{hostname} {keytype} {key.get_base64()}\n')
+
+    def _build_wsl_command(self, cmd: str) -> str:
+        wsl_distribution = self.get_option('wsl_distribution')
+        become = self.get_option('become')
+        become_user = self.get_option('become_user')
+        if become and become_user:
+            wsl_user = become_user
+        else:
+            wsl_user = self.get_option('wsl_user')
+        args = ['wsl.exe', '--distribution', wsl_distribution]
+        if wsl_user:
+            args.extend(['--user', wsl_user])
+        args.extend(['--'])
+        args.extend(shlex.split(cmd))
+        if os.getenv('_ANSIBLE_TEST_WSL_CONNECTION_PLUGIN_Waeri5tepheeSha2fae8'):
+            return shlex.join(args)
+        return list2cmdline(args)  # see https://github.com/python/cpython/blob/3.11/Lib/subprocess.py#L576
+
+    def exec_command(self, cmd: str, in_data: bytes | None = None, sudoable: bool = True) -> tuple[int, bytes, bytes]:
+        """ run a command inside a WSL distribution """
+
+        cmd = self._build_wsl_command(cmd)
+
+        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+        bufsize = 4096
+
+        try:
+            self.ssh.get_transport().set_keepalive(5)
+            chan = self.ssh.get_transport().open_session()
+        except Exception as e:
+            text_e = to_text(e)
+            msg = 'Failed to open session'
+            if text_e:
+                msg += f': {text_e}'
+            raise AnsibleConnectionFailure(to_native(msg))
+
+        display.vvv(f'EXEC {cmd}', host=self.get_option('remote_addr'))
+
+        cmd = to_bytes(cmd, errors='surrogate_or_strict')
+
+        no_prompt_out = b''
+        no_prompt_err = b''
+        become_output = b''
+
+        try:
+            chan.exec_command(cmd)
+            if self.become and self.become.expect_prompt():
+                password_prompt = False
+                become_success = False
+                while not (become_success or password_prompt):
+                    display.debug('Waiting for Privilege Escalation input')
+
+                    chunk = chan.recv(bufsize)
+                    display.debug(f'chunk is: {to_text(chunk)}')
+                    if not chunk:
+                        if b'unknown user' in become_output:
+                            n_become_user = to_native(self.become.get_option('become_user'))
+                            raise AnsibleError(f'user {n_become_user} does not exist')
+                        else:
+                            break
+                            # raise AnsibleError('ssh connection closed waiting for password prompt')
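+                    # Illustrative sketch of a successful sudo exchange as seen by this
+                    # loop; the prompt text and the success marker emitted by the become
+                    # plugin vary, so the bytes shown here are hypothetical:
+                    #   recv -> b'[sudo] password for user: '  (password prompt detected)
+                    #   send -> become_pass + '\n'
+                    #   recv -> b'BECOME-SUCCESS-...\n'        (success marker detected)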
+                    become_output += chunk
+
+                    # need to check every line because we might get lectured
+                    # and we might get the middle of a line in a chunk
+                    for line in become_output.splitlines(True):
+                        if self.become.check_success(line):
+                            become_success = True
+                            break
+                        elif self.become.check_password_prompt(line):
+                            password_prompt = True
+                            break
+
+                if password_prompt:
+                    if self.become:
+                        become_pass = self.become.get_option('become_pass')
+                        chan.sendall(to_bytes(become_pass + '\n', errors='surrogate_or_strict'))
+                    else:
+                        raise AnsibleError('A password is required but none was supplied')
+                else:
+                    no_prompt_out += become_output
+                    no_prompt_err += become_output
+
+            if in_data:
+                for i in range(0, len(in_data), bufsize):
+                    chan.send(in_data[i:i + bufsize])
+                chan.shutdown_write()
+            elif in_data == b'':
+                chan.shutdown_write()
+
+        except socket.timeout:
+            raise AnsibleError(f'ssh timed out waiting for privilege escalation.\n{to_text(become_output)}')
+
+        stdout = b''.join(chan.makefile('rb', bufsize))
+        stderr = b''.join(chan.makefile_stderr('rb', bufsize))
+        returncode = chan.recv_exit_status()
+
+        # NB the full english error message is:
+        # 'wsl.exe' is not recognized as an internal or external command,
+        # operable program or batch file.
+        if "'wsl.exe' is not recognized" in stderr.decode('utf-8'):
+            raise AnsibleError(
+                f'wsl.exe not found in path of host: {to_text(self.get_option("remote_addr"))}')
+
+        return (returncode, no_prompt_out + stdout, no_prompt_err + stderr)
+
+    def put_file(self, in_path: str, out_path: str) -> None:
+        """ transfer a file from local to remote """
+
+        display.vvv(f'PUT {in_path} TO {out_path}', host=self.get_option('remote_addr'))
+        try:
+            with open(in_path, 'rb') as f:
+                data = f.read()
+            returncode, stdout, stderr = self.exec_command(
+                ' '.join([
+                    self._shell.executable, '-c',
+                    self._shell.quote(f'cat > {out_path}')]),
+                in_data=data,
+                sudoable=False)
+            if returncode != 0:
+                if 'cat: not found' in stderr.decode('utf-8'):
+                    raise AnsibleError(
+                        f'cat not found in path of WSL distribution: {to_text(self.get_option("wsl_distribution"))}')
+                raise AnsibleError(
+                    f'{to_text(stdout)}\n{to_text(stderr)}')
+        except Exception as e:
+            raise AnsibleError(
+                f'error occurred while putting file from {in_path} to {out_path}!\n{to_text(e)}')
+
+    def fetch_file(self, in_path: str, out_path: str) -> None:
+        """ save a remote file to the specified path """
+
+        display.vvv(f'FETCH {in_path} TO {out_path}', host=self.get_option('remote_addr'))
+        try:
+            returncode, stdout, stderr = self.exec_command(
+                ' '.join([
+                    self._shell.executable, '-c',
+                    self._shell.quote(f'cat {in_path}')]),
+                sudoable=False)
+            if returncode != 0:
+                if 'cat: not found' in stderr.decode('utf-8'):
+                    raise AnsibleError(
+                        f'cat not found in path of WSL distribution: {to_text(self.get_option("wsl_distribution"))}')
+                raise AnsibleError(
+                    f'{to_text(stdout)}\n{to_text(stderr)}')
+            with open(out_path, 'wb') as f:
+                f.write(stdout)
+        except Exception as e:
+            raise AnsibleError(
+                f'error occurred while fetching file from {in_path} to {out_path}!\n{to_text(e)}')
+
+    def reset(self) -> None:
+        """ reset the connection """
+
+        if not self._connected:
+            return
+        self.close()
+        self._connect()
+
+    def close(self) -> None:
+        """ terminate the connection """
+
+        if self.get_option('host_key_checking') and self.get_option('record_host_keys') and self._any_keys_added():
+            # add any new SSH host keys -- warning -- this could be slow
+            # (This doesn't acquire the connection lock because it needs
+            # to exclude only other known_hosts writers, not connections
+            # that are starting up.)
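+            # Sketch of the update below: take a file lock beside the known_hosts
+            # file, write all keys to a temporary file in the same directory, then
+            # os.rename() it over self.keyfile; the rename is atomic on POSIX
+            # filesystems, so readers never observe a partially written file.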
+            lockfile = os.path.basename(self.keyfile)
+            dirname = os.path.dirname(self.keyfile)
+            makedirs_safe(dirname)
+            tmp_keyfile_name = None
+            try:
+                with FileLock().lock_file(lockfile, dirname, self.get_option('lock_file_timeout')):
+                    # just in case any were added recently
+                    self.ssh.load_system_host_keys()
+                    self.ssh._host_keys.update(self.ssh._system_host_keys)
+
+                    # gather information about the current key file, so
+                    # we can ensure the new file has the correct mode/owner
+                    key_dir = os.path.dirname(self.keyfile)
+                    if os.path.exists(self.keyfile):
+                        key_stat = os.stat(self.keyfile)
+                        mode = key_stat.st_mode & 0o777
+                        uid = key_stat.st_uid
+                        gid = key_stat.st_gid
+                    else:
+                        mode = 0o644
+                        uid = os.getuid()
+                        gid = os.getgid()
+
+                    # Save the new keys to a temporary file and move it into place
+                    # rather than rewriting the file. We set delete=False because
+                    # the file will be moved into place rather than cleaned up.
+                    with tempfile.NamedTemporaryFile(dir=key_dir, delete=False) as tmp_keyfile:
+                        tmp_keyfile_name = tmp_keyfile.name
+                        os.chmod(tmp_keyfile_name, mode)
+                        os.chown(tmp_keyfile_name, uid, gid)
+                        self._save_ssh_host_keys(tmp_keyfile_name)
+
+                    os.rename(tmp_keyfile_name, self.keyfile)
+            except LockTimeout:
+                raise AnsibleError(
+                    f'writing lock file for {self.keyfile} ran into the timeout of {self.get_option("lock_file_timeout")}s')
+            except paramiko.hostkeys.InvalidHostKey as e:
+                raise AnsibleConnectionFailure(f'Invalid host key: {e.line}')
+            except Exception as e:
+                # unable to save keys, including scenario when key was invalid
+                # and caught earlier
+                raise AnsibleError(
+                    f'error occurred while writing SSH host keys!\n{to_text(e)}')
+            finally:
+                if tmp_keyfile_name is not None:
+                    pathlib.Path(tmp_keyfile_name).unlink(missing_ok=True)
+
+        self.ssh.close()
+        self._connected = False
diff --git a/plugins/connection/zone.py b/plugins/connection/zone.py
index aa5442f28e..baca9312b3 100644
--- a/plugins/connection/zone.py
+++ b/plugins/connection/zone.py
@@ -8,8 +8,7 @@
 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
 # SPDX-License-Identifier: GPL-3.0-or-later
 
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
 
 DOCUMENTATION = r"""
 author: Ansible Core Team
diff --git a/plugins/doc_fragments/alicloud.py b/plugins/doc_fragments/alicloud.py
index 3b810852b7..cf7255b465 100644
--- a/plugins/doc_fragments/alicloud.py
+++ b/plugins/doc_fragments/alicloud.py
@@ -16,31 +16,31 @@ options:
   alicloud_access_key:
     description:
       - Alibaba Cloud access key. If not set then the value of environment variable E(ALICLOUD_ACCESS_KEY), E(ALICLOUD_ACCESS_KEY_ID)
-        will be used instead.
+        is used instead.
     aliases: ['access_key_id', 'access_key']
     type: str
   alicloud_secret_key:
     description:
       - Alibaba Cloud secret key. If not set then the value of environment variable E(ALICLOUD_SECRET_KEY), E(ALICLOUD_SECRET_ACCESS_KEY)
-        will be used instead.
+        is used instead.
     aliases: ['secret_access_key', 'secret_key']
     type: str
   alicloud_region:
     description:
      - The Alibaba Cloud region to use. If not specified then the value of environment variable E(ALICLOUD_REGION), E(ALICLOUD_REGION_ID)
-        will be used instead.
+        is used instead.
     aliases: ['region', 'region_id']
     required: true
     type: str
   alicloud_security_token:
     description:
      - The Alibaba Cloud security token. 
If not specified then the value of environment variable E(ALICLOUD_SECURITY_TOKEN) - will be used instead. + is used instead. aliases: ['security_token'] type: str alicloud_assume_role: description: - - If provided with a role ARN, Ansible will attempt to assume this role using the supplied credentials. + - If provided with a role ARN, Ansible attempts to assume this role using the supplied credentials. - The nested assume_role block supports C(alicloud_assume_role_arn), C(alicloud_assume_role_session_name), C(alicloud_assume_role_session_expiration) and C(alicloud_assume_role_policy). type: dict @@ -48,7 +48,7 @@ options: alicloud_assume_role_arn: description: - The Alibaba Cloud C(role_arn). The ARN of the role to assume. If ARN is set to an empty string, it does not perform - role switching. It supports environment variable E(ALICLOUD_ASSUME_ROLE_ARN). ansible will execute with provided credentials. + role switching. It supports environment variable E(ALICLOUD_ASSUME_ROLE_ARN). Ansible executes with provided credentials. aliases: ['assume_role_arn'] type: str alicloud_assume_role_session_name: @@ -68,7 +68,7 @@ options: description: - The RAM Role Name attached on a ECS instance for API operations. You can retrieve this from the 'Access Control' section of the Alibaba Cloud console. - - If you are running Ansible from an ECS instance with RAM Instance using RAM Role, Ansible will just access the metadata + - If you are running Ansible from an ECS instance with RAM Instance using RAM Role, Ansible just accesses the metadata U(http://100.100.100.200/latest/meta-data/ram/security-credentials/) to obtain the STS credential. This is a preferred approach over any other when running in ECS as you can avoid hard coding credentials. Instead these are leased on-the-fly by Ansible which reduces the chance of leakage. @@ -83,7 +83,7 @@ options: description: - This is the path to the shared credentials file. It can also be sourced from the E(ALICLOUD_SHARED_CREDENTIALS_FILE) environment variable. - - If this is not set and a profile is specified, C(~/.aliyun/config.json) will be used. + - If this is not set and a profile is specified, C(~/.aliyun/config.json) is used. type: str author: - "He Guimin (@xiaozhu36)" diff --git a/plugins/doc_fragments/attributes.py b/plugins/doc_fragments/attributes.py index 2ab083eab2..18b02575c4 100644 --- a/plugins/doc_fragments/attributes.py +++ b/plugins/doc_fragments/attributes.py @@ -17,7 +17,7 @@ attributes: check_mode: description: Can run in C(check_mode) and return changed status prediction without modifying target. diff_mode: - description: Will return details on what has changed (or possibly needs changing in C(check_mode)), when in diff mode. + description: Returns details on what has changed (or possibly needs changing in C(check_mode)), when in diff mode. """ PLATFORM = r""" @@ -32,14 +32,14 @@ attributes: INFO_MODULE = r''' options: {} attributes: - check_mode: - support: full - details: - - This action does not modify state. - diff_mode: - support: N/A - details: - - This action does not modify state. + check_mode: + support: full + details: + - This action does not modify state. + diff_mode: + support: N/A + details: + - This action does not modify state. ''' CONN = r""" @@ -57,23 +57,23 @@ attributes: options: {} attributes: facts: - description: Action returns an C(ansible_facts) dictionary that will update existing host facts. + description: Action returns an C(ansible_facts) dictionary that updates existing host facts. 
""" # Should be used together with the standard fragment and the FACTS fragment FACTS_MODULE = r''' options: {} attributes: - check_mode: - support: full - details: - - This action does not modify state. - diff_mode: - support: N/A - details: - - This action does not modify state. - facts: - support: full + check_mode: + support: full + details: + - This action does not modify state. + diff_mode: + support: N/A + details: + - This action does not modify state. + facts: + support: full ''' FILES = r""" diff --git a/plugins/doc_fragments/bitbucket.py b/plugins/doc_fragments/bitbucket.py index e8b9ea4df8..65c4c47b51 100644 --- a/plugins/doc_fragments/bitbucket.py +++ b/plugins/doc_fragments/bitbucket.py @@ -16,17 +16,17 @@ options: client_id: description: - The OAuth consumer key. - - If not set the environment variable E(BITBUCKET_CLIENT_ID) will be used. + - If not set the environment variable E(BITBUCKET_CLIENT_ID) is used. type: str client_secret: description: - The OAuth consumer secret. - - If not set the environment variable E(BITBUCKET_CLIENT_SECRET) will be used. + - If not set the environment variable E(BITBUCKET_CLIENT_SECRET) is used. type: str user: description: - The username. - - If not set the environment variable E(BITBUCKET_USERNAME) will be used. + - If not set the environment variable E(BITBUCKET_USERNAME) is used. - O(ignore:username) is an alias of O(user) since community.general 6.0.0. It was an alias of O(workspace) before. type: str version_added: 4.0.0 @@ -34,7 +34,7 @@ options: password: description: - The App password. - - If not set the environment variable E(BITBUCKET_PASSWORD) will be used. + - If not set the environment variable E(BITBUCKET_PASSWORD) is used. type: str version_added: 4.0.0 notes: diff --git a/plugins/doc_fragments/dimensiondata.py b/plugins/doc_fragments/dimensiondata.py index ece97addf0..890c4d741e 100644 --- a/plugins/doc_fragments/dimensiondata.py +++ b/plugins/doc_fragments/dimensiondata.py @@ -28,12 +28,12 @@ options: mcp_user: description: - The username used to authenticate to the CloudControl API. - - If not specified, will fall back to E(MCP_USER) from environment variable or C(~/.dimensiondata). + - If not specified, falls back to E(MCP_USER) from environment variable or C(~/.dimensiondata). type: str mcp_password: description: - The password used to authenticate to the CloudControl API. - - If not specified, will fall back to E(MCP_PASSWORD) from environment variable or C(~/.dimensiondata). + - If not specified, falls back to E(MCP_PASSWORD) from environment variable or C(~/.dimensiondata). - Required if O(mcp_user) is specified. type: str location: @@ -43,7 +43,7 @@ options: required: true validate_certs: description: - - If V(false), SSL certificates will not be validated. + - If V(false), SSL certificates are not validated. - This should only be used on private instances of the CloudControl API that use self-signed certificates. type: bool default: true diff --git a/plugins/doc_fragments/django.py b/plugins/doc_fragments/django.py index 3dcdb40171..5d01c8323e 100644 --- a/plugins/doc_fragments/django.py +++ b/plugins/doc_fragments/django.py @@ -18,13 +18,13 @@ options: settings: description: - Specifies the settings module to use. - - The value will be passed as is to the C(--settings) argument in C(django-admin). + - The value is passed as is to the C(--settings) argument in C(django-admin). type: str required: true pythonpath: description: - Adds the given filesystem path to the Python import search path. 
- - The value will be passed as is to the C(--pythonpath) argument in C(django-admin). + - The value is passed as is to the C(--pythonpath) argument in C(django-admin). type: path traceback: description: diff --git a/plugins/doc_fragments/emc.py b/plugins/doc_fragments/emc.py index 7c62285a72..14dc7bc129 100644 --- a/plugins/doc_fragments/emc.py +++ b/plugins/doc_fragments/emc.py @@ -13,21 +13,21 @@ class ModuleDocFragment(object): # Documentation fragment for VNX (emc_vnx) EMC_VNX = r''' options: - sp_address: - description: - - Address of the SP of target/secondary storage. - type: str - required: true - sp_user: - description: - - Username for accessing SP. - type: str - default: sysadmin - sp_password: - description: - - password for accessing SP. - type: str - default: sysadmin + sp_address: + description: + - Address of the SP of target/secondary storage. + type: str + required: true + sp_user: + description: + - Username for accessing SP. + type: str + default: sysadmin + sp_password: + description: + - password for accessing SP. + type: str + default: sysadmin requirements: - An EMC VNX Storage device. - storops (0.5.10 or greater). Install using C(pip install storops). diff --git a/plugins/doc_fragments/hwc.py b/plugins/doc_fragments/hwc.py index 3d478beb59..ea54c80c09 100644 --- a/plugins/doc_fragments/hwc.py +++ b/plugins/doc_fragments/hwc.py @@ -55,5 +55,5 @@ notes: - For authentication, you can set domain using the E(ANSIBLE_HWC_DOMAIN) environment variable. - For authentication, you can set project using the E(ANSIBLE_HWC_PROJECT) environment variable. - For authentication, you can set region using the E(ANSIBLE_HWC_REGION) environment variable. - - Environment variables values will only be used if the playbook values are not set. + - Environment variables values are only used when the playbook values are not set. """ diff --git a/plugins/doc_fragments/influxdb.py b/plugins/doc_fragments/influxdb.py index 9cf47d340a..5dbebea846 100644 --- a/plugins/doc_fragments/influxdb.py +++ b/plugins/doc_fragments/influxdb.py @@ -20,13 +20,13 @@ options: default: localhost username: description: - - Username that will be used to authenticate against InfluxDB server. + - Username that is used to authenticate against InfluxDB server. type: str default: root aliases: [login_username] password: description: - - Password that will be used to authenticate against InfluxDB server. + - Password that is used to authenticate against InfluxDB server. type: str default: root aliases: [login_password] @@ -44,7 +44,7 @@ options: version_added: '0.2.0' validate_certs: description: - - If set to V(false), the SSL certificates will not be validated. + - If set to V(false), the SSL certificates are not validated. - This should only set to V(false) used on personally controlled sites using self-signed certificates. type: bool default: true @@ -55,11 +55,11 @@ options: default: false timeout: description: - - Number of seconds Requests will wait for client to establish a connection. + - Number of seconds Requests waits for client to establish a connection. type: int retries: description: - - Number of retries client will try before aborting. + - Number of retries client performs before aborting. - V(0) indicates try until success. - Only available when using C(python-influxdb) >= 4.1.0. 
type: int diff --git a/plugins/doc_fragments/ipa.py b/plugins/doc_fragments/ipa.py index 0edb947aa5..63ea94b465 100644 --- a/plugins/doc_fragments/ipa.py +++ b/plugins/doc_fragments/ipa.py @@ -16,43 +16,43 @@ options: ipa_port: description: - Port of FreeIPA / IPA server. - - If the value is not specified in the task, the value of environment variable E(IPA_PORT) will be used instead. + - If the value is not specified in the task, the value of environment variable E(IPA_PORT) is used instead. - If both the environment variable E(IPA_PORT) and the value are not specified in the task, then default value is set. type: int default: 443 ipa_host: description: - IP or hostname of IPA server. - - If the value is not specified in the task, the value of environment variable E(IPA_HOST) will be used instead. - - If both the environment variable E(IPA_HOST) and the value are not specified in the task, then DNS will be used to - try to discover the FreeIPA server. + - If the value is not specified in the task, the value of environment variable E(IPA_HOST) is used instead. + - If both the environment variable E(IPA_HOST) and the value are not specified in the task, then DNS is used to try + to discover the FreeIPA server. - The relevant entry needed in FreeIPA is the C(ipa-ca) entry. - If neither the DNS entry, nor the environment E(IPA_HOST), nor the value are available in the task, then the default - value will be used. + value is used. type: str default: ipa.example.com ipa_user: description: - Administrative account used on IPA server. - - If the value is not specified in the task, the value of environment variable E(IPA_USER) will be used instead. + - If the value is not specified in the task, the value of environment variable E(IPA_USER) is used instead. - If both the environment variable E(IPA_USER) and the value are not specified in the task, then default value is set. type: str default: admin ipa_pass: description: - Password of administrative user. - - If the value is not specified in the task, the value of environment variable E(IPA_PASS) will be used instead. + - If the value is not specified in the task, the value of environment variable E(IPA_PASS) is used instead. - Note that if the C(urllib_gssapi) library is available, it is possible to use GSSAPI to authenticate to FreeIPA. - - If the environment variable E(KRB5CCNAME) is available, the module will use this kerberos credentials cache to authenticate + - If the environment variable E(KRB5CCNAME) is available, the module uses this Kerberos credentials cache to authenticate to the FreeIPA server. - - If the environment variable E(KRB5_CLIENT_KTNAME) is available, and E(KRB5CCNAME) is not; the module will use this - kerberos keytab to authenticate. + - If the environment variable E(KRB5_CLIENT_KTNAME) is available, and E(KRB5CCNAME) is not; the module uses this Kerberos + keytab to authenticate. - If GSSAPI is not available, the usage of O(ipa_pass) is required. type: str ipa_prot: description: - Protocol used by IPA server. - - If the value is not specified in the task, the value of environment variable E(IPA_PROT) will be used instead. + - If the value is not specified in the task, the value of environment variable E(IPA_PROT) is used instead. - If both the environment variable E(IPA_PROT) and the value are not specified in the task, then default value is set. type: str choices: [http, https] @@ -60,7 +60,7 @@ options: validate_certs: description: - This only applies if O(ipa_prot) is V(https). 
-      - If set to V(false), the SSL certificates will not be validated.
+      - If set to V(false), the SSL certificates are not validated.
       - This should only set to V(false) used on personally controlled sites using self-signed certificates.
     type: bool
     default: true
@@ -68,7 +68,7 @@ options:
     description:
       - Specifies idle timeout (in seconds) for the connection.
       - For bulk operations, you may want to increase this in order to avoid timeout from IPA server.
-      - If the value is not specified in the task, the value of environment variable E(IPA_TIMEOUT) will be used instead.
+      - If the value is not specified in the task, the value of environment variable E(IPA_TIMEOUT) is used instead.
       - If both the environment variable E(IPA_TIMEOUT) and the value are not specified in the task, then default value
         is set.
     type: int
diff --git a/plugins/doc_fragments/keycloak.py b/plugins/doc_fragments/keycloak.py
index 102a60ab33..75c458d5fc 100644
--- a/plugins/doc_fragments/keycloak.py
+++ b/plugins/doc_fragments/keycloak.py
@@ -57,6 +57,12 @@ options:
     type: str
     version_added: 3.0.0
 
+  refresh_token:
+    description:
+      - Authentication refresh token for Keycloak API.
+    type: str
+    version_added: 10.3.0
+
   validate_certs:
     description:
       - Verify TLS certificates (do not disable this in production).
diff --git a/plugins/doc_fragments/ldap.py b/plugins/doc_fragments/ldap.py
index bc182bb36e..abdb32adb7 100644
--- a/plugins/doc_fragments/ldap.py
+++ b/plugins/doc_fragments/ldap.py
@@ -14,9 +14,10 @@ class ModuleDocFragment(object):
     # Standard LDAP documentation fragment
     DOCUMENTATION = r"""
 notes:
-  - The default authentication settings will attempt to use a SASL EXTERNAL bind over a UNIX domain socket. This works well with the default Ubuntu
-    install for example, which includes a C(cn=peercred,cn=external,cn=auth) ACL rule allowing root to modify the server configuration. If you need
-    to use a simple bind to access your server, pass the credentials in O(bind_dn) and O(bind_pw).
+  - The default authentication settings attempt to use a SASL EXTERNAL bind over a UNIX domain socket. This works well with
+    the default Ubuntu install for example, which includes a C(cn=peercred,cn=external,cn=auth) ACL rule allowing root to
+    modify the server configuration. If you need to use a simple bind to access your server, pass the credentials in O(bind_dn)
+    and O(bind_pw).
 options:
   bind_dn:
     description:
@@ -75,7 +76,7 @@ options:
     default: false
   validate_certs:
     description:
-      - If set to V(false), SSL certificates will not be validated.
+      - If set to V(false), SSL certificates are not validated.
       - This should only be used on sites using self-signed certificates.
     type: bool
     default: true
@@ -89,9 +90,9 @@ options:
   xorder_discovery:
     description:
       - Set the behavior on how to process Xordered DNs.
-      - V(enable) will perform a C(ONELEVEL) search below the superior RDN to find the matching DN.
-      - V(disable) will always use the DN unmodified (as passed by the O(dn) parameter).
-      - V(auto) will only perform a search if the first RDN does not contain an index number (C({x})).
+      - V(enable) performs a C(ONELEVEL) search below the superior RDN to find the matching DN.
+      - V(disable) always uses the DN unmodified (as passed by the O(dn) parameter).
+      - V(auto) only performs a search if the first RDN does not contain an index number (C({x})).
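+      # Illustrative, hypothetical DN: with V(enable), O(dn=olcDatabase=mdb,cn=config)
+      # can match the X-ORDERED entry olcDatabase={1}mdb,cn=config; with V(disable),
+      # the DN is used exactly as written.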
type: str choices: ['enable', 'auto', 'disable'] default: auto diff --git a/plugins/doc_fragments/onepassword.py b/plugins/doc_fragments/onepassword.py index a67c9e4dc1..6fb0e252c6 100644 --- a/plugins/doc_fragments/onepassword.py +++ b/plugins/doc_fragments/onepassword.py @@ -18,8 +18,8 @@ options: aliases: ['vault_password'] type: str section: - description: Item section containing the field to retrieve (case-insensitive). If absent will return first match from - any section. + description: Item section containing the field to retrieve (case-insensitive). If absent, returns first match from any + section. domain: description: Domain of 1Password. default: '1password.com' @@ -42,7 +42,7 @@ options: - Only works with 1Password CLI version 2 or later. type: str vault: - description: Vault containing the item to retrieve (case-insensitive). If absent will search all vaults. + description: Vault containing the item to retrieve (case-insensitive). If absent, searches all vaults. type: str connect_host: description: The host for 1Password Connect. Must be used in combination with O(connect_token). @@ -65,10 +65,9 @@ options: - name: OP_SERVICE_ACCOUNT_TOKEN version_added: 8.2.0 notes: - - This lookup will use an existing 1Password session if one exists. If not, and you have already performed an initial sign - in (meaning C(~/.op/config), C(~/.config/op/config) or C(~/.config/.op/config) exists), then only the O(master_password) - is required. You may optionally specify O(subdomain) in this scenario, otherwise the last used subdomain will be used - by C(op). + - This lookup uses an existing 1Password session if one exists. If not, and you have already performed an initial sign in + (meaning C(~/.op/config), C(~/.config/op/config) or C(~/.config/.op/config) exists), then only the O(master_password) + is required. You may optionally specify O(subdomain) in this scenario, otherwise the last used subdomain is used by C(op). - This lookup can perform an initial login by providing O(subdomain), O(username), O(secret_key), and O(master_password). - Can target a specific account by providing the O(account_id). - Due to the B(very) sensitive nature of these credentials, it is B(highly) recommended that you only pass in the minimal diff --git a/plugins/doc_fragments/oneview.py b/plugins/doc_fragments/oneview.py index 3caabe4512..366e3e3e42 100644 --- a/plugins/doc_fragments/oneview.py +++ b/plugins/doc_fragments/oneview.py @@ -17,8 +17,8 @@ options: description: - Path to a JSON configuration file containing the OneView client configuration. The configuration file is optional and when used should be present in the host running the ansible commands. If the file path is not provided, the configuration - will be loaded from environment variables. For links to example configuration files or how to use the environment - variables verify the notes section. + is loaded from environment variables. For links to example configuration files or how to use the environment variables + verify the notes section. type: path api_version: description: @@ -49,16 +49,16 @@ notes: U(https://github.com/HewlettPackard/oneview-ansible/blob/master/examples/oneview_config-rename.json).' - 'Check how to use environment variables for configuration at: U(https://github.com/HewlettPackard/oneview-ansible#environment-variables).' - 'Additional Playbooks for the HPE OneView Ansible modules can be found at: U(https://github.com/HewlettPackard/oneview-ansible/tree/master/examples).' 
- - 'The OneView API version used will directly affect returned and expected fields in resources. Information on setting the - desired API version and can be found at: U(https://github.com/HewlettPackard/oneview-ansible#setting-your-oneview-version).' + - 'The OneView API version used directly affects returned and expected fields in resources. Information on setting the desired + API version and can be found at: U(https://github.com/HewlettPackard/oneview-ansible#setting-your-oneview-version).' """ VALIDATEETAG = r""" options: validate_etag: description: - - When the ETag Validation is enabled, the request will be conditionally processed only if the current ETag for the - resource matches the ETag provided in the data. + - When the ETag Validation is enabled, the request is conditionally processed only if the current ETag for the resource + matches the ETag provided in the data. type: bool default: true """ diff --git a/plugins/doc_fragments/openswitch.py b/plugins/doc_fragments/openswitch.py index f0e9e87c3d..30b477fbe7 100644 --- a/plugins/doc_fragments/openswitch.py +++ b/plugins/doc_fragments/openswitch.py @@ -21,8 +21,8 @@ options: port: description: - Specifies the port to use when building the connection to the remote device. This value applies to either O(transport=cli) - or O(transport=rest). The port value will default to the appropriate transport common port if none is provided in - the task. (cli=22, http=80, https=443). Note this argument does not affect the SSH transport. + or O(transport=rest). The port value defaults to the appropriate transport common port if none is provided in the + task. (cli=22, http=80, https=443). Note this argument does not affect the SSH transport. type: int default: 0 (use common port) username: @@ -30,25 +30,24 @@ options: - Configures the username to use to authenticate the connection to the remote device. This value is used to authenticate either the CLI login or the eAPI authentication depending on which transport is used. Note this argument does not affect the SSH transport. If the value is not specified in the task, the value of environment variable E(ANSIBLE_NET_USERNAME) - will be used instead. + is used instead. type: str password: description: - Specifies the password to use to authenticate the connection to the remote device. This is a common argument used for either O(transport=cli) or O(transport=rest). Note this argument does not affect the SSH transport. If the value - is not specified in the task, the value of environment variable E(ANSIBLE_NET_PASSWORD) will be used instead. + is not specified in the task, the value of environment variable E(ANSIBLE_NET_PASSWORD) is used instead. type: str timeout: description: - Specifies the timeout in seconds for communicating with the network device for either connecting or sending commands. - If the timeout is exceeded before the operation is completed, the module will error. + If the timeout is exceeded before the operation is completed, the module fails. type: int default: 10 ssh_keyfile: description: - Specifies the SSH key to use to authenticate the connection to the remote device. This argument is only used for O(transport=cli). - If the value is not specified in the task, the value of environment variable E(ANSIBLE_NET_SSH_KEYFILE) will be used - instead. + If the value is not specified in the task, the value of environment variable E(ANSIBLE_NET_SSH_KEYFILE) is used instead. 
type: path transport: description: diff --git a/plugins/doc_fragments/oracle.py b/plugins/doc_fragments/oracle.py index f657af407b..08b2948bf3 100644 --- a/plugins/doc_fragments/oracle.py +++ b/plugins/doc_fragments/oracle.py @@ -40,9 +40,10 @@ options: type: str api_user_key_file: description: - - Full path and filename of the private key (in PEM format). If not set, then the value of the E(OCI_USER_KEY_FILE) variable, - if any, is used. This option is required if the private key is not specified through a configuration file (See O(config_file_location)). - If the key is encrypted with a pass-phrase, the O(api_user_key_pass_phrase) option must also be provided. + - Full path and filename of the private key (in PEM format). If not set, then the value of the E(OCI_USER_KEY_FILE) + variable, if any, is used. This option is required if the private key is not specified through a configuration file + (See O(config_file_location)). If the key is encrypted with a pass-phrase, the O(api_user_key_pass_phrase) option + must also be provided. type: path api_user_key_pass_phrase: description: @@ -53,9 +54,9 @@ options: auth_type: description: - The type of authentication to use for making API requests. By default O(auth_type=api_key) based authentication is - performed and the API key (see O(api_user_key_file)) in your config file will be used. If this 'auth_type' module - option is not specified, the value of the E(OCI_ANSIBLE_AUTH_TYPE), if any, is used. Use O(auth_type=instance_principal) - to use instance principal based authentication when running ansible playbooks within an OCI compute instance. + performed and the API key (see O(api_user_key_file)) in your config file is used. If O(auth_type) is not specified, + the value of the E(OCI_ANSIBLE_AUTH_TYPE), if any, is used. Use O(auth_type=instance_principal) to use instance principal + based authentication when running ansible playbooks within an OCI compute instance. choices: ['api_key', 'instance_principal'] default: 'api_key' type: str diff --git a/plugins/doc_fragments/oracle_creatable_resource.py b/plugins/doc_fragments/oracle_creatable_resource.py index be0c931db4..5ccd6525c0 100644 --- a/plugins/doc_fragments/oracle_creatable_resource.py +++ b/plugins/doc_fragments/oracle_creatable_resource.py @@ -12,8 +12,8 @@ class ModuleDocFragment(object): options: force_create: description: Whether to attempt non-idempotent creation of a resource. By default, create resource is an idempotent operation, - and does not create the resource if it already exists. Setting this option to V(true), forcefully creates a copy of the - resource, even if it already exists. This option is mutually exclusive with O(key_by). + and does not create the resource if it already exists. Setting this option to V(true), forcefully creates a copy of + the resource, even if it already exists. This option is mutually exclusive with O(key_by). default: false type: bool key_by: diff --git a/plugins/doc_fragments/pipx.py b/plugins/doc_fragments/pipx.py index b94495d4a1..dde13f6dd3 100644 --- a/plugins/doc_fragments/pipx.py +++ b/plugins/doc_fragments/pipx.py @@ -13,26 +13,22 @@ class ModuleDocFragment(object): options: global: description: - - The module will pass the C(--global) argument to C(pipx), to execute actions in global scope. - - The C(--global) is only available in C(pipx>=1.6.0), so make sure to have a compatible version when using this option. 
- Moreover, a nasty bug with C(--global) was fixed in C(pipx==1.7.0), so it is strongly recommended you used that version - or newer. + - The module passes the C(--global) argument to C(pipx), to execute actions in global scope. type: bool default: false executable: description: - Path to the C(pipx) installed in the system. - - If not specified, the module will use C(python -m pipx) to run the tool, using the same Python interpreter as ansible + - If not specified, the module uses C(python -m pipx) to run the tool, using the same Python interpreter as ansible itself. type: path +requirements: + - This module requires C(pipx) version 1.7.0 or above. Please note that C(pipx) 1.7.0 requires Python 3.8 or above. notes: - - This module requires C(pipx) version 0.16.2.1 or above. From community.general 11.0.0 onwards, the module will require - C(pipx>=1.7.0). - - Please note that C(pipx) requires Python 3.6 or above. - This module does not install the C(pipx) python package, however that can be easily done with the module M(ansible.builtin.pip). - This module does not require C(pipx) to be in the shell C(PATH), but it must be loadable by Python as a module. - - This module will honor C(pipx) environment variables such as but not limited to E(PIPX_HOME) and E(PIPX_BIN_DIR) passed - using the R(environment Ansible keyword, playbooks_environment). + - This module honors C(pipx) environment variables such as but not limited to E(PIPX_HOME) and E(PIPX_BIN_DIR) passed using + the R(environment Ansible keyword, playbooks_environment). seealso: - name: C(pipx) command manual page description: Manual page for the command. diff --git a/plugins/doc_fragments/proxmox.py b/plugins/doc_fragments/proxmox.py deleted file mode 100644 index 4641c36d3e..0000000000 --- a/plugins/doc_fragments/proxmox.py +++ /dev/null @@ -1,84 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) Ansible project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - # Common parameters for Proxmox VE modules - DOCUMENTATION = r""" -options: - api_host: - description: - - Specify the target host of the Proxmox VE cluster. - type: str - required: true - api_port: - description: - - Specify the target port of the Proxmox VE cluster. - - Uses the E(PROXMOX_PORT) environment variable if not specified. - type: int - required: false - version_added: 9.1.0 - api_user: - description: - - Specify the user to authenticate with. - type: str - required: true - api_password: - description: - - Specify the password to authenticate with. - - You can use E(PROXMOX_PASSWORD) environment variable. - type: str - api_token_id: - description: - - Specify the token ID. - - Requires C(proxmoxer>=1.1.0) to work. - type: str - version_added: 1.3.0 - api_token_secret: - description: - - Specify the token secret. - - Requires C(proxmoxer>=1.1.0) to work. - type: str - version_added: 1.3.0 - validate_certs: - description: - - If V(false), SSL certificates will not be validated. - - This should only be used on personally controlled sites using self-signed certificates. - type: bool - default: false -requirements: ["proxmoxer", "requests"] -""" - - SELECTION = r""" -options: - vmid: - description: - - Specifies the instance ID. - - If not set the next available ID will be fetched from ProxmoxAPI. 
-    type: int
-  node:
-    description:
-      - Proxmox VE node on which to operate.
-      - Only required for O(state=present).
-      - For every other states it will be autodiscovered.
-    type: str
-  pool:
-    description:
-      - Add the new VM to the specified pool.
-    type: str
-"""
-
-    ACTIONGROUP_PROXMOX = r"""
-options: {}
-attributes:
-  action_group:
-    description: Use C(group/community.general.proxmox) in C(module_defaults) to set defaults for this module.
-    support: full
-    membership:
-      - community.general.proxmox
-"""
diff --git a/plugins/doc_fragments/redfish.py b/plugins/doc_fragments/redfish.py
new file mode 100644
index 0000000000..a20e064988
--- /dev/null
+++ b/plugins/doc_fragments/redfish.py
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2025 Ansible community
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+    # Use together with the community.general.redfish module utils' REDFISH_COMMON_ARGUMENT_SPEC
+    DOCUMENTATION = r"""
+options:
+  validate_certs:
+    description:
+      - If V(false), TLS/SSL certificates are not validated.
+      - Set this to V(true) to enable certificate checking. Should be used together with O(ca_path).
+    type: bool
+    default: false
+  ca_path:
+    description:
+      - PEM formatted file that contains a CA certificate to be used for validation.
+      - Only used if O(validate_certs=true).
+    type: path
+  ciphers:
+    required: false
+    description:
+      - TLS/SSL ciphers to use for the request.
+      - When a list is provided, all ciphers are joined in order with V(:).
+      - See the L(OpenSSL Cipher List Format,https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html#CIPHER-LIST-FORMAT)
+        for more details.
+      - The available ciphers are dependent on the Python and OpenSSL/LibreSSL versions.
+    type: list
+    elements: str
+"""
diff --git a/plugins/doc_fragments/redis.py b/plugins/doc_fragments/redis.py
index 149c018d79..c7bb88b81d 100644
--- a/plugins/doc_fragments/redis.py
+++ b/plugins/doc_fragments/redis.py
@@ -45,7 +45,7 @@ options:
     default: true
   ca_certs:
     description:
-      - Path to root certificates file. If not set and O(tls) is set to V(true), certifi ca-certificates will be used.
+      - Path to root certificates file. If not set and O(tls) is set to V(true), certifi's CA certificates are used.
     type: str
   client_cert_file:
     description:
diff --git a/plugins/doc_fragments/utm.py b/plugins/doc_fragments/utm.py
index f6954a1917..32c18e93b8 100644
--- a/plugins/doc_fragments/utm.py
+++ b/plugins/doc_fragments/utm.py
@@ -31,7 +31,8 @@ options:
   utm_token:
     description:
       - The token used to identify at the REST-API.
-      - See U(https://www.sophos.com/en-us/medialibrary/PDFs/documentation/UTMonAWS/Sophos-UTM-RESTful-API.pdf?la=en), Chapter 2.4.2.
+      - See U(https://www.sophos.com/en-us/medialibrary/PDFs/documentation/UTMonAWS/Sophos-UTM-RESTful-API.pdf?la=en), Chapter
+        2.4.2.
     type: str
     required: true
   utm_protocol:
@@ -48,8 +49,8 @@ options:
   state:
     description:
       - The desired state of the object.
-      - V(present) will create or update an object.
-      - V(absent) will delete an object if it was present.
+      - V(present) creates or updates an object.
+      - V(absent) deletes an object if present.
type: str choices: [absent, present] default: present diff --git a/plugins/doc_fragments/xenserver.py b/plugins/doc_fragments/xenserver.py index d1377e8964..f4e0946219 100644 --- a/plugins/doc_fragments/xenserver.py +++ b/plugins/doc_fragments/xenserver.py @@ -15,28 +15,27 @@ options: hostname: description: - The hostname or IP address of the XenServer host or XenServer pool master. - - If the value is not specified in the task, the value of environment variable E(XENSERVER_HOST) will be used instead. + - If the value is not specified in the task, the value of environment variable E(XENSERVER_HOST) is used instead. type: str default: localhost aliases: [host, pool] username: description: - The username to use for connecting to XenServer. - - If the value is not specified in the task, the value of environment variable E(XENSERVER_USER) will be used instead. + - If the value is not specified in the task, the value of environment variable E(XENSERVER_USER) is used instead. type: str default: root aliases: [admin, user] password: description: - The password to use for connecting to XenServer. - - If the value is not specified in the task, the value of environment variable E(XENSERVER_PASSWORD) will be used instead. + - If the value is not specified in the task, the value of environment variable E(XENSERVER_PASSWORD) is used instead. type: str aliases: [pass, pwd] validate_certs: description: - Allows connection when SSL certificates are not valid. Set to V(false) when certificates are not trusted. - - If the value is not specified in the task, the value of environment variable E(XENSERVER_VALIDATE_CERTS) will be used - instead. + - If the value is not specified in the task, the value of environment variable E(XENSERVER_VALIDATE_CERTS) is used instead. type: bool default: true """ diff --git a/plugins/filter/accumulate.py b/plugins/filter/accumulate.py index 9400936e1d..c48afa0467 100644 --- a/plugins/filter/accumulate.py +++ b/plugins/filter/accumulate.py @@ -2,43 +2,44 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -DOCUMENTATION = ''' - name: accumulate - short_description: Produce a list of accumulated sums of the input list contents - version_added: 10.1.0 - author: Max Gautier (@VannTen) - description: - - Passthrough to the L(Python itertools.accumulate function,https://docs.python.org/3/library/itertools.html#itertools.accumulate). - - Transforms an input list into the cumulative list of results from applying addition to the elements of the input list. - - Addition means the default Python implementation of C(+) for input list elements type. - options: - _input: - description: A list. - type: list - elements: any - required: true -''' - -RETURN = ''' - _value: - description: A list of cumulated sums of the elements of the input list. +DOCUMENTATION = r""" +name: accumulate +short_description: Produce a list of accumulated sums of the input list contents +version_added: 10.1.0 +author: Max Gautier (@VannTen) +description: + - Passthrough to the L(Python itertools.accumulate function,https://docs.python.org/3/library/itertools.html#itertools.accumulate). + - Transforms an input list into the cumulative list of results from applying addition to the elements of the input list. + - Addition means the default Python implementation of C(+) for input list elements type. +options: + _input: + description: A list. 
type: list elements: any -''' + required: true +""" -EXAMPLES = ''' +RETURN = r""" +_value: + description: A list of cumulated sums of the elements of the input list. + type: list + elements: any +""" + +EXAMPLES = r""" - name: Enumerate parent directories of some path ansible.builtin.debug: var: > - "/some/path/to/my/file" - | split('/') | map('split', '/') - | community.general.accumulate | map('join', '/') + "/some/path/to/my/file" + | split('/') | map('split', '/') + | community.general.accumulate | map('join', '/') # Produces: ['', '/some', '/some/path', '/some/path/to', '/some/path/to/my', '/some/path/to/my/file'] + - name: Growing string ansible.builtin.debug: var: "'abc' | community.general.accumulate" # Produces ['a', 'ab', 'abc'] -''' +""" from itertools import accumulate from collections.abc import Sequence diff --git a/plugins/filter/counter.py b/plugins/filter/counter.py index 1b79294b59..bd4b5d4448 100644 --- a/plugins/filter/counter.py +++ b/plugins/filter/counter.py @@ -3,37 +3,37 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: counter - short_description: Counts hashable elements in a sequence - version_added: 4.3.0 - author: Rémy Keil (@keilr) - description: - - Counts hashable elements in a sequence. - options: - _input: - description: A sequence. - type: list - elements: any - required: true -''' +DOCUMENTATION = r""" +name: counter +short_description: Counts hashable elements in a sequence +version_added: 4.3.0 +author: Rémy Keil (@keilr) +description: + - Counts hashable elements in a sequence. +options: + _input: + description: A sequence. + type: list + elements: any + required: true +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Count occurrences ansible.builtin.debug: msg: >- {{ [1, 'a', 2, 2, 'a', 'b', 'a'] | community.general.counter }} # Produces: {1: 1, 'a': 3, 2: 2, 'b': 1} -''' +""" -RETURN = ''' - _value: - description: A dictionary with the elements of the sequence as keys, and their number of occurrences in the sequence as values. - type: dictionary -''' +RETURN = r""" +_value: + description: A dictionary with the elements of the sequence as keys, and their number of occurrences in the sequence as + values. + type: dictionary +""" from ansible.errors import AnsibleFilterError from ansible.module_utils.common._collections_compat import Sequence diff --git a/plugins/filter/crc32.py b/plugins/filter/crc32.py index 1f0aa2e9b0..e394d23732 100644 --- a/plugins/filter/crc32.py +++ b/plugins/filter/crc32.py @@ -2,8 +2,7 @@ # Copyright (c) 2022, Julien Riou # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations from ansible.errors import AnsibleFilterError from ansible.module_utils.common.text.converters import to_bytes @@ -16,33 +15,33 @@ except ImportError: HAS_ZLIB = False -DOCUMENTATION = ''' - name: crc32 - short_description: Generate a CRC32 checksum - version_added: 5.4.0 - description: - - Checksum a string using CRC32 algorithm and return its hexadecimal representation. - options: - _input: - description: - - The string to checksum. 
- type: string - required: true - author: - - Julien Riou -''' - -EXAMPLES = ''' - - name: Checksum a test string - ansible.builtin.debug: - msg: "{{ 'test' | community.general.crc32 }}" -''' - -RETURN = ''' - _value: - description: CRC32 checksum. +DOCUMENTATION = r""" +name: crc32 +short_description: Generate a CRC32 checksum +version_added: 5.4.0 +description: + - Checksum a string using CRC32 algorithm and return its hexadecimal representation. +options: + _input: + description: + - The string to checksum. type: string -''' + required: true +author: + - Julien Riou +""" + +EXAMPLES = r""" +- name: Checksum a test string + ansible.builtin.debug: + msg: "{{ 'test' | community.general.crc32 }}" +""" + +RETURN = r""" +_value: + description: CRC32 checksum. + type: string +""" def crc32s(value): diff --git a/plugins/filter/dict.py b/plugins/filter/dict.py index 3e0558bb61..23c977dfd6 100644 --- a/plugins/filter/dict.py +++ b/plugins/filter/dict.py @@ -4,25 +4,24 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: dict - short_description: Convert a list of tuples into a dictionary - version_added: 3.0.0 - author: Felix Fontein (@felixfontein) - description: - - Convert a list of tuples into a dictionary. This is a filter version of the C(dict) function. - options: - _input: - description: A list of tuples (with exactly two elements). - type: list - elements: tuple - required: true -''' +DOCUMENTATION = r""" +name: dict +short_description: Convert a list of tuples into a dictionary +version_added: 3.0.0 +author: Felix Fontein (@felixfontein) +description: + - Convert a list of tuples into a dictionary. This is a filter version of the C(dict) function. +options: + _input: + description: A list of tuples (with exactly two elements). + type: list + elements: tuple + required: true +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Convert list of tuples into dictionary ansible.builtin.set_fact: dictionary: "{{ [[1, 2], ['a', 'b']] | community.general.dict }}" @@ -53,13 +52,13 @@ EXAMPLES = ''' # "k2": 42, # "k3": "b" # } -''' +""" -RETURN = ''' - _value: - description: A dictionary with the provided key-value pairs. - type: dictionary -''' +RETURN = r""" +_value: + description: A dictionary with the provided key-value pairs. + type: dictionary +""" def dict_filter(sequence): diff --git a/plugins/filter/dict_kv.py b/plugins/filter/dict_kv.py index 59595f9573..1d73bde301 100644 --- a/plugins/filter/dict_kv.py +++ b/plugins/filter/dict_kv.py @@ -3,40 +3,39 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: dict_kv - short_description: Convert a value to a dictionary with a single key-value pair - version_added: 1.3.0 - author: Stanislav German-Evtushenko (@giner) - description: - - Convert a value to a dictionary with a single key-value pair. - positional: key - options: - _input: - description: The value for the single key-value pair. - type: any - required: true - key: - description: The key for the single key-value pair. 
- type: any - required: true -''' +DOCUMENTATION = r""" +name: dict_kv +short_description: Convert a value to a dictionary with a single key-value pair +version_added: 1.3.0 +author: Stanislav German-Evtushenko (@giner) +description: + - Convert a value to a dictionary with a single key-value pair. +positional: key +options: + _input: + description: The value for the single key-value pair. + type: any + required: true + key: + description: The key for the single key-value pair. + type: any + required: true +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a one-element dictionary from a value ansible.builtin.debug: msg: "{{ 'myvalue' | dict_kv('mykey') }}" # Produces the dictionary {'mykey': 'myvalue'} -''' +""" -RETURN = ''' - _value: - description: A dictionary with a single key-value pair. - type: dictionary -''' +RETURN = r""" +_value: + description: A dictionary with a single key-value pair. + type: dictionary +""" def dict_kv(value, key): diff --git a/plugins/filter/from_csv.py b/plugins/filter/from_csv.py index 617f314794..e9a5d73e53 100644 --- a/plugins/filter/from_csv.py +++ b/plugins/filter/from_csv.py @@ -5,54 +5,53 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: from_csv - short_description: Converts CSV text input into list of dicts - version_added: 2.3.0 - author: Andrew Pantuso (@Ajpantuso) - description: - - Converts CSV text input into list of dictionaries. - options: - _input: - description: A string containing a CSV document. - type: string - required: true - dialect: - description: - - The CSV dialect to use when parsing the CSV file. - - Possible values include V(excel), V(excel-tab) or V(unix). - type: str - default: excel - fieldnames: - description: - - A list of field names for every column. - - This is needed if the CSV does not have a header. - type: list - elements: str - delimiter: - description: - - A one-character string used to separate fields. - - When using this parameter, you change the default value used by O(dialect). - - The default value depends on the dialect used. - type: str - skipinitialspace: - description: - - Whether to ignore any whitespaces immediately following the delimiter. - - When using this parameter, you change the default value used by O(dialect). - - The default value depends on the dialect used. - type: bool - strict: - description: - - Whether to raise an exception on bad CSV input. - - When using this parameter, you change the default value used by O(dialect). - - The default value depends on the dialect used. - type: bool -''' +DOCUMENTATION = r""" +name: from_csv +short_description: Converts CSV text input into list of dicts +version_added: 2.3.0 +author: Andrew Pantuso (@Ajpantuso) +description: + - Converts CSV text input into list of dictionaries. +options: + _input: + description: A string containing a CSV document. + type: string + required: true + dialect: + description: + - The CSV dialect to use when parsing the CSV file. + - Possible values include V(excel), V(excel-tab) or V(unix). + type: str + default: excel + fieldnames: + description: + - A list of field names for every column. + - This is needed if the CSV does not have a header. + type: list + elements: str + delimiter: + description: + - A one-character string used to separate fields. 
+ - When using this parameter, you change the default value used by O(dialect). + - The default value depends on the dialect used. + type: str + skipinitialspace: + description: + - Whether to ignore any whitespaces immediately following the delimiter. + - When using this parameter, you change the default value used by O(dialect). + - The default value depends on the dialect used. + type: bool + strict: + description: + - Whether to raise an exception on bad CSV input. + - When using this parameter, you change the default value used by O(dialect). + - The default value depends on the dialect used. + type: bool +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Parse a CSV file's contents ansible.builtin.debug: msg: >- @@ -71,14 +70,14 @@ EXAMPLES = ''' # "Column 1": "bar", # "Value": "42", # } -''' +""" -RETURN = ''' - _value: - description: A list with one dictionary per row. - type: list - elements: dictionary -''' +RETURN = r""" +_value: + description: A list with one dictionary per row. + type: list + elements: dictionary +""" from ansible.errors import AnsibleFilterError diff --git a/plugins/filter/from_ini.py b/plugins/filter/from_ini.py index 5b4bd4a3af..d77338df99 100644 --- a/plugins/filter/from_ini.py +++ b/plugins/filter/from_ini.py @@ -4,47 +4,46 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -DOCUMENTATION = r''' - name: from_ini - short_description: Converts INI text input into a dictionary - version_added: 8.2.0 - author: Steffen Scheib (@sscheib) - description: - - Converts INI text input into a dictionary. - options: - _input: - description: A string containing an INI document. - type: string - required: true -''' +DOCUMENTATION = r""" +name: from_ini +short_description: Converts INI text input into a dictionary +version_added: 8.2.0 +author: Steffen Scheib (@sscheib) +description: + - Converts INI text input into a dictionary. +options: + _input: + description: A string containing an INI document. + type: string + required: true +""" -EXAMPLES = r''' - - name: Slurp an INI file - ansible.builtin.slurp: - src: /etc/rhsm/rhsm.conf - register: rhsm_conf +EXAMPLES = r""" +- name: Slurp an INI file + ansible.builtin.slurp: + src: /etc/rhsm/rhsm.conf + register: rhsm_conf - - name: Display the INI file as dictionary - ansible.builtin.debug: - var: rhsm_conf.content | b64decode | community.general.from_ini +- name: Display the INI file as dictionary + ansible.builtin.debug: + var: rhsm_conf.content | b64decode | community.general.from_ini - - name: Set a new dictionary fact with the contents of the INI file - ansible.builtin.set_fact: - rhsm_dict: >- - {{ - rhsm_conf.content | b64decode | community.general.from_ini - }} -''' +- name: Set a new dictionary fact with the contents of the INI file + ansible.builtin.set_fact: + rhsm_dict: >- + {{ + rhsm_conf.content | b64decode | community.general.from_ini + }} +""" -RETURN = ''' - _value: - description: A dictionary representing the INI file. - type: dictionary -''' +RETURN = r""" +_value: + description: A dictionary representing the INI file. 
+  type: dictionary
+"""
 
-__metaclass__ = type
 
 from ansible.errors import AnsibleFilterError
 from ansible.module_utils.six import string_types
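
The from_ini filter shown above parses INI text into a dictionary of sections; together with the to_ini filter rewritten later in this diff, it permits read-modify-write round trips. A minimal sketch, assuming an illustrative file path and section/key names (not taken from this patch):

- name: Read, tweak, and write back an INI file (illustrative paths and keys)
  hosts: localhost
  gather_facts: false
  tasks:
    - name: Slurp the original file
      ansible.builtin.slurp:
        src: /tmp/example.ini
      register: ini_file

    - name: Parse it into a dictionary
      ansible.builtin.set_fact:
        ini_dict: "{{ ini_file.content | b64decode | community.general.from_ini }}"

    - name: Change one value and serialize the result back to INI text
      ansible.builtin.copy:
        dest: /tmp/example.new.ini
        content: "{{ ini_dict | combine({'main': {'enabled': 'true'}}, recursive=true) | community.general.to_ini }}"

diff --git a/plugins/filter/groupby_as_dict.py b/plugins/filter/groupby_as_dict.py
index 8e29c5863c..81a24a1e9f 100644
--- a/plugins/filter/groupby_as_dict.py
+++ b/plugins/filter/groupby_as_dict.py
@@ -3,32 +3,31 @@
 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
 # SPDX-License-Identifier: GPL-3.0-or-later
 
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
 
-DOCUMENTATION = '''
-  name: groupby_as_dict
-  short_description: Transform a sequence of dictionaries to a dictionary where the dictionaries are indexed by an attribute
-  version_added: 3.1.0
-  author: Felix Fontein (@felixfontein)
-  description:
-    - Transform a sequence of dictionaries to a dictionary where the dictionaries are indexed by an attribute.
-    - This filter is similar to the Jinja2 C(groupby) filter. Use the Jinja2 C(groupby) filter if you have multiple entries with the same value,
-      or when you need a dictionary with list values, or when you need to use deeply nested attributes.
-  positional: attribute
-  options:
-    _input:
-      description: A list of dictionaries
-      type: list
-      elements: dictionary
-      required: true
-    attribute:
-      description: The attribute to use as the key.
-      type: str
-      required: true
-'''
+DOCUMENTATION = r"""
+name: groupby_as_dict
+short_description: Transform a sequence of dictionaries to a dictionary where the dictionaries are indexed by an attribute
+version_added: 3.1.0
+author: Felix Fontein (@felixfontein)
+description:
+  - Transform a sequence of dictionaries to a dictionary where the dictionaries are indexed by an attribute.
+  - This filter is similar to the Jinja2 C(groupby) filter. Use the Jinja2 C(groupby) filter if you have multiple entries
+    with the same value, or when you need a dictionary with list values, or when you need to use deeply nested attributes.
+positional: attribute
+options:
+  _input:
+    description: A list of dictionaries.
+    type: list
+    elements: dictionary
+    required: true
+  attribute:
+    description: The attribute to use as the key.
+    type: str
+    required: true
+"""
 
-EXAMPLES = '''
+EXAMPLES = r"""
 - name: Arrange a list of dictionaries as a dictionary of dictionaries
   ansible.builtin.debug:
     msg: "{{ sequence | community.general.groupby_as_dict('key') }}"
@@ -46,13 +45,13 @@ EXAMPLES = '''
 #   other_value:
 #     key: other_value
 #     baz: bar
-'''
+"""
 
-RETURN = '''
-  _value:
-    description: A dictionary containing the dictionaries from the list as values.
-    type: dictionary
-'''
+RETURN = r"""
+_value:
+  description: A dictionary containing the dictionaries from the list as values.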
+ type: dictionary +""" from ansible.errors import AnsibleFilterError from ansible.module_utils.common._collections_compat import Mapping, Sequence diff --git a/plugins/filter/hashids.py b/plugins/filter/hashids.py index ac771e6219..76e6aaa3a5 100644 --- a/plugins/filter/hashids.py +++ b/plugins/filter/hashids.py @@ -4,18 +4,21 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations from ansible.errors import ( AnsibleError, AnsibleFilterError, - AnsibleFilterTypeError, ) from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.common.collections import is_sequence +try: + from ansible.errors import AnsibleTypeError +except ImportError: + from ansible.errors import AnsibleFilterTypeError as AnsibleTypeError + try: from hashids import Hashids HAS_HASHIDS = True @@ -64,7 +67,7 @@ def hashids_encode(nums, salt=None, alphabet=None, min_length=None): try: hashid = hashids.encode(*nums) except TypeError as e: - raise AnsibleFilterTypeError( + raise AnsibleTypeError( "Data to encode must by a tuple or list of ints: %s" % to_native(e) ) diff --git a/plugins/filter/jc.py b/plugins/filter/jc.py index 2fe3ef9d73..6a2feb93f0 100644 --- a/plugins/filter/jc.py +++ b/plugins/filter/jc.py @@ -5,44 +5,43 @@ # # contributed by Kelly Brazil -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: jc - short_description: Convert output of many shell commands and file-types to JSON - version_added: 1.1.0 - author: Kelly Brazil (@kellyjonbrazil) - description: - - Convert output of many shell commands and file-types to JSON. - - Uses the L(jc library,https://github.com/kellyjonbrazil/jc). - positional: parser - options: - _input: - description: The data to convert. - type: string - required: true - parser: - description: - - The correct parser for the input data. - - For example V(ifconfig). - - "Note: use underscores instead of dashes (if any) in the parser module name." - - See U(https://github.com/kellyjonbrazil/jc#parsers) for the latest list of parsers. - type: string - required: true - quiet: - description: Set to V(false) to not suppress warnings. - type: boolean - default: true - raw: - description: Set to V(true) to return pre-processed JSON. - type: boolean - default: false - requirements: - - jc installed as a Python library (U(https://pypi.org/project/jc/)) -''' +DOCUMENTATION = r""" +name: jc +short_description: Convert output of many shell commands and file-types to JSON +version_added: 1.1.0 +author: Kelly Brazil (@kellyjonbrazil) +description: + - Convert output of many shell commands and file-types to JSON. + - Uses the L(jc library,https://github.com/kellyjonbrazil/jc). +positional: parser +options: + _input: + description: The data to convert. + type: string + required: true + parser: + description: + - The correct parser for the input data. + - For example V(ifconfig). + - 'Note: use underscores instead of dashes (if any) in the parser module name.' + - See U(https://github.com/kellyjonbrazil/jc#parsers) for the latest list of parsers. + type: string + required: true + quiet: + description: Set to V(false) to not suppress warnings. + type: boolean + default: true + raw: + description: Set to V(true) to return pre-processed JSON. 
+ type: boolean + default: false +requirements: + - jc installed as a Python library (U(https://pypi.org/project/jc/)) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install the prereqs of the jc filter (jc Python package) on the Ansible controller delegate_to: localhost ansible.builtin.pip: @@ -68,13 +67,13 @@ EXAMPLES = ''' # "operating_system": "GNU/Linux", # "processor": "x86_64" # } -''' +""" -RETURN = ''' - _value: - description: The processed output. - type: any -''' +RETURN = r""" +_value: + description: The processed output. + type: any +""" from ansible.errors import AnsibleError, AnsibleFilterError import importlib @@ -144,11 +143,11 @@ def jc_filter(data, parser, quiet=True, raw=False): # old API (jc v1.17.7 and lower) else: - jc_parser = importlib.import_module('jc.parsers.' + parser) + jc_parser = importlib.import_module(f'jc.parsers.{parser}') return jc_parser.parse(data, quiet=quiet, raw=raw) except Exception as e: - raise AnsibleFilterError('Error in jc filter plugin: %s' % e) + raise AnsibleFilterError(f'Error in jc filter plugin: {e}') class FilterModule(object): diff --git a/plugins/filter/json_diff.yml b/plugins/filter/json_diff.yml new file mode 100644 index 0000000000..a370564d7a --- /dev/null +++ b/plugins/filter/json_diff.yml @@ -0,0 +1,56 @@ +--- +# Copyright (c) Stanislav Meduna (@numo68) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION: + name: json_diff + short_description: Create a JSON patch by comparing two JSON files + description: + - This filter compares the input with the argument and computes a list of operations + that can be consumed by the P(community.general.json_patch_recipe#filter) to change the input + to the argument. + requirements: + - jsonpatch + version_added: 10.3.0 + author: + - Stanislav Meduna (@numo68) + positional: target + options: + _input: + description: A list or a dictionary representing a source JSON object, or a string containing a JSON object. + type: raw + required: true + target: + description: A list or a dictionary representing a target JSON object, or a string containing a JSON object. + type: raw + required: true + seealso: + - name: RFC 6902 + description: JavaScript Object Notation (JSON) Patch + link: https://datatracker.ietf.org/doc/html/rfc6902 + - name: RFC 6901 + description: JavaScript Object Notation (JSON) Pointer + link: https://datatracker.ietf.org/doc/html/rfc6901 + - name: jsonpatch Python Package + description: A Python library for applying JSON patches + link: https://pypi.org/project/jsonpatch/ + +RETURN: + _value: + description: A list of JSON patch operations to apply. 
+    type: list
+    elements: dict
+
+EXAMPLES: |
+  - name: Compute a difference
+    ansible.builtin.debug:
+      msg: "{{ input | community.general.json_diff(target) }}"
+    vars:
+      input: {"foo": 1, "bar": {"baz": 2}, "baw": [1, 2, 3], "hello": "day"}
+      target: {"foo": 1, "bar": {"baz": 2}, "baw": [1, 3], "baq": {"baz": 2}, "hello": "night"}
+    # => [
+    #   {"op": "add", "path": "/baq", "value": {"baz": 2}},
+    #   {"op": "remove", "path": "/baw/1"},
+    #   {"op": "replace", "path": "/hello", "value": "night"}
+    # ]
diff --git a/plugins/filter/json_patch.py b/plugins/filter/json_patch.py
new file mode 100644
index 0000000000..4600bfaf92
--- /dev/null
+++ b/plugins/filter/json_patch.py
@@ -0,0 +1,195 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Stanislav Meduna (@numo68)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+from json import loads
+from typing import TYPE_CHECKING
+from ansible.errors import AnsibleFilterError
+
+__metaclass__ = type  # pylint: disable=C0103
+
+if TYPE_CHECKING:
+    from typing import Any, Callable, Union
+
+try:
+    import jsonpatch
+
+except ImportError as exc:
+    HAS_LIB = False
+    JSONPATCH_IMPORT_ERROR = exc
+else:
+    HAS_LIB = True
+    JSONPATCH_IMPORT_ERROR = None
+
+OPERATIONS_AVAILABLE = ["add", "copy", "move", "remove", "replace", "test"]
+OPERATIONS_NEEDING_FROM = ["copy", "move"]
+OPERATIONS_NEEDING_VALUE = ["add", "replace", "test"]
+
+
+class FilterModule:
+    """Filter plugin."""
+
+    def check_json_object(self, filter_name: str, object_name: str, inp: Any):
+        if isinstance(inp, (str, bytes, bytearray)):
+            try:
+                return loads(inp)
+            except Exception as e:
+                raise AnsibleFilterError(
+                    f"{filter_name}: could not decode JSON from {object_name}: {e}"
+                ) from e
+
+        if not isinstance(inp, (list, dict)):
+            raise AnsibleFilterError(
+                f"{filter_name}: {object_name} is not a dictionary, list or string"
+            )
+
+        return inp
+
+    def check_patch_arguments(self, filter_name: str, args: dict):
+
+        if "op" not in args or not isinstance(args["op"], str):
+            raise AnsibleFilterError(f"{filter_name}: 'op' argument is not a string")
+
+        if args["op"] not in OPERATIONS_AVAILABLE:
+            raise AnsibleFilterError(
+                f"{filter_name}: unsupported 'op' argument: {args['op']}"
+            )
+
+        if "path" not in args or not isinstance(args["path"], str):
+            raise AnsibleFilterError(f"{filter_name}: 'path' argument is not a string")
+
+        if args["op"] in OPERATIONS_NEEDING_FROM:
+            if "from" not in args:
+                raise AnsibleFilterError(
+                    f"{filter_name}: 'from' argument missing for '{args['op']}' operation"
+                )
+            if not isinstance(args["from"], str):
+                raise AnsibleFilterError(
+                    f"{filter_name}: 'from' argument is not a string"
+                )
+
+    def json_patch(
+        self,
+        inp: Union[str, list, dict, bytes, bytearray],
+        op: str,
+        path: str,
+        value: Any = None,
+        **kwargs: dict,
+    ) -> Any:
+
+        if not HAS_LIB:
+            raise AnsibleFilterError(
+                "You need to install the 'jsonpatch' package prior to running the 'json_patch' filter"
+            ) from JSONPATCH_IMPORT_ERROR
+
+        args = {"op": op, "path": path}
+        from_arg = kwargs.pop("from", None)
+        fail_test = kwargs.pop("fail_test", False)
+
+        if kwargs:
+            raise AnsibleFilterError(
+                f"json_patch: unexpected keyword arguments: {', '.join(sorted(kwargs))}"
+            )
+
+        if not isinstance(fail_test, bool):
+            raise AnsibleFilterError("json_patch: 'fail_test' argument is not a bool")
+
+        if op in OPERATIONS_NEEDING_VALUE:
+            args["value"] = value
+        if op in OPERATIONS_NEEDING_FROM and from_arg is not None:
+            args["from"] = from_arg
+
+        inp = self.check_json_object("json_patch", "input", inp)
+        self.check_patch_arguments("json_patch", args)
+
+        result = None
+
+        try:
+            result = jsonpatch.apply_patch(inp, [args])
+        except jsonpatch.JsonPatchTestFailed as e:
+            if fail_test:
+                raise AnsibleFilterError(
+                    f"json_patch: test operation failed: {e}"
+                ) from e
+            else:
+                pass
+        except Exception as e:
+            raise AnsibleFilterError(f"json_patch: patch failed: {e}") from e
+
+        return result
+
+    def json_patch_recipe(
+        self,
+        inp: Union[str, list, dict, bytes, bytearray],
+        operations: list,
+        /,
+        fail_test: bool = False,
+    ) -> Any:
+
+        if not HAS_LIB:
+            raise AnsibleFilterError(
+                "You need to install the 'jsonpatch' package prior to running the 'json_patch_recipe' filter"
+            ) from JSONPATCH_IMPORT_ERROR
+
+        if not isinstance(operations, list):
+            raise AnsibleFilterError(
+                "json_patch_recipe: 'operations' needs to be a list"
+            )
+
+        if not isinstance(fail_test, bool):
+            raise AnsibleFilterError("json_patch_recipe: 'fail_test' argument is not a bool")
+
+        result = None
+
+        inp = self.check_json_object("json_patch_recipe", "input", inp)
+        for args in operations:
+            self.check_patch_arguments("json_patch_recipe", args)
+
+        try:
+            result = jsonpatch.apply_patch(inp, operations)
+        except jsonpatch.JsonPatchTestFailed as e:
+            if fail_test:
+                raise AnsibleFilterError(
+                    f"json_patch_recipe: test operation failed: {e}"
+                ) from e
+            else:
+                pass
+        except Exception as e:
+            raise AnsibleFilterError(f"json_patch_recipe: patch failed: {e}") from e
+
+        return result
+
+    def json_diff(
+        self,
+        inp: Union[str, list, dict, bytes, bytearray],
+        target: Union[str, list, dict, bytes, bytearray],
+    ) -> list:
+
+        if not HAS_LIB:
+            raise AnsibleFilterError(
+                "You need to install the 'jsonpatch' package prior to running the 'json_diff' filter"
+            ) from JSONPATCH_IMPORT_ERROR
+
+        inp = self.check_json_object("json_diff", "input", inp)
+        target = self.check_json_object("json_diff", "target", target)
+
+        try:
+            result = list(jsonpatch.make_patch(inp, target))
+        except Exception as e:
+            raise AnsibleFilterError(f"JSON diff failed: {e}") from e
+
+        return result
+
+    def filters(self) -> dict[str, Callable[..., Any]]:
+        """Map filter plugin names to their functions.
+
+        Returns:
+            dict: The filter plugin functions.
+        """
+        return {
+            "json_patch": self.json_patch,
+            "json_patch_recipe": self.json_patch_recipe,
+            "json_diff": self.json_diff,
+        }
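
The three filters registered above are thin wrappers around the jsonpatch library: json_patch applies a single RFC 6902 operation, json_patch_recipe applies a list of them, and json_diff computes such a list with jsonpatch.make_patch. A minimal playbook sketch of how they compose (the data is illustrative and it assumes the jsonpatch package is installed on the controller):

- name: Round-trip the json_* filters (illustrative data)
  hosts: localhost
  gather_facts: false
  vars:
    before: {"a": 1, "b": [1, 2]}
    after: {"a": 2, "b": [1, 2, 3]}
  tasks:
    - name: Compute the operations that turn 'before' into 'after'
      ansible.builtin.set_fact:
        ops: "{{ before | community.general.json_diff(after) }}"

    - name: Applying those operations as a recipe reproduces 'after'
      ansible.builtin.assert:
        that:
          - before | community.general.json_patch_recipe(ops) == after

    - name: A single operation can be applied directly
      ansible.builtin.debug:
        msg: "{{ before | community.general.json_patch('replace', '/a', 2) }}"

diff --git a/plugins/filter/json_patch.yml b/plugins/filter/json_patch.yml
new file mode 100644
index 0000000000..42a0309202
--- /dev/null
+++ b/plugins/filter/json_patch.yml
@@ -0,0 +1,145 @@
+---
+# Copyright (c) Stanislav Meduna (@numo68)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+  name: json_patch
+  short_description: Apply a JSON-Patch (RFC 6902) operation to an object
+  description:
+    - This filter applies a single JSON patch operation and returns a modified object.
+    - If the operation is a test, the filter returns an unmodified object if the test
+      succeeded and a V(none) value otherwise.
+  requirements:
+    - jsonpatch
+  version_added: 10.3.0
+  author:
+    - Stanislav Meduna (@numo68)
+  positional: op, path, value
+  options:
+    _input:
+      description: A list or a dictionary representing a JSON object, or a string containing a JSON object.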
+ type: raw + required: true + op: + description: Operation to perform (see L(RFC 6902, https://datatracker.ietf.org/doc/html/rfc6902)). + type: str + choices: [add, copy, move, remove, replace, test] + required: true + path: + description: JSON Pointer path to the target location (see L(RFC 6901, https://datatracker.ietf.org/doc/html/rfc6901)). + type: str + required: true + value: + description: Value to use in the operation. Ignored for O(op=copy), O(op=move), and O(op=remove). + type: raw + from: + description: The source location for the copy and move operation. Mandatory + for O(op=copy) and O(op=move), ignored otherwise. + type: str + fail_test: + description: If V(false), a failed O(op=test) will return V(none). If V(true), the filter + invocation will fail with an error. + type: bool + default: false + seealso: + - name: RFC 6902 + description: JavaScript Object Notation (JSON) Patch + link: https://datatracker.ietf.org/doc/html/rfc6902 + - name: RFC 6901 + description: JavaScript Object Notation (JSON) Pointer + link: https://datatracker.ietf.org/doc/html/rfc6901 + - name: jsonpatch Python Package + description: A Python library for applying JSON patches + link: https://pypi.org/project/jsonpatch/ + +RETURN: + _value: + description: A modified object or V(none) if O(op=test), O(fail_test=false) and the test failed. + type: any + returned: always + +EXAMPLES: | + - name: Insert a new element into an array at a specified index + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('add', '/1', {'baz': 'qux'}) }}" + vars: + input: ["foo": { "one": 1 }, "bar": { "two": 2 }] + # => [{"foo": {"one": 1}}, {"baz": "qux"}, {"bar": {"two": 2}}] + + - name: Insert a new key into a dictionary + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('add', '/bar/baz', 'qux') }}" + vars: + input: { "foo": { "one": 1 }, "bar": { "two": 2 } } + # => {"foo": {"one": 1}, "bar": {"baz": "qux", "two": 2}} + + - name: Input is a string + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('add', '/baz', 3) }}" + vars: + input: '{ "foo": { "one": 1 }, "bar": { "two": 2 } }' + # => {"foo": {"one": 1}, "bar": { "two": 2 }, "baz": 3} + + - name: Existing key is replaced + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('add', '/bar', 'qux') }}" + vars: + input: { "foo": { "one": 1 }, "bar": { "two": 2 } } + # => {"foo": {"one": 1}, "bar": "qux"} + + - name: Escaping tilde as ~0 and slash as ~1 in the path + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('add', '/~0~1', 'qux') }}" + vars: + input: {} + # => {"~/": "qux"} + + - name: Add at the end of the array + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('add', '/-', 4) }}" + vars: + input: [1, 2, 3] + # => [1, 2, 3, 4] + + - name: Remove a key + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('remove', '/bar') }}" + vars: + input: { "foo": { "one": 1 }, "bar": { "two": 2 } } + # => {"foo": {"one": 1} } + + - name: Replace a value + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('replace', '/bar', 2) }}" + vars: + input: { "foo": { "one": 1 }, "bar": { "two": 2 } } + # => {"foo": {"one": 1}, "bar": 2} + + - name: Copy a value + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('copy', '/baz', from='/bar') }}" + vars: + input: { "foo": { "one": 1 }, "bar": { "two": 2 } } + # => {"foo": {"one": 1}, "bar": { "two": 2 }, "baz": { "two": 2 }} + + - name: Move a value 
+    ansible.builtin.debug:
+      msg: "{{ input | community.general.json_patch('move', '/baz', from='/bar') }}"
+    vars:
+      input: { "foo": { "one": 1 }, "bar": { "two": 2 } }
+    # => {"foo": {"one": 1}, "baz": { "two": 2 }}
+
+  - name: Successful test
+    ansible.builtin.debug:
+      msg: "{{ input | community.general.json_patch('test', '/bar/two', 2) | ternary('OK', 'Failed') }}"
+    vars:
+      input: { "foo": { "one": 1 }, "bar": { "two": 2 } }
+    # => OK
+
+  - name: Unsuccessful test
+    ansible.builtin.debug:
+      msg: "{{ input | community.general.json_patch('test', '/bar/two', 9) | ternary('OK', 'Failed') }}"
+    vars:
+      input: { "foo": { "one": 1 }, "bar": { "two": 2 } }
+    # => Failed
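
As the two test examples above show, a failed test operation yields V(none) by default rather than an error, so an expression can fall back on the original document; with fail_test=true the task fails instead. A short sketch of both behaviours (the data is illustrative):

- name: A failed test returns none by default, so provide a fallback
  ansible.builtin.set_fact:
    checked: "{{ (doc | community.general.json_patch('test', '/state', 'ready')) or doc }}"
  vars:
    doc: {"state": "pending"}

- name: With fail_test=true the same failed test aborts the task instead
  ansible.builtin.debug:
    msg: "{{ doc | community.general.json_patch('test', '/state', 'ready', fail_test=true) }}"
  vars:
    doc: {"state": "pending"}
  ignore_errors: true

diff --git a/plugins/filter/json_patch_recipe.yml b/plugins/filter/json_patch_recipe.yml
new file mode 100644
index 0000000000..671600b941
--- /dev/null
+++ b/plugins/filter/json_patch_recipe.yml
@@ -0,0 +1,102 @@
+---
+# Copyright (c) Stanislav Meduna (@numo68)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+  name: json_patch_recipe
+  short_description: Apply JSON-Patch (RFC 6902) operations to an object
+  description:
+    - This filter sequentially applies JSON patch operations and returns a modified object.
+    - If there is a test operation in the list, the filter continues if the test
+      succeeded and returns a V(none) value otherwise.
+  requirements:
+    - jsonpatch
+  version_added: 10.3.0
+  author:
+    - Stanislav Meduna (@numo68)
+  positional: operations, fail_test
+  options:
+    _input:
+      description: A list or a dictionary representing a JSON object, or a string containing a JSON object.
+      type: raw
+      required: true
+    operations:
+      description: A list of JSON patch operations to apply.
+      type: list
+      elements: dict
+      required: true
+      suboptions:
+        op:
+          description: Operation to perform (see L(RFC 6902, https://datatracker.ietf.org/doc/html/rfc6902)).
+          type: str
+          choices: [add, copy, move, remove, replace, test]
+          required: true
+        path:
+          description: JSON Pointer path to the target location (see L(RFC 6901, https://datatracker.ietf.org/doc/html/rfc6901)).
+          type: str
+          required: true
+        value:
+          description: Value to use in the operation. Ignored for O(operations[].op=copy), O(operations[].op=move), and O(operations[].op=remove).
+          type: raw
+        from:
+          description: The source location for the copy and move operation. Mandatory
+            for O(operations[].op=copy) and O(operations[].op=move), ignored otherwise.
+          type: str
+    fail_test:
+      description: If V(false), a failed O(operations[].op=test) will return V(none). If V(true), the filter
+        invocation will fail with an error.
+      type: bool
+      default: false
+  seealso:
+    - name: RFC 6902
+      description: JavaScript Object Notation (JSON) Patch
+      link: https://datatracker.ietf.org/doc/html/rfc6902
+    - name: RFC 6901
+      description: JavaScript Object Notation (JSON) Pointer
+      link: https://datatracker.ietf.org/doc/html/rfc6901
+    - name: jsonpatch Python Package
+      description: A Python library for applying JSON patches
+      link: https://pypi.org/project/jsonpatch/
+
+RETURN:
+  _value:
+    description: A modified object or V(none) if O(operations[].op=test), O(fail_test=false)
+      and the test failed.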
+ type: any + returned: always + +EXAMPLES: | + - name: Apply a series of operations + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch_recipe(operations) }}" + vars: + input: {} + operations: + - op: 'add' + path: '/foo' + value: 1 + - op: 'add' + path: '/bar' + value: [] + - op: 'add' + path: '/bar/-' + value: 2 + - op: 'add' + path: '/bar/0' + value: 1 + - op: 'remove' + path: '/bar/0' + - op: 'move' + from: '/foo' + path: '/baz' + - op: 'copy' + from: '/baz' + path: '/bax' + - op: 'copy' + from: '/baz' + path: '/bay' + - op: 'replace' + path: '/baz' + value: [10, 20, 30] + # => {"bar":[2],"bax":1,"bay":1,"baz":[10,20,30]} diff --git a/plugins/filter/json_query.py b/plugins/filter/json_query.py index 9e8fa4ef2e..8976694a94 100644 --- a/plugins/filter/json_query.py +++ b/plugins/filter/json_query.py @@ -3,32 +3,31 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: json_query - short_description: Select a single element or a data subset from a complex data structure - description: - - This filter lets you query a complex JSON structure and iterate over it using a loop structure. - positional: expr - options: - _input: - description: - - The JSON data to query. - type: any - required: true - expr: - description: - - The query expression. - - See U(http://jmespath.org/examples.html) for examples. - type: string - required: true - requirements: - - jmespath -''' +DOCUMENTATION = r""" +name: json_query +short_description: Select a single element or a data subset from a complex data structure +description: + - This filter lets you query a complex JSON structure and iterate over it using a loop structure. +positional: expr +options: + _input: + description: + - The JSON data to query. + type: any + required: true + expr: + description: + - The query expression. + - See U(http://jmespath.org/examples.html) for examples. + type: string + required: true +requirements: + - jmespath +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Define data to work on in the examples below ansible.builtin.set_fact: domain_definition: @@ -99,13 +98,13 @@ EXAMPLES = ''' msg: "{{ domain_definition | to_json | from_json | community.general.json_query(server_name_query) }}" vars: server_name_query: "domain.server[?contains(name,'server1')].port" -''' +""" -RETURN = ''' - _value: - description: The result of the query. - type: any -''' +RETURN = r""" +_value: + description: The result of the query. + type: any +""" from ansible.errors import AnsibleError, AnsibleFilterError diff --git a/plugins/filter/keep_keys.py b/plugins/filter/keep_keys.py index 97b706a950..98b34b4197 100644 --- a/plugins/filter/keep_keys.py +++ b/plugins/filter/keep_keys.py @@ -4,102 +4,101 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: keep_keys - short_description: Keep specific keys from dictionaries in a list - version_added: "9.1.0" - author: - - Vladimir Botka (@vbotka) - - Felix Fontein (@felixfontein) - description: This filter keeps only specified keys from a provided list of dictionaries. 
-  options:
-    _input:
-      description:
-        - A list of dictionaries.
-        - Top level keys must be strings.
-      type: list
-      elements: dictionary
-      required: true
-    target:
-      description:
-        - A single key or key pattern to keep, or a list of keys or keys patterns to keep.
-        - If O(matching_parameter=regex) there must be exactly one pattern provided.
-      type: raw
-      required: true
-    matching_parameter:
-      description: Specify the matching option of target keys.
-      type: str
-      default: equal
-      choices:
-        equal: Matches keys of exactly one of the O(target) items.
-        starts_with: Matches keys that start with one of the O(target) items.
-        ends_with: Matches keys that end with one of the O(target) items.
-        regex:
-          - Matches keys that match the regular expresion provided in O(target).
-          - In this case, O(target) must be a regex string or a list with single regex string.
-'''
+DOCUMENTATION = r"""
+name: keep_keys
+short_description: Keep specific keys from dictionaries in a list
+version_added: "9.1.0"
+author:
+  - Vladimir Botka (@vbotka)
+  - Felix Fontein (@felixfontein)
+description: This filter keeps only specified keys from a provided list of dictionaries.
+options:
+  _input:
+    description:
+      - A list of dictionaries.
+      - Top level keys must be strings.
+    type: list
+    elements: dictionary
+    required: true
+  target:
+    description:
+      - A single key or key pattern to keep, or a list of keys or key patterns to keep.
+      - If O(matching_parameter=regex) there must be exactly one pattern provided.
+    type: raw
+    required: true
+  matching_parameter:
+    description: Specify the matching option of target keys.
+    type: str
+    default: equal
+    choices:
+      equal: Matches keys of exactly one of the O(target) items.
+      starts_with: Matches keys that start with one of the O(target) items.
+      ends_with: Matches keys that end with one of the O(target) items.
+      regex:
+        - Matches keys that match the regular expression provided in O(target).
+        - In this case, O(target) must be a regex string or a list with a single regex string.
+"""
 
-EXAMPLES = '''
-  l:
+EXAMPLES = r"""
+- l:
   - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo}
   - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar}
 
 # 1) By default match keys that equal any of the items in the target.
-  t: [k0_x0, k1_x1]
+- t: [k0_x0, k1_x1]
   r: "{{ l | community.general.keep_keys(target=t) }}"
 
 # 2) Match keys that start with any of the items in the target.
-  t: [k0, k1]
+- t: [k0, k1]
   r: "{{ l | community.general.keep_keys(target=t, matching_parameter='starts_with') }}"
 
 # 3) Match keys that end with any of the items in target.
-  t: [x0, x1]
+- t: [x0, x1]
   r: "{{ l | community.general.keep_keys(target=t, matching_parameter='ends_with') }}"
 
 # 4) Match keys by the regex.
-  t: ['^.*[01]_x.*$']
+- t: ['^.*[01]_x.*$']
   r: "{{ l | community.general.keep_keys(target=t, matching_parameter='regex') }}"
 
 # 5) Match keys by the regex.
-  t: '^.*[01]_x.*$'
+- t: '^.*[01]_x.*$'
   r: "{{ l | community.general.keep_keys(target=t, matching_parameter='regex') }}"
 
 # The results of above examples 1-5 are all the same.
-  r:
+- r:
   - {k0_x0: A0, k1_x1: B0}
   - {k0_x0: A1, k1_x1: B1}
 
 # 6) By default match keys that equal the target.
-  t: k0_x0
+- t: k0_x0
   r: "{{ l | community.general.keep_keys(target=t) }}"
 
 # 7) Match keys that start with the target.
-  t: k0
+- t: k0
   r: "{{ l | community.general.keep_keys(target=t, matching_parameter='starts_with') }}"
 
 # 8) Match keys that end with the target.
- t: x0 +- t: x0 r: "{{ l | community.general.keep_keys(target=t, matching_parameter='ends_with') }}" # 9) Match keys by the regex. - t: '^.*0_x.*$' +- t: '^.*0_x.*$' r: "{{ l | community.general.keep_keys(target=t, matching_parameter='regex') }}" # The results of above examples 6-9 are all the same. - r: +- r: - {k0_x0: A0} - {k0_x0: A1} -''' +""" -RETURN = ''' - _value: - description: The list of dictionaries with selected keys. - type: list - elements: dictionary -''' +RETURN = r""" +_value: + description: The list of dictionaries with selected keys. + type: list + elements: dictionary +""" from ansible_collections.community.general.plugins.plugin_utils.keys_filter import ( _keys_filter_params, diff --git a/plugins/filter/lists.py b/plugins/filter/lists.py index d16f955c22..707ec9f1fe 100644 --- a/plugins/filter/lists.py +++ b/plugins/filter/lists.py @@ -3,8 +3,7 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations from ansible.errors import AnsibleFilterError from ansible.module_utils.common.collections import is_sequence diff --git a/plugins/filter/lists_difference.yml b/plugins/filter/lists_difference.yml index 9806a9f0bc..630e77cf0a 100644 --- a/plugins/filter/lists_difference.yml +++ b/plugins/filter/lists_difference.yml @@ -31,7 +31,7 @@ EXAMPLES: | list1: [1, 2, 5, 3, 4, 10] list2: [1, 2, 3, 4, 5, 11, 99] # => [10] - + - name: Return the difference of list1, list2 and list3. ansible.builtin.debug: msg: "{{ [list1, list2, list3] | community.general.lists_difference(flatten=true) }}" diff --git a/plugins/filter/lists_intersect.yml b/plugins/filter/lists_intersect.yml index 8253463dee..d2ea9483b1 100644 --- a/plugins/filter/lists_intersect.yml +++ b/plugins/filter/lists_intersect.yml @@ -31,7 +31,7 @@ EXAMPLES: | list1: [1, 2, 5, 3, 4, 10] list2: [1, 2, 3, 4, 5, 11, 99] # => [1, 2, 5, 3, 4] - + - name: Return the intersection of list1, list2 and list3. ansible.builtin.debug: msg: "{{ [list1, list2, list3] | community.general.lists_intersect(flatten=true) }}" diff --git a/plugins/filter/lists_mergeby.py b/plugins/filter/lists_mergeby.py index 0e47d50172..b15df2e089 100644 --- a/plugins/filter/lists_mergeby.py +++ b/plugins/filter/lists_mergeby.py @@ -3,68 +3,61 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: lists_mergeby - short_description: Merge two or more lists of dictionaries by a given attribute - version_added: 2.0.0 - author: Vladimir Botka (@vbotka) - description: - - Merge two or more lists by attribute O(index). Optional - parameters O(recursive) and O(list_merge) control the merging of - the nested dictionaries and lists. - - The function C(merge_hash) from C(ansible.utils.vars) is used. - - To learn details on how to use the parameters O(recursive) and - O(list_merge) see Ansible User's Guide chapter "Using filters to - manipulate data" section R(Combining hashes/dictionaries, combine_filter) or the - filter P(ansible.builtin.combine#filter). 
+DOCUMENTATION = r""" +name: lists_mergeby +short_description: Merge two or more lists of dictionaries by a given attribute +version_added: 2.0.0 +author: Vladimir Botka (@vbotka) +description: + - Merge two or more lists by attribute O(index). Optional parameters O(recursive) and O(list_merge) control the merging + of the nested dictionaries and lists. + - The function C(merge_hash) from C(ansible.utils.vars) is used. + - To learn details on how to use the parameters O(recursive) and O(list_merge) see Ansible User's Guide chapter "Using filters + to manipulate data" section R(Combining hashes/dictionaries, combine_filter) or the filter P(ansible.builtin.combine#filter). +positional: another_list, index +options: + _input: + description: + - A list of dictionaries, or a list of lists of dictionaries. + - The required type of the C(elements) is set to C(raw) because all elements of O(_input) can be either dictionaries + or lists. + type: list + elements: raw + required: true + another_list: + description: + - Another list of dictionaries, or a list of lists of dictionaries. + - This parameter can be specified multiple times. + type: list + elements: raw + index: + description: + - The dictionary key that must be present in every dictionary in every list that is used to merge the lists. + type: string + required: true + recursive: + description: + - Should the combine recursively merge nested dictionaries (hashes). + - B(Note:) It does not depend on the value of the C(hash_behaviour) setting in C(ansible.cfg). + type: boolean + default: false + list_merge: + description: + - Modifies the behaviour when the dictionaries (hashes) to merge contain arrays/lists. + type: string + default: replace + choices: + - replace + - keep + - append + - prepend + - append_rp + - prepend_rp +""" - positional: another_list, index - options: - _input: - description: - - A list of dictionaries, or a list of lists of dictionaries. - - The required type of the C(elements) is set to C(raw) - because all elements of O(_input) can be either dictionaries - or lists. - type: list - elements: raw - required: true - another_list: - description: - - Another list of dictionaries, or a list of lists of dictionaries. - - This parameter can be specified multiple times. - type: list - elements: raw - index: - description: - - The dictionary key that must be present in every dictionary in every list that is used to - merge the lists. - type: string - required: true - recursive: - description: - - Should the combine recursively merge nested dictionaries (hashes). - - "B(Note:) It does not depend on the value of the C(hash_behaviour) setting in C(ansible.cfg)." - type: boolean - default: false - list_merge: - description: - - Modifies the behaviour when the dictionaries (hashes) to merge contain arrays/lists. - type: string - default: replace - choices: - - replace - - keep - - append - - prepend - - append_rp - - prepend_rp -''' - -EXAMPLES = ''' +EXAMPLES = r""" # Some results below are manually formatted for better readability. The # dictionaries' keys will be sorted alphabetically in real output. @@ -193,14 +186,14 @@ EXAMPLES = ''' # r: # - {index: a, foo: {x:1, y: 3, z: 4}} # - {index: b, foo: [Y1, Y2]} -''' +""" -RETURN = ''' - _value: - description: The merged list. - type: list - elements: dictionary -''' +RETURN = r""" +_value: + description: The merged list. 
+ type: list + elements: dictionary +""" from ansible.errors import AnsibleFilterError from ansible.module_utils.six import string_types diff --git a/plugins/filter/lists_symmetric_difference.yml b/plugins/filter/lists_symmetric_difference.yml index d985704c2c..abd8caab8a 100644 --- a/plugins/filter/lists_symmetric_difference.yml +++ b/plugins/filter/lists_symmetric_difference.yml @@ -31,7 +31,7 @@ EXAMPLES: | list1: [1, 2, 5, 3, 4, 10] list2: [1, 2, 3, 4, 5, 11, 99] # => [10, 11, 99] - + - name: Return the symmetric difference of list1, list2 and list3. ansible.builtin.debug: msg: "{{ [list1, list2, list3] | community.general.lists_symmetric_difference(flatten=true) }}" diff --git a/plugins/filter/lists_union.yml b/plugins/filter/lists_union.yml index ba69090836..8c1ffb4f87 100644 --- a/plugins/filter/lists_union.yml +++ b/plugins/filter/lists_union.yml @@ -32,7 +32,7 @@ EXAMPLES: | list2: [1, 2, 3, 4, 5, 11, 99] list3: [1, 2, 3, 4, 5, 10, 99, 101] # => [1, 2, 5, 3, 4, 10, 11, 99, 101] - + - name: Return the union of list1 and list2. ansible.builtin.debug: msg: "{{ [list1, list2] | community.general.lists_union(flatten=true) }}" diff --git a/plugins/filter/random_mac.py b/plugins/filter/random_mac.py index 662c62b07c..1ece58230c 100644 --- a/plugins/filter/random_mac.py +++ b/plugins/filter/random_mac.py @@ -4,28 +4,27 @@ # SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: random_mac - short_description: Generate a random MAC address - description: - - Generates random networking interfaces MAC addresses for a given prefix. - options: - _input: - description: A string prefix to use as a basis for the random MAC generated. - type: string - required: true - seed: - description: - - A randomization seed to initialize the process, used to get repeatable results. - - If no seed is provided, a system random source such as C(/dev/urandom) is used. - required: false - type: string -''' +DOCUMENTATION = r""" +name: random_mac +short_description: Generate a random MAC address +description: + - Generates random networking interfaces MAC addresses for a given prefix. +options: + _input: + description: A string prefix to use as a basis for the random MAC generated. + type: string + required: true + seed: + description: + - A randomization seed to initialize the process, used to get repeatable results. + - If no seed is provided, a system random source such as C(/dev/urandom) is used. + required: false + type: string +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Random MAC given a prefix ansible.builtin.debug: msg: "{{ '52:54:00' | community.general.random_mac }}" @@ -34,13 +33,13 @@ EXAMPLES = ''' - name: With a seed ansible.builtin.debug: msg: "{{ '52:54:00' | community.general.random_mac(seed=inventory_hostname) }}" -''' +""" -RETURN = ''' - _value: - description: The generated MAC. - type: string -''' +RETURN = r""" +_value: + description: The generated MAC. 
+  type: string
+"""
 
 import re
 from random import Random, SystemRandom
diff --git a/plugins/filter/remove_keys.py b/plugins/filter/remove_keys.py
index 7a4d912d34..2058803138 100644
--- a/plugins/filter/remove_keys.py
+++ b/plugins/filter/remove_keys.py
@@ -4,102 +4,101 @@
 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
 # SPDX-License-Identifier: GPL-3.0-or-later
 
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
 
-DOCUMENTATION = '''
-  name: remove_keys
-  short_description: Remove specific keys from dictionaries in a list
-  version_added: "9.1.0"
-  author:
-    - Vladimir Botka (@vbotka)
-    - Felix Fontein (@felixfontein)
-  description: This filter removes only specified keys from a provided list of dictionaries.
-  options:
-    _input:
-      description:
-        - A list of dictionaries.
-        - Top level keys must be strings.
-      type: list
-      elements: dictionary
-      required: true
-    target:
-      description:
-        - A single key or key pattern to remove, or a list of keys or keys patterns to remove.
-        - If O(matching_parameter=regex) there must be exactly one pattern provided.
-      type: raw
-      required: true
-    matching_parameter:
-      description: Specify the matching option of target keys.
-      type: str
-      default: equal
-      choices:
-        equal: Matches keys of exactly one of the O(target) items.
-        starts_with: Matches keys that start with one of the O(target) items.
-        ends_with: Matches keys that end with one of the O(target) items.
-        regex:
-          - Matches keys that match the regular expresion provided in O(target).
-          - In this case, O(target) must be a regex string or a list with single regex string.
-'''
+DOCUMENTATION = r"""
+name: remove_keys
+short_description: Remove specific keys from dictionaries in a list
+version_added: "9.1.0"
+author:
+  - Vladimir Botka (@vbotka)
+  - Felix Fontein (@felixfontein)
+description: This filter removes only specified keys from a provided list of dictionaries.
+options:
+  _input:
+    description:
+      - A list of dictionaries.
+      - Top level keys must be strings.
+    type: list
+    elements: dictionary
+    required: true
+  target:
+    description:
+      - A single key or key pattern to remove, or a list of keys or key patterns to remove.
+      - If O(matching_parameter=regex) there must be exactly one pattern provided.
+    type: raw
+    required: true
+  matching_parameter:
+    description: Specify the matching option of target keys.
+    type: str
+    default: equal
+    choices:
+      equal: Matches keys of exactly one of the O(target) items.
+      starts_with: Matches keys that start with one of the O(target) items.
+      ends_with: Matches keys that end with one of the O(target) items.
+      regex:
+        - Matches keys that match the regular expression provided in O(target).
+        - In this case, O(target) must be a regex string or a list with a single regex string.
+"""
 
-EXAMPLES = '''
-  l:
+EXAMPLES = r"""
+- l:
   - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo}
   - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar}
 
 # 1) By default match keys that equal any of the items in the target.
-  t: [k0_x0, k1_x1]
+- t: [k0_x0, k1_x1]
   r: "{{ l | community.general.remove_keys(target=t) }}"
 
 # 2) Match keys that start with any of the items in the target.
-  t: [k0, k1]
+- t: [k0, k1]
   r: "{{ l | community.general.remove_keys(target=t, matching_parameter='starts_with') }}"
 
 # 3) Match keys that end with any of the items in target.
- t: [x0, x1] +- t: [x0, x1] r: "{{ l | community.general.remove_keys(target=t, matching_parameter='ends_with') }}" # 4) Match keys by the regex. - t: ['^.*[01]_x.*$'] +- t: ['^.*[01]_x.*$'] r: "{{ l | community.general.remove_keys(target=t, matching_parameter='regex') }}" # 5) Match keys by the regex. - t: '^.*[01]_x.*$' +- t: '^.*[01]_x.*$' r: "{{ l | community.general.remove_keys(target=t, matching_parameter='regex') }}" # The results of above examples 1-5 are all the same. - r: +- r: - {k2_x2: [C0], k3_x3: foo} - {k2_x2: [C1], k3_x3: bar} # 6) By default match keys that equal the target. - t: k0_x0 +- t: k0_x0 r: "{{ l | community.general.remove_keys(target=t) }}" # 7) Match keys that start with the target. - t: k0 +- t: k0 r: "{{ l | community.general.remove_keys(target=t, matching_parameter='starts_with') }}" # 8) Match keys that end with the target. - t: x0 +- t: x0 r: "{{ l | community.general.remove_keys(target=t, matching_parameter='ends_with') }}" # 9) Match keys by the regex. - t: '^.*0_x.*$' +- t: '^.*0_x.*$' r: "{{ l | community.general.remove_keys(target=t, matching_parameter='regex') }}" # The results of above examples 6-9 are all the same. - r: +- r: - {k1_x1: B0, k2_x2: [C0], k3_x3: foo} - {k1_x1: B1, k2_x2: [C1], k3_x3: bar} -''' +""" -RETURN = ''' - _value: - description: The list of dictionaries with selected keys removed. - type: list - elements: dictionary -''' +RETURN = r""" +_value: + description: The list of dictionaries with selected keys removed. + type: list + elements: dictionary +""" from ansible_collections.community.general.plugins.plugin_utils.keys_filter import ( _keys_filter_params, diff --git a/plugins/filter/replace_keys.py b/plugins/filter/replace_keys.py index 70b264eba6..69fe02832b 100644 --- a/plugins/filter/replace_keys.py +++ b/plugins/filter/replace_keys.py @@ -4,132 +4,131 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: replace_keys - short_description: Replace specific keys in a list of dictionaries - version_added: "9.1.0" - author: - - Vladimir Botka (@vbotka) - - Felix Fontein (@felixfontein) - description: This filter replaces specified keys in a provided list of dictionaries. - options: - _input: +DOCUMENTATION = r""" +name: replace_keys +short_description: Replace specific keys in a list of dictionaries +version_added: "9.1.0" +author: + - Vladimir Botka (@vbotka) + - Felix Fontein (@felixfontein) +description: This filter replaces specified keys in a provided list of dictionaries. +options: + _input: + description: + - A list of dictionaries. + - Top level keys must be strings. + type: list + elements: dictionary + required: true + target: + description: + - A list of dictionaries with attributes C(before) and C(after). + - The value of O(target[].after) replaces key matching O(target[].before). + type: list + elements: dictionary + required: true + suboptions: + before: description: - - A list of dictionaries. - - Top level keys must be strings. - type: list - elements: dictionary - required: true - target: - description: - - A list of dictionaries with attributes C(before) and C(after). - - The value of O(target[].after) replaces key matching O(target[].before). 
- type: list - elements: dictionary - required: true - suboptions: - before: - description: - - A key or key pattern to change. - - The interpretation of O(target[].before) depends on O(matching_parameter). - - For a key that matches multiple O(target[].before)s, the B(first) matching O(target[].after) will be used. - type: str - after: - description: A matching key change to. - type: str - matching_parameter: - description: Specify the matching option of target keys. + - A key or key pattern to change. + - The interpretation of O(target[].before) depends on O(matching_parameter). + - For a key that matches multiple O(target[].before)s, the B(first) matching O(target[].after) is used. type: str - default: equal - choices: - equal: Matches keys of exactly one of the O(target[].before) items. - starts_with: Matches keys that start with one of the O(target[].before) items. - ends_with: Matches keys that end with one of the O(target[].before) items. - regex: Matches keys that match one of the regular expressions provided in O(target[].before). -''' + after: + description: A matching key change to. + type: str + matching_parameter: + description: Specify the matching option of target keys. + type: str + default: equal + choices: + equal: Matches keys of exactly one of the O(target[].before) items. + starts_with: Matches keys that start with one of the O(target[].before) items. + ends_with: Matches keys that end with one of the O(target[].before) items. + regex: Matches keys that match one of the regular expressions provided in O(target[].before). +""" -EXAMPLES = ''' - l: +EXAMPLES = r""" +- l: - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} # 1) By default, replace keys that are equal any of the attributes before. - t: +- t: - {before: k0_x0, after: a0} - {before: k1_x1, after: a1} r: "{{ l | community.general.replace_keys(target=t) }}" # 2) Replace keys that starts with any of the attributes before. - t: +- t: - {before: k0, after: a0} - {before: k1, after: a1} r: "{{ l | community.general.replace_keys(target=t, matching_parameter='starts_with') }}" # 3) Replace keys that ends with any of the attributes before. - t: +- t: - {before: x0, after: a0} - {before: x1, after: a1} r: "{{ l | community.general.replace_keys(target=t, matching_parameter='ends_with') }}" # 4) Replace keys that match any regex of the attributes before. - t: +- t: - {before: "^.*0_x.*$", after: a0} - {before: "^.*1_x.*$", after: a1} r: "{{ l | community.general.replace_keys(target=t, matching_parameter='regex') }}" # The results of above examples 1-4 are all the same. - r: +- r: - {a0: A0, a1: B0, k2_x2: [C0], k3_x3: foo} - {a0: A1, a1: B1, k2_x2: [C1], k3_x3: bar} # 5) If more keys match the same attribute before the last one will be used. - t: +- t: - {before: "^.*_x.*$", after: X} r: "{{ l | community.general.replace_keys(target=t, matching_parameter='regex') }}" # gives - r: +- r: - X: foo - X: bar # 6) If there are items with equal attribute before the first one will be used. - t: +- t: - {before: "^.*_x.*$", after: X} - {before: "^.*_x.*$", after: Y} r: "{{ l | community.general.replace_keys(target=t, matching_parameter='regex') }}" # gives - r: +- r: - X: foo - X: bar # 7) If there are more matches for a key the first one will be used. 
- l: +- l: - {aaa1: A, bbb1: B, ccc1: C} - {aaa2: D, bbb2: E, ccc2: F} - t: +- t: - {before: a, after: X} - {before: aa, after: Y} r: "{{ l | community.general.replace_keys(target=t, matching_parameter='starts_with') }}" # gives - r: +- r: - {X: A, bbb1: B, ccc1: C} - {X: D, bbb2: E, ccc2: F} -''' +""" -RETURN = ''' - _value: - description: The list of dictionaries with replaced keys. - type: list - elements: dictionary -''' +RETURN = r""" +_value: + description: The list of dictionaries with replaced keys. + type: list + elements: dictionary +""" from ansible_collections.community.general.plugins.plugin_utils.keys_filter import ( _keys_filter_params, diff --git a/plugins/filter/reveal_ansible_type.py b/plugins/filter/reveal_ansible_type.py index 916aaff930..f2f0d6780b 100644 --- a/plugins/filter/reveal_ansible_type.py +++ b/plugins/filter/reveal_ansible_type.py @@ -3,119 +3,132 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: reveal_ansible_type - short_description: Return input type - version_added: "9.2.0" - author: Vladimir Botka (@vbotka) - description: This filter returns input type. - options: - _input: - description: Input data. - type: raw - required: true - alias: - description: Data type aliases. - default: {} - type: dictionary -''' +DOCUMENTATION = r""" +name: reveal_ansible_type +short_description: Return input type +version_added: "9.2.0" +author: Vladimir Botka (@vbotka) +description: This filter returns input type. +options: + _input: + description: Input data. + type: raw + required: true + alias: + description: Data type aliases. + default: {} + type: dictionary +""" -EXAMPLES = ''' -# Substitution converts str to AnsibleUnicode -# ------------------------------------------- +EXAMPLES = r""" +# Substitution converts str to AnsibleUnicode or _AnsibleTaggedStr +# ---------------------------------------------------------------- -# String. AnsibleUnicode. -data: "abc" -result: '{{ data | community.general.reveal_ansible_type }}' -# result => AnsibleUnicode +# String. AnsibleUnicode or _AnsibleTaggedStr. +- data: "abc" + result: '{{ data | community.general.reveal_ansible_type }}' +# result => AnsibleUnicode (or _AnsibleTaggedStr) -# String. AnsibleUnicode alias str. -alias: {"AnsibleUnicode": "str"} -data: "abc" -result: '{{ data | community.general.reveal_ansible_type(alias) }}' +# String. AnsibleUnicode/_AnsibleTaggedStr alias str. +- alias: {"AnsibleUnicode": "str", "_AnsibleTaggedStr": "str"} + data: "abc" + result: '{{ data | community.general.reveal_ansible_type(alias) }}' # result => str -# List. All items are AnsibleUnicode. -data: ["a", "b", "c"] -result: '{{ data | community.general.reveal_ansible_type }}' -# result => list[AnsibleUnicode] +# List. All items are AnsibleUnicode/_AnsibleTaggedStr. +- data: ["a", "b", "c"] + result: '{{ data | community.general.reveal_ansible_type }}' +# result => list[AnsibleUnicode] or list[_AnsibleTaggedStr] -# Dictionary. All keys are AnsibleUnicode. All values are AnsibleUnicode. -data: {"a": "foo", "b": "bar", "c": "baz"} -result: '{{ data | community.general.reveal_ansible_type }}' -# result => dict[AnsibleUnicode, AnsibleUnicode] +# Dictionary. All keys and values are AnsibleUnicode/_AnsibleTaggedStr. 
+- data: {"a": "foo", "b": "bar", "c": "baz"} + result: '{{ data | community.general.reveal_ansible_type }}' +# result => dict[AnsibleUnicode, AnsibleUnicode] or dict[_AnsibleTaggedStr, _AnsibleTaggedStr] # No substitution and no alias. Type of strings is str # ---------------------------------------------------- # String -result: '{{ "abc" | community.general.reveal_ansible_type }}' +- result: '{{ "abc" | community.general.reveal_ansible_type }}' # result => str # Integer -result: '{{ 123 | community.general.reveal_ansible_type }}' +- result: '{{ 123 | community.general.reveal_ansible_type }}' # result => int # Float -result: '{{ 123.45 | community.general.reveal_ansible_type }}' +- result: '{{ 123.45 | community.general.reveal_ansible_type }}' # result => float # Boolean -result: '{{ true | community.general.reveal_ansible_type }}' +- result: '{{ true | community.general.reveal_ansible_type }}' # result => bool # List. All items are strings. -result: '{{ ["a", "b", "c"] | community.general.reveal_ansible_type }}' +- result: '{{ ["a", "b", "c"] | community.general.reveal_ansible_type }}' # result => list[str] # List of dictionaries. -result: '{{ [{"a": 1}, {"b": 2}] | community.general.reveal_ansible_type }}' +- result: '{{ [{"a": 1}, {"b": 2}] | community.general.reveal_ansible_type }}' # result => list[dict] # Dictionary. All keys are strings. All values are integers. -result: '{{ {"a": 1} | community.general.reveal_ansible_type }}' +- result: '{{ {"a": 1} | community.general.reveal_ansible_type }}' # result => dict[str, int] # Dictionary. All keys are strings. All values are integers. -result: '{{ {"a": 1, "b": 2} | community.general.reveal_ansible_type }}' +- result: '{{ {"a": 1, "b": 2} | community.general.reveal_ansible_type }}' # result => dict[str, int] -# Type of strings is AnsibleUnicode or str -# ---------------------------------------- +# Type of strings is AnsibleUnicode, _AnsibleTaggedStr, or str +# ------------------------------------------------------------ # Dictionary. The keys are integers or strings. All values are strings. -alias: {"AnsibleUnicode": "str"} -data: {1: 'a', 'b': 'b'} -result: '{{ data | community.general.reveal_ansible_type(alias) }}' +- alias: + AnsibleUnicode: str + _AnsibleTaggedStr: str + _AnsibleTaggedInt: int + data: {1: 'a', 'b': 'b'} + result: '{{ data | community.general.reveal_ansible_type(alias) }}' # result => dict[int|str, str] # Dictionary. All keys are integers. All values are keys. -alias: {"AnsibleUnicode": "str"} -data: {1: 'a', 2: 'b'} -result: '{{ data | community.general.reveal_ansible_type(alias) }}' +- alias: + AnsibleUnicode: str + _AnsibleTaggedStr: str + _AnsibleTaggedInt: int + data: {1: 'a', 2: 'b'} + result: '{{ data | community.general.reveal_ansible_type(alias) }}' # result => dict[int, str] # Dictionary. All keys are strings. Multiple types values. -alias: {"AnsibleUnicode": "str"} -data: {'a': 1, 'b': 1.1, 'c': 'abc', 'd': True, 'e': ['x', 'y', 'z'], 'f': {'x': 1, 'y': 2}} -result: '{{ data | community.general.reveal_ansible_type(alias) }}' +- alias: + AnsibleUnicode: str + _AnsibleTaggedStr: str + _AnsibleTaggedInt: int + _AnsibleTaggedFloat: float + data: {'a': 1, 'b': 1.1, 'c': 'abc', 'd': true, 'e': ['x', 'y', 'z'], 'f': {'x': 1, 'y': 2}} + result: '{{ data | community.general.reveal_ansible_type(alias) }}' # result => dict[str, bool|dict|float|int|list|str] # List. Multiple types items. 
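How composite results such as dict[int|str, str] above, or list[bool|dict|float|int|list|str] in the example that follows, come about can be sketched as a simplified model that deduplicates and sorts member type names (the shipped plugin_utils implementation is more involved):

def member_name(value):
    return type(value).__name__  # dict -> 'dict', list -> 'list', True -> 'bool'

def type_name(value):
    if isinstance(value, dict):
        keys = '|'.join(sorted({member_name(k) for k in value}))
        vals = '|'.join(sorted({member_name(v) for v in value.values()}))
        return f'dict[{keys}, {vals}]'
    if isinstance(value, list):
        return 'list[{}]'.format('|'.join(sorted({member_name(i) for i in value})))
    return member_name(value)

assert type_name({1: 'a', 'b': 'b'}) == 'dict[int|str, str]'
assert type_name([1, 1.1, 'abc', True, [], {}]) == 'list[bool|dict|float|int|list|str]'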
-alias: {"AnsibleUnicode": "str"} -data: [1, 2, 1.1, 'abc', True, ['x', 'y', 'z'], {'x': 1, 'y': 2}] -result: '{{ data | community.general.reveal_ansible_type(alias) }}' +- alias: + AnsibleUnicode: str + _AnsibleTaggedStr: str + _AnsibleTaggedInt: int + _AnsibleTaggedFloat: float + data: [1, 2, 1.1, 'abc', true, ['x', 'y', 'z'], {'x': 1, 'y': 2}] + result: '{{ data | community.general.reveal_ansible_type(alias) }}' # result => list[bool|dict|float|int|list|str] -''' +""" -RETURN = ''' - _value: - description: Type of the data. - type: str -''' +RETURN = r""" +_value: + description: Type of the data. + type: str +""" from ansible_collections.community.general.plugins.plugin_utils.ansible_type import _ansible_type @@ -123,6 +136,7 @@ from ansible_collections.community.general.plugins.plugin_utils.ansible_type imp def reveal_ansible_type(data, alias=None): """Returns data type""" + # TODO: expose use_native_type parameter return _ansible_type(data, alias) diff --git a/plugins/filter/time.py b/plugins/filter/time.py index 25970cd260..e8a867a1fe 100644 --- a/plugins/filter/time.py +++ b/plugins/filter/time.py @@ -3,8 +3,7 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re from ansible.errors import AnsibleFilterError diff --git a/plugins/filter/to_days.yml b/plugins/filter/to_days.yml index b5f6424fa3..c76697f1ee 100644 --- a/plugins/filter/to_days.yml +++ b/plugins/filter/to_days.yml @@ -5,7 +5,7 @@ DOCUMENTATION: name: to_days - short_description: Converte a duration string to days + short_description: Converts a duration string to days version_added: 0.2.0 description: - Parse a human readable time duration string and convert to days. diff --git a/plugins/filter/to_hours.yml b/plugins/filter/to_hours.yml index 353fdfc317..520740897b 100644 --- a/plugins/filter/to_hours.yml +++ b/plugins/filter/to_hours.yml @@ -5,7 +5,7 @@ DOCUMENTATION: name: to_hours - short_description: Converte a duration string to hours + short_description: Converts a duration string to hours version_added: 0.2.0 description: - Parse a human readable time duration string and convert to hours. diff --git a/plugins/filter/to_ini.py b/plugins/filter/to_ini.py index 2bc41f6962..4be1a684e7 100644 --- a/plugins/filter/to_ini.py +++ b/plugins/filter/to_ini.py @@ -4,36 +4,36 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -DOCUMENTATION = r''' - name: to_ini - short_description: Converts a dictionary to the INI file format - version_added: 8.2.0 - author: Steffen Scheib (@sscheib) - description: - - Converts a dictionary to the INI file format. - options: - _input: - description: The dictionary that should be converted to the INI format. - type: dictionary - required: true -''' +DOCUMENTATION = r""" +name: to_ini +short_description: Converts a dictionary to the INI file format +version_added: 8.2.0 +author: Steffen Scheib (@sscheib) +description: + - Converts a dictionary to the INI file format. +options: + _input: + description: The dictionary that should be converted to the INI format. 
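For intuition, the conversion to_ini performs can be approximated with the standard library. This is a sketch only; the filter's actual implementation (below, built on StringIO) differs in details such as option ordering:

import configparser
import io

def dict_to_ini(data):
    parser = configparser.ConfigParser()
    parser.read_dict(data)  # expects {'section': {'key': 'value'}}
    buf = io.StringIO()
    parser.write(buf)
    return buf.getvalue()

assert dict_to_ini({'section_name': {'key_name': 'key value'}}) == '[section_name]\nkey_name = key value\n\n'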
+  type: dictionary
+  required: true
+"""

-EXAMPLES = r'''
-  - name: Define a dictionary
-    ansible.builtin.set_fact:
-      my_dict:
-        section_name:
-          key_name: 'key value'
+EXAMPLES = r"""
+- name: Define a dictionary
+  ansible.builtin.set_fact:
+    my_dict:
+      section_name:
+        key_name: 'key value'

-        another_section:
-          connection: 'ssh'
+      another_section:
+        connection: 'ssh'

-  - name: Write dictionary to INI file
-    ansible.builtin.copy:
-      dest: /tmp/test.ini
-      content: '{{ my_dict | community.general.to_ini }}'
+- name: Write dictionary to INI file
+  ansible.builtin.copy:
+    dest: /tmp/test.ini
+    content: '{{ my_dict | community.general.to_ini }}'

 # /tmp/test.ini will look like this:
 # [section_name]
@@ -41,17 +41,15 @@ EXAMPLES = r'''
 #
 # [another_section]
 # connection = ssh
-'''
+"""

-RETURN = r'''
-  _value:
-    description: A string formatted as INI file.
-    type: string
-'''
+RETURN = r"""
+_value:
+  description: A string formatted as INI file.
+  type: string
+"""

-__metaclass__ = type
-
 from ansible.errors import AnsibleFilterError
 from ansible.module_utils.common._collections_compat import Mapping
 from ansible.module_utils.six.moves import StringIO
diff --git a/plugins/filter/to_milliseconds.yml b/plugins/filter/to_milliseconds.yml
index 19ed02438c..f25bd86623 100644
--- a/plugins/filter/to_milliseconds.yml
+++ b/plugins/filter/to_milliseconds.yml
@@ -5,7 +5,7 @@
 DOCUMENTATION:
   name: to_milliseconds
-  short_description: Converte a duration string to milliseconds
+  short_description: Converts a duration string to milliseconds
   version_added: 0.2.0
   description:
     - Parse a human readable time duration string and convert to milliseconds.
diff --git a/plugins/filter/to_minutes.yml b/plugins/filter/to_minutes.yml
index e8d6f763a8..924fb6feb3 100644
--- a/plugins/filter/to_minutes.yml
+++ b/plugins/filter/to_minutes.yml
@@ -5,7 +5,7 @@
 DOCUMENTATION:
   name: to_minutes
-  short_description: Converte a duration string to minutes
+  short_description: Converts a duration string to minutes
   version_added: 0.2.0
   description:
     - Parse a human readable time duration string and convert to minutes.
diff --git a/plugins/filter/to_months.yml b/plugins/filter/to_months.yml
index 1f1cd661d8..09e9c38b5d 100644
--- a/plugins/filter/to_months.yml
+++ b/plugins/filter/to_months.yml
@@ -5,7 +5,7 @@
 DOCUMENTATION:
   name: to_months
-  short_description: Converte a duration string to months
+  short_description: Converts a duration string to months
   version_added: 0.2.0
   description:
     - Parse a human readable time duration string and convert to months.
diff --git a/plugins/filter/to_prettytable.py b/plugins/filter/to_prettytable.py
new file mode 100644
index 0000000000..269ac318ff
--- /dev/null
+++ b/plugins/filter/to_prettytable.py
@@ -0,0 +1,411 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2025, Timur Gadiev
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+
+DOCUMENTATION = r"""
+name: to_prettytable
+short_description: Format a list of dictionaries as an ASCII table
+version_added: "10.7.0"
+author: Timur Gadiev (@tgadiev)
+description:
+  - This filter takes a list of dictionaries and formats it as an ASCII table using the I(prettytable) Python library.
+requirements:
+  - prettytable
+options:
+  _input:
+    description: A list of dictionaries to format.
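The heavy lifting is done by the third-party prettytable package; stripped of the validation and key-matching logic added below, the core of the filter boils down to a few calls (a sketch, assuming the package is installed):

import prettytable

table = prettytable.PrettyTable()
table.field_names = ['name', 'age', 'role']
table.align['age'] = 'r'  # prettytable stores alignments as 'l', 'c' or 'r'
table.add_row(['Alice', 25, 'admin'])
table.add_row(['Bob', 30, 'user'])
print(table)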
+ type: list + elements: dictionary + required: true + column_order: + description: List of column names to specify the order of columns in the table. + type: list + elements: string + header_names: + description: List of custom header names to use instead of dictionary keys. + type: list + elements: string + column_alignments: + description: + - Dictionary where keys are column names and values are alignment settings. Valid alignment values are C(left), C(center), + C(right), C(l), C(c), or C(r). + - "For example, V({'name': 'left', 'id': 'right'}) aligns the C(name) column to the left and the C(id) column to the + right." + type: dictionary +""" + +EXAMPLES = r""" +- name: Set a list of users + ansible.builtin.set_fact: + users: + - name: Alice + age: 25 + role: admin + - name: Bob + age: 30 + role: user + +- name: Display a list of users as a table + ansible.builtin.debug: + msg: >- + {{ + users | community.general.to_prettytable + }} + +- name: Display a table with custom column ordering + ansible.builtin.debug: + msg: >- + {{ + users | community.general.to_prettytable( + column_order=['role', 'name', 'age'] + ) + }} + +- name: Display a table with selective column output (only show name and role fields) + ansible.builtin.debug: + msg: >- + {{ + users | community.general.to_prettytable( + column_order=['name', 'role'] + ) + }} + +- name: Display a table with custom headers + ansible.builtin.debug: + msg: >- + {{ + users | community.general.to_prettytable( + header_names=['User Name', 'User Age', 'User Role'] + ) + }} + +- name: Display a table with custom alignments + ansible.builtin.debug: + msg: >- + {{ + users | community.general.to_prettytable( + column_alignments={'name': 'center', 'age': 'right', 'role': 'left'} + ) + }} + +- name: Combine multiple options + ansible.builtin.debug: + msg: >- + {{ + users | community.general.to_prettytable( + column_order=['role', 'name', 'age'], + header_names=['Position', 'Full Name', 'Years'], + column_alignments={'name': 'center', 'age': 'right', 'role': 'left'} + ) + }} +""" + +RETURN = r""" +_value: + description: The formatted ASCII table. + type: string +""" + +try: + import prettytable + HAS_PRETTYTABLE = True +except ImportError: + HAS_PRETTYTABLE = False + +from ansible.errors import AnsibleFilterError +from ansible.module_utils._text import to_text +from ansible.module_utils.six import string_types + + +class TypeValidationError(AnsibleFilterError): + """Custom exception for type validation errors. + + Args: + obj: The object with incorrect type + expected: Description of expected type + """ + def __init__(self, obj, expected): + type_name = "string" if isinstance(obj, string_types) else type(obj).__name__ + super().__init__(f"Expected {expected}, got a {type_name}") + + +def _validate_list_param(param, param_name, ensure_strings=True): + """Validate a parameter is a list and optionally ensure all elements are strings. 
+ + Args: + param: The parameter to validate + param_name: The name of the parameter for error messages + ensure_strings: Whether to check that all elements are strings + + Raises: + AnsibleFilterError: If validation fails + """ + # Map parameter names to their original error message format + error_messages = { + "column_order": "a list of column names", + "header_names": "a list of header names" + } + + # Use the specific error message if available, otherwise use a generic one + error_msg = error_messages.get(param_name, f"a list for {param_name}") + + if not isinstance(param, list): + raise TypeValidationError(param, error_msg) + + if ensure_strings: + for item in param: + if not isinstance(item, string_types): + # Maintain original error message format + if param_name == "column_order": + error_msg = "a string for column name" + elif param_name == "header_names": + error_msg = "a string for header name" + else: + error_msg = f"a string for {param_name} element" + raise TypeValidationError(item, error_msg) + + +def _match_key(item_dict, lookup_key): + """Find a matching key in a dictionary, handling type conversion. + + Args: + item_dict: Dictionary to search in + lookup_key: Key to look for, possibly needing type conversion + + Returns: + The matching key or None if no match found + """ + # Direct key match + if lookup_key in item_dict: + return lookup_key + + # Try boolean conversion for 'true'/'false' strings + if isinstance(lookup_key, string_types): + if lookup_key.lower() == 'true' and True in item_dict: + return True + if lookup_key.lower() == 'false' and False in item_dict: + return False + + # Try numeric conversion for string numbers + if lookup_key.isdigit() and int(lookup_key) in item_dict: + return int(lookup_key) + + # No match found + return None + + +def _build_key_maps(data): + """Build mappings between string keys and original keys. + + Args: + data: List of dictionaries with keys to map + + Returns: + Tuple of (key_map, reverse_key_map) + """ + key_map = {} + reverse_key_map = {} + + # Check if the data list is not empty + if not data: + return key_map, reverse_key_map + + first_dict = data[0] + for orig_key in first_dict.keys(): + # Store string version of the key + str_key = to_text(orig_key) + key_map[str_key] = orig_key + # Also store lowercase version for case-insensitive lookups + reverse_key_map[str_key.lower()] = orig_key + + return key_map, reverse_key_map + + +def _configure_alignments(table, field_names, column_alignments): + """Configure column alignments for the table. + + Args: + table: The PrettyTable instance to configure + field_names: List of field names to align + column_alignments: Dict of column alignments + """ + valid_alignments = {"left", "center", "right", "l", "c", "r"} + + if not isinstance(column_alignments, dict): + return + + for col_name, alignment in column_alignments.items(): + if col_name in field_names: + # We already validated alignment is a string and a valid value in the main function + # Just apply it here + alignment = alignment.lower() + table.align[col_name] = alignment[0] + + +def to_prettytable(data, *args, **kwargs): + """Convert a list of dictionaries to an ASCII table. 
+ + Args: + data: List of dictionaries to format + *args: Optional list of column names to specify column order + **kwargs: Optional keyword arguments: + - column_order: List of column names to specify the order + - header_names: List of custom header names + - column_alignments: Dict of column alignments (left, center, right) + + Returns: + String containing the ASCII table + """ + if not HAS_PRETTYTABLE: + raise AnsibleFilterError( + 'You need to install "prettytable" Python module to use this filter' + ) + + # === Input validation === + # Validate list type + if not isinstance(data, list): + raise TypeValidationError(data, "a list of dictionaries") + + # Validate dictionary items if list is not empty + if data and not all(isinstance(item, dict) for item in data): + invalid_item = next((item for item in data if not isinstance(item, dict)), None) + raise TypeValidationError(invalid_item, "all items in the list to be dictionaries") + + # Get sample dictionary to determine fields - empty if no data + sample_dict = data[0] if data else {} + max_fields = len(sample_dict) + + # === Process column order === + # Handle both positional and keyword column_order + column_order = kwargs.pop('column_order', None) + + # Check for conflict between args and column_order + if args and column_order is not None: + raise AnsibleFilterError("Cannot use both positional arguments and the 'column_order' keyword argument") + + # Use positional args if provided + if args: + column_order = list(args) + + # Validate column_order + if column_order is not None: + _validate_list_param(column_order, "column_order") + + # Validate column_order doesn't exceed the number of fields (skip if data is empty) + if data and len(column_order) > max_fields: + raise AnsibleFilterError( + f"'column_order' has more elements ({len(column_order)}) than available fields in data ({max_fields})") + + # === Process headers === + # Determine field names and ensure they are strings + if column_order: + field_names = column_order + else: + # Use field names from first dictionary, ensuring all are strings + field_names = [to_text(k) for k in sample_dict] + + # Process custom headers + header_names = kwargs.pop('header_names', None) + if header_names is not None: + _validate_list_param(header_names, "header_names") + + # Validate header_names doesn't exceed the number of fields (skip if data is empty) + if data and len(header_names) > max_fields: + raise AnsibleFilterError( + f"'header_names' has more elements ({len(header_names)}) than available fields in data ({max_fields})") + + # Validate that column_order and header_names have the same size if both provided + if column_order is not None and len(column_order) != len(header_names): + raise AnsibleFilterError( + f"'column_order' and 'header_names' must have the same number of elements. 
" + f"Got {len(column_order)} columns and {len(header_names)} headers.") + + # === Process alignments === + # Get column alignments and validate + column_alignments = kwargs.pop('column_alignments', {}) + valid_alignments = {"left", "center", "right", "l", "c", "r"} + + # Validate column_alignments is a dictionary + if not isinstance(column_alignments, dict): + raise TypeValidationError(column_alignments, "a dictionary for column_alignments") + + # Validate column_alignments keys and values + for key, value in column_alignments.items(): + # Check that keys are strings + if not isinstance(key, string_types): + raise TypeValidationError(key, "a string for column_alignments key") + + # Check that values are strings + if not isinstance(value, string_types): + raise TypeValidationError(value, "a string for column_alignments value") + + # Check that values are valid alignments + if value.lower() not in valid_alignments: + raise AnsibleFilterError( + f"Invalid alignment '{value}' in 'column_alignments'. " + f"Valid alignments are: {', '.join(sorted(valid_alignments))}") + + # Validate column_alignments doesn't have more keys than fields (skip if data is empty) + if data and len(column_alignments) > max_fields: + raise AnsibleFilterError( + f"'column_alignments' has more elements ({len(column_alignments)}) than available fields in data ({max_fields})") + + # Check for unknown parameters + if kwargs: + raise AnsibleFilterError(f"Unknown parameter(s) for to_prettytable filter: {', '.join(sorted(kwargs))}") + + # === Build the table === + table = prettytable.PrettyTable() + + # Set the field names for display + display_names = header_names if header_names is not None else field_names + table.field_names = [to_text(name) for name in display_names] + + # Configure alignments after setting field_names + _configure_alignments(table, display_names, column_alignments) + + # Build key maps only if not using explicit column_order and we have data + key_map = {} + reverse_key_map = {} + if not column_order and data: # Only needed when using original dictionary keys and we have data + key_map, reverse_key_map = _build_key_maps(data) + + # If we have an empty list with no custom parameters, return a simple empty table + if not data and not column_order and not header_names and not column_alignments: + return "++\n++" + + # Process each row if we have data + for item in data: + row = [] + for col in field_names: + # Try direct mapping first + if col in key_map: + row.append(item.get(key_map[col], "")) + else: + # Try to find a matching key in the item + matched_key = _match_key(item, col) + if matched_key is not None: + row.append(item.get(matched_key, "")) + else: + # Try case-insensitive lookup as last resort + lower_col = col.lower() if isinstance(col, string_types) else str(col).lower() + if lower_col in reverse_key_map: + row.append(item.get(reverse_key_map[lower_col], "")) + else: + # No match found + row.append("") + table.add_row(row) + + return to_text(table) + + +class FilterModule(object): + """Ansible core jinja2 filters.""" + + def filters(self): + return { + 'to_prettytable': to_prettytable + } diff --git a/plugins/filter/to_seconds.yml b/plugins/filter/to_seconds.yml index d858e062a3..49b69d6d69 100644 --- a/plugins/filter/to_seconds.yml +++ b/plugins/filter/to_seconds.yml @@ -5,7 +5,7 @@ DOCUMENTATION: name: to_seconds - short_description: Converte a duration string to seconds + short_description: Converts a duration string to seconds version_added: 0.2.0 description: - Parse a human readable 
time duration string and convert to seconds. diff --git a/plugins/filter/to_time_unit.yml b/plugins/filter/to_time_unit.yml index bda124865c..256ca573f4 100644 --- a/plugins/filter/to_time_unit.yml +++ b/plugins/filter/to_time_unit.yml @@ -5,7 +5,7 @@ DOCUMENTATION: name: to_time_unit - short_description: Converte a duration string to the given time unit + short_description: Converts a duration string to the given time unit version_added: 0.2.0 description: - Parse a human readable time duration string and convert to the given time unit. diff --git a/plugins/filter/to_weeks.yml b/plugins/filter/to_weeks.yml index 7bf31bb65c..750e77c378 100644 --- a/plugins/filter/to_weeks.yml +++ b/plugins/filter/to_weeks.yml @@ -5,7 +5,7 @@ DOCUMENTATION: name: to_weeks - short_description: Converte a duration string to weeks + short_description: Converts a duration string to weeks version_added: 0.2.0 description: - Parse a human readable time duration string and convert to weeks. diff --git a/plugins/filter/to_years.yml b/plugins/filter/to_years.yml index 33c85a3ece..62f282a8b6 100644 --- a/plugins/filter/to_years.yml +++ b/plugins/filter/to_years.yml @@ -5,7 +5,7 @@ DOCUMENTATION: name: to_years - short_description: Converte a duration string to years + short_description: Converts a duration string to years version_added: 0.2.0 description: - Parse a human readable time duration string and convert to years. diff --git a/plugins/filter/unicode_normalize.py b/plugins/filter/unicode_normalize.py index dfbf20c573..aed7979de8 100644 --- a/plugins/filter/unicode_normalize.py +++ b/plugins/filter/unicode_normalize.py @@ -4,54 +4,58 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: unicode_normalize - short_description: Normalizes unicode strings to facilitate comparison of characters with normalized forms - version_added: 3.7.0 - author: Andrew Pantuso (@Ajpantuso) - description: - - Normalizes unicode strings to facilitate comparison of characters with normalized forms. - positional: form - options: - _input: - description: A unicode string. - type: string - required: true - form: - description: - - The normal form to use. - - See U(https://docs.python.org/3/library/unicodedata.html#unicodedata.normalize) for details. - type: string - default: NFC - choices: - - NFC - - NFD - - NFKC - - NFKD -''' +DOCUMENTATION = r""" +name: unicode_normalize +short_description: Normalizes unicode strings to facilitate comparison of characters with normalized forms +version_added: 3.7.0 +author: Andrew Pantuso (@Ajpantuso) +description: + - Normalizes unicode strings to facilitate comparison of characters with normalized forms. +positional: form +options: + _input: + description: A unicode string. + type: string + required: true + form: + description: + - The normal form to use. + - See U(https://docs.python.org/3/library/unicodedata.html#unicodedata.normalize) for details. + type: string + default: NFC + choices: + - NFC + - NFD + - NFKC + - NFKD +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Normalize unicode string ansible.builtin.set_fact: dictionary: "{{ 'ä' | community.general.unicode_normalize('NFKD') }}" # The resulting string has length 2: one letter is 'a', the other # the diacritic combiner. 
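The example above maps directly onto the standard library call the filter wraps:

from unicodedata import normalize

decomposed = normalize('NFKD', '\u00e4')  # 'ä'
assert len(decomposed) == 2
assert decomposed == 'a' + '\u0308'  # 'a' followed by the combining diaeresis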
-''' +""" -RETURN = ''' - _value: - description: The normalized unicode string of the specified normal form. - type: string -''' +RETURN = r""" +_value: + description: The normalized unicode string of the specified normal form. + type: string +""" from unicodedata import normalize -from ansible.errors import AnsibleFilterError, AnsibleFilterTypeError +from ansible.errors import AnsibleFilterError from ansible.module_utils.six import text_type +try: + from ansible.errors import AnsibleTypeError +except ImportError: + from ansible.errors import AnsibleFilterTypeError as AnsibleTypeError + def unicode_normalize(data, form='NFC'): """Applies normalization to 'unicode' strings. @@ -66,7 +70,7 @@ def unicode_normalize(data, form='NFC'): """ if not isinstance(data, text_type): - raise AnsibleFilterTypeError("%s is not a valid input type" % type(data)) + raise AnsibleTypeError("%s is not a valid input type" % type(data)) if form not in ('NFC', 'NFD', 'NFKC', 'NFKD'): raise AnsibleFilterError("%s is not a valid form" % form) diff --git a/plugins/filter/version_sort.py b/plugins/filter/version_sort.py index 09eedbf563..f3fb30035a 100644 --- a/plugins/filter/version_sort.py +++ b/plugins/filter/version_sort.py @@ -3,37 +3,36 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: version_sort - short_description: Sort a list according to version order instead of pure alphabetical one - version_added: 2.2.0 - author: Eric L. (@ericzolf) - description: - - Sort a list according to version order instead of pure alphabetical one. - options: - _input: - description: A list of strings to sort. - type: list - elements: string - required: true -''' +DOCUMENTATION = r""" +name: version_sort +short_description: Sort a list according to version order instead of pure alphabetical one +version_added: 2.2.0 +author: Eric L. (@ericzolf) +description: + - Sort a list according to version order instead of pure alphabetical one. +options: + _input: + description: A list of strings to sort. + type: list + elements: string + required: true +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Convert list of tuples into dictionary ansible.builtin.set_fact: dictionary: "{{ ['2.1', '2.10', '2.9'] | community.general.version_sort }}" # Result is ['2.1', '2.9', '2.10'] -''' +""" -RETURN = ''' - _value: - description: The list of strings sorted by version. - type: list - elements: string -''' +RETURN = r""" +_value: + description: The list of strings sorted by version. 
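Why a plain lexicographic sort misplaces '2.10', and what a version-aware key changes, can be shown with a simplified key for purely numeric versions (the filter itself sorts with the vendored LooseVersion imported below, which also handles non-numeric parts):

def version_key(version):
    return [int(part) for part in version.split('.')]

assert sorted(['2.1', '2.10', '2.9']) == ['2.1', '2.10', '2.9']
assert sorted(['2.1', '2.10', '2.9'], key=version_key) == ['2.1', '2.9', '2.10']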
+  type: list
+  elements: string
+"""

 from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
diff --git a/plugins/inventory/cobbler.py b/plugins/inventory/cobbler.py
index 7d65f583d6..677e1a3ad5 100644
--- a/plugins/inventory/cobbler.py
+++ b/plugins/inventory/cobbler.py
@@ -3,117 +3,133 @@
 # Copyright (c) 2020 Ansible Project
 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
 # SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations

-DOCUMENTATION = '''
-    author: Orion Poplawski (@opoplawski)
-    name: cobbler
-    short_description: Cobbler inventory source
-    version_added: 1.0.0
+DOCUMENTATION = r"""
+author: Orion Poplawski (@opoplawski)
+name: cobbler
+short_description: Cobbler inventory source
+version_added: 1.0.0
+description:
+  - Get inventory hosts from the cobbler service.
+  - 'Uses a configuration file as an inventory source, it must end in C(.cobbler.yml) or C(.cobbler.yaml) and have a C(plugin:
+    cobbler) entry.'
+  - Adds the primary IP addresses to C(cobbler_ipv4_address) and C(cobbler_ipv6_address) host variables if defined in Cobbler.
+    The primary IP address is defined as the management interface if defined, or the interface whose DNS name matches the
+    hostname of the system, or else the first interface found.
+extends_documentation_fragment:
+  - inventory_cache
+options:
+  plugin:
+    description: The name of this plugin, it should always be set to V(community.general.cobbler) for this plugin to recognize
+      it as its own.
+    type: string
+    required: true
+    choices: ['cobbler', 'community.general.cobbler']
+  url:
+    description: URL to cobbler.
+    type: string
+    default: 'http://cobbler/cobbler_api'
+    env:
+      - name: COBBLER_SERVER
+  user:
+    description: Cobbler authentication user.
+    type: string
+    required: false
+    env:
+      - name: COBBLER_USER
+  password:
+    description: Cobbler authentication password.
+    type: string
+    required: false
+    env:
+      - name: COBBLER_PASSWORD
+  cache_fallback:
+    description: Fallback to cached results if connection to cobbler fails.
+    type: boolean
+    default: false
+  connection_timeout:
+    description: Timeout to connect to cobbler server.
+    type: int
+    required: false
+    version_added: 10.7.0
+  exclude_mgmt_classes:
+    description: Management classes to exclude from inventory.
+    type: list
+    default: []
+    elements: str
+    version_added: 7.4.0
+  exclude_profiles:
     description:
-      - Get inventory hosts from the cobbler service.
-      - "Uses a configuration file as an inventory source, it must end in C(.cobbler.yml) or C(.cobbler.yaml) and have a C(plugin: cobbler) entry."
-      - Adds the primary IP addresses to C(cobbler_ipv4_address) and C(cobbler_ipv6_address) host variables if defined in Cobbler. The primary IP address is
-        defined as the management interface if defined, or the interface who's DNS name matches the hostname of the system, or else the first interface found.
-    extends_documentation_fragment:
-      - inventory_cache
-    options:
-      plugin:
-        description: The name of this plugin, it should always be set to V(community.general.cobbler) for this plugin to recognize it as its own.
-        type: string
-        required: true
-        choices: [ 'cobbler', 'community.general.cobbler' ]
-      url:
-        description: URL to cobbler.
-        type: string
-        default: 'http://cobbler/cobbler_api'
-        env:
-            - name: COBBLER_SERVER
-      user:
-        description: Cobbler authentication user.
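The primary-address rule from the description above can be sketched over a hypothetical interface mapping; the field names here are illustrative, not the plugin's actual data structures:

def pick_primary_interface(interfaces, hostname):
    # 1. an interface marked as the management interface wins
    for interface in interfaces.values():
        if interface.get('management'):
            return interface
    # 2. otherwise the interface whose DNS name matches the hostname
    for interface in interfaces.values():
        if interface.get('dns_name', '').split('.')[0] == hostname.split('.')[0]:
            return interface
    # 3. otherwise the first interface found
    return next(iter(interfaces.values()), None)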
- type: string - required: false - env: - - name: COBBLER_USER - password: - description: Cobbler authentication password. - type: string - required: false - env: - - name: COBBLER_PASSWORD - cache_fallback: - description: Fallback to cached results if connection to cobbler fails. - type: boolean - default: false - exclude_mgmt_classes: - description: Management classes to exclude from inventory. - type: list - default: [] - elements: str - version_added: 7.4.0 - exclude_profiles: - description: - - Profiles to exclude from inventory. - - Ignored if O(include_profiles) is specified. - type: list - default: [] - elements: str - include_mgmt_classes: - description: Management classes to include from inventory. - type: list - default: [] - elements: str - version_added: 7.4.0 - include_profiles: - description: - - Profiles to include from inventory. - - If specified, all other profiles will be excluded. - - O(exclude_profiles) is ignored if O(include_profiles) is specified. - type: list - default: [] - elements: str - version_added: 4.4.0 - inventory_hostname: - description: - - What to use for the ansible inventory hostname. - - By default the networking hostname is used if defined, otherwise the DNS name of the management or first non-static interface. - - If set to V(system), the cobbler system name is used. - type: str - choices: [ 'hostname', 'system' ] - default: hostname - version_added: 7.1.0 - group_by: - description: Keys to group hosts by. - type: list - elements: string - default: [ 'mgmt_classes', 'owners', 'status' ] - group: - description: Group to place all hosts into. - default: cobbler - group_prefix: - description: Prefix to apply to cobbler groups. - default: cobbler_ - want_facts: - description: Toggle, if V(true) the plugin will retrieve host facts from the server. - type: boolean - default: true - want_ip_addresses: - description: - - Toggle, if V(true) the plugin will add a C(cobbler_ipv4_addresses) and C(cobbleer_ipv6_addresses) dictionary to the defined O(group) mapping - interface DNS names to IP addresses. - type: boolean - default: true - version_added: 7.1.0 -''' + - Profiles to exclude from inventory. + - Ignored if O(include_profiles) is specified. + type: list + default: [] + elements: str + include_mgmt_classes: + description: Management classes to include from inventory. + type: list + default: [] + elements: str + version_added: 7.4.0 + include_profiles: + description: + - Profiles to include from inventory. + - If specified, all other profiles are excluded. + - O(exclude_profiles) is ignored if O(include_profiles) is specified. + type: list + default: [] + elements: str + version_added: 4.4.0 + inventory_hostname: + description: + - What to use for the ansible inventory hostname. + - By default the networking hostname is used if defined, otherwise the DNS name of the management or first non-static + interface. + - If set to V(system), the cobbler system name is used. + type: str + choices: ['hostname', 'system'] + default: hostname + version_added: 7.1.0 + group_by: + description: Keys to group hosts by. + type: list + elements: string + default: ['mgmt_classes', 'owners', 'status'] + group: + description: Group to place all hosts into. + default: cobbler + group_prefix: + description: Prefix to apply to cobbler groups. + default: cobbler_ + want_facts: + description: Toggle, if V(true) the plugin retrieves all host facts from the server. 
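The Cobbler API traffic behind these options can be sketched with the standard library; the URL and credentials below are the placeholders from the EXAMPLES section:

import xmlrpc.client

server = xmlrpc.client.ServerProxy('http://cobbler/cobbler_api', allow_none=True)
token = server.login('ansible-tester', 'secure')
systems = server.get_systems(token)  # one record per system, used as hostvars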
+ type: boolean + default: true + want_ip_addresses: + description: + - Toggle, if V(true) the plugin adds a C(cobbler_ipv4_addresses) and C(cobbler_ipv6_addresses) dictionary to the + defined O(group) mapping interface DNS names to IP addresses. + type: boolean + default: true + version_added: 7.1.0 + facts_level: + description: + - Set to V(normal) to gather only system-level variables. + - Set to V(as_rendered) to gather all variables as rolled up by Cobbler. + type: string + choices: ['normal', 'as_rendered'] + default: normal + version_added: 10.7.0 +""" -EXAMPLES = ''' +EXAMPLES = r""" # my.cobbler.yml plugin: community.general.cobbler url: http://cobbler/cobbler_api user: ansible-tester password: secure -''' +""" import socket @@ -135,6 +151,18 @@ except ImportError: HAS_XMLRPC_CLIENT = False +class TimeoutTransport (xmlrpc_client.SafeTransport): + def __init__(self, timeout=socket._GLOBAL_DEFAULT_TIMEOUT): + super(TimeoutTransport, self).__init__() + self._timeout = timeout + self.context = None + + def make_connection(self, host): + conn = xmlrpc_client.SafeTransport.make_connection(self, host) + conn.timeout = self._timeout + return conn + + class InventoryModule(BaseInventoryPlugin, Cacheable): ''' Host inventory parser for ansible using cobbler as source. ''' @@ -143,7 +171,9 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): def __init__(self): super(InventoryModule, self).__init__() self.cache_key = None - self.connection = None + + if not HAS_XMLRPC_CLIENT: + raise AnsibleError('Could not import xmlrpc client library') def verify_file(self, path): valid = False @@ -154,18 +184,6 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): self.display.vvv('Skipping due to inventory source not ending in "cobbler.yaml" nor "cobbler.yml"') return valid - def _get_connection(self): - if not HAS_XMLRPC_CLIENT: - raise AnsibleError('Could not import xmlrpc client library') - - if self.connection is None: - self.display.vvvv(f'Connecting to {self.cobbler_url}\n') - self.connection = xmlrpc_client.Server(self.cobbler_url, allow_none=True) - self.token = None - if self.get_option('user') is not None: - self.token = self.connection.login(text_type(self.get_option('user')), text_type(self.get_option('password'))) - return self.connection - def _init_cache(self): if self.cache_key not in self._cache: self._cache[self.cache_key] = {} @@ -179,12 +197,11 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): def _get_profiles(self): if not self.use_cache or 'profiles' not in self._cache.get(self.cache_key, {}): - c = self._get_connection() try: if self.token is not None: - data = c.get_profiles(self.token) + data = self.cobbler.get_profiles(self.token) else: - data = c.get_profiles() + data = self.cobbler.get_profiles() except (socket.gaierror, socket.error, xmlrpc_client.ProtocolError): self._reload_cache() else: @@ -195,12 +212,20 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): def _get_systems(self): if not self.use_cache or 'systems' not in self._cache.get(self.cache_key, {}): - c = self._get_connection() try: if self.token is not None: - data = c.get_systems(self.token) + data = self.cobbler.get_systems(self.token) else: - data = c.get_systems() + data = self.cobbler.get_systems() + + # If more facts are requested, gather them all from Cobbler + if self.facts_level == "as_rendered": + for i, host in enumerate(data): + self.display.vvvv(f"Gathering all facts for {host['name']}\n") + if self.token is not None: + data[i] = 
self.cobbler.get_system_as_rendered(host['name'], self.token) + else: + data[i] = self.cobbler.get_system_as_rendered(host['name']) except (socket.gaierror, socket.error, xmlrpc_client.ProtocolError): self._reload_cache() else: @@ -230,6 +255,17 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): # get connection host self.cobbler_url = self.get_option('url') + self.display.vvvv(f'Connecting to {self.cobbler_url}\n') + + if 'connection_timeout' in self._options: + self.cobbler = xmlrpc_client.Server(self.cobbler_url, allow_none=True, + transport=TimeoutTransport(timeout=self.get_option('connection_timeout'))) + else: + self.cobbler = xmlrpc_client.Server(self.cobbler_url, allow_none=True) + self.token = None + if self.get_option('user') is not None: + self.token = self.cobbler.login(text_type(self.get_option('user')), text_type(self.get_option('password'))) + self.cache_key = self.get_cache_key(path) self.use_cache = cache and self.get_option('cache') @@ -239,6 +275,7 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): self.include_profiles = self.get_option('include_profiles') self.group_by = self.get_option('group_by') self.inventory_hostname = self.get_option('inventory_hostname') + self.facts_level = self.get_option('facts_level') for profile in self._get_profiles(): if profile['parent']: @@ -320,7 +357,7 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): # Add host to groups specified by group_by fields for group_by in self.group_by: - if host[group_by] == '<>': + if host[group_by] == '<>' or host[group_by] == '': groups = [] else: groups = [host[group_by]] if isinstance(host[group_by], str) else host[group_by] diff --git a/plugins/inventory/gitlab_runners.py b/plugins/inventory/gitlab_runners.py index 9a467905dd..7a10b553a2 100644 --- a/plugins/inventory/gitlab_runners.py +++ b/plugins/inventory/gitlab_runners.py @@ -4,64 +4,65 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = ''' - name: gitlab_runners - author: - - Stefan Heitmüller (@morph027) - short_description: Ansible dynamic inventory plugin for GitLab runners. - requirements: - - python-gitlab > 1.8.0 - extends_documentation_fragment: - - constructed - description: - - Reads inventories from the GitLab API. - - Uses a YAML configuration file gitlab_runners.[yml|yaml]. - options: - plugin: - description: The name of this plugin, it should always be set to 'gitlab_runners' for this plugin to recognize it as its own. - type: str - required: true - choices: - - gitlab_runners - - community.general.gitlab_runners - server_url: - description: The URL of the GitLab server, with protocol (i.e. http or https). - env: - - name: GITLAB_SERVER_URL - version_added: 1.0.0 - type: str - required: true - api_token: - description: GitLab token for logging in. 
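For orientation, the python-gitlab calls an inventory like this wraps look roughly as follows; the token is a placeholder and the scope argument mirrors the filter option:

import gitlab

gl = gitlab.Gitlab('https://gitlab.com', private_token='<token>')
for runner in gl.runners.all(scope='online'):
    print(runner.id, runner.description)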
- env: - - name: GITLAB_API_TOKEN - version_added: 1.0.0 - type: str - aliases: - - private_token - - access_token - filter: - description: filter runners from GitLab API - env: - - name: GITLAB_FILTER - version_added: 1.0.0 - type: str - choices: ['active', 'paused', 'online', 'specific', 'shared'] - verbose_output: - description: Toggle to (not) include all available nodes metadata - type: bool - default: true -''' +DOCUMENTATION = r""" +name: gitlab_runners +author: + - Stefan Heitmüller (@morph027) +short_description: Ansible dynamic inventory plugin for GitLab runners +requirements: + - python-gitlab > 1.8.0 +extends_documentation_fragment: + - constructed +description: + - Reads inventories from the GitLab API. + - Uses a YAML configuration file gitlab_runners.[yml|yaml]. +options: + plugin: + description: The name of this plugin, it should always be set to V(gitlab_runners) for this plugin to recognize it as its own. + type: str + required: true + choices: + - gitlab_runners + - community.general.gitlab_runners + server_url: + description: The URL of the GitLab server, with protocol (i.e. http or https). + env: + - name: GITLAB_SERVER_URL + version_added: 1.0.0 + type: str + required: true + api_token: + description: GitLab token for logging in. + env: + - name: GITLAB_API_TOKEN + version_added: 1.0.0 + type: str + aliases: + - private_token + - access_token + filter: + description: Filter runners from GitLab API. + env: + - name: GITLAB_FILTER + version_added: 1.0.0 + type: str + choices: ['active', 'paused', 'online', 'specific', 'shared'] + verbose_output: + description: Toggle to (not) include all available nodes metadata. + type: bool + default: true +""" -EXAMPLES = ''' +EXAMPLES = r""" +--- # gitlab_runners.yml plugin: community.general.gitlab_runners host: https://gitlab.com +--- # Example using constructed features to create groups and set ansible_host plugin: community.general.gitlab_runners host: https://gitlab.com @@ -78,7 +79,7 @@ keyed_groups: # hint: labels containing special characters will be converted to safe names - key: 'tag_list' prefix: tag -''' +""" from ansible.errors import AnsibleError, AnsibleParserError from ansible.plugins.inventory import BaseInventoryPlugin, Constructable diff --git a/plugins/inventory/icinga2.py b/plugins/inventory/icinga2.py index 527a329173..64d77b437d 100644 --- a/plugins/inventory/icinga2.py +++ b/plugins/inventory/icinga2.py @@ -3,75 +3,73 @@ # Copyright (c) 2021 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = ''' - name: icinga2 - short_description: Icinga2 inventory source - version_added: 3.7.0 - author: - - Cliff Hults (@BongoEADGC6) +DOCUMENTATION = r""" +name: icinga2 +short_description: Icinga2 inventory source +version_added: 3.7.0 +author: + - Cliff Hults (@BongoEADGC6) +description: + - Get inventory hosts from the Icinga2 API. + - Uses a configuration file as an inventory source, it must end in C(.icinga2.yml) or C(.icinga2.yaml). +extends_documentation_fragment: + - constructed +options: + strict: + version_added: 4.4.0 + compose: + version_added: 4.4.0 + groups: + version_added: 4.4.0 + keyed_groups: + version_added: 4.4.0 + plugin: + description: Name of the plugin. 
+ required: true + type: string + choices: ['community.general.icinga2'] + url: + description: Root URL of Icinga2 API. + type: string + required: true + user: + description: Username to query the API. + type: string + required: true + password: + description: Password to query the API. + type: string + required: true + host_filter: description: - - Get inventory hosts from the Icinga2 API. - - "Uses a configuration file as an inventory source, it must end in - C(.icinga2.yml) or C(.icinga2.yaml)." - extends_documentation_fragment: - - constructed - options: - strict: - version_added: 4.4.0 - compose: - version_added: 4.4.0 - groups: - version_added: 4.4.0 - keyed_groups: - version_added: 4.4.0 - plugin: - description: Name of the plugin. - required: true - type: string - choices: ['community.general.icinga2'] - url: - description: Root URL of Icinga2 API. - type: string - required: true - user: - description: Username to query the API. - type: string - required: true - password: - description: Password to query the API. - type: string - required: true - host_filter: - description: - - An Icinga2 API valid host filter. Leave blank for no filtering - type: string - required: false - validate_certs: - description: Enables or disables SSL certificate verification. - type: boolean - default: true - inventory_attr: - description: - - Allows the override of the inventory name based on different attributes. - - This allows for changing the way limits are used. - - The current default, V(address), is sometimes not unique or present. We recommend to use V(name) instead. - type: string - default: address - choices: ['name', 'display_name', 'address'] - version_added: 4.2.0 - group_by_hostgroups: - description: - - Uses Icinga2 hostgroups as groups. - type: boolean - default: true - version_added: 8.4.0 -''' + - An Icinga2 API valid host filter. Leave blank for no filtering. + type: string + required: false + validate_certs: + description: Enables or disables SSL certificate verification. + type: boolean + default: true + inventory_attr: + description: + - Allows the override of the inventory name based on different attributes. + - This allows for changing the way limits are used. + - The current default, V(address), is sometimes not unique or present. We recommend to use V(name) instead. + type: string + default: address + choices: ['name', 'display_name', 'address'] + version_added: 4.2.0 + group_by_hostgroups: + description: + - Uses Icinga2 hostgroups as groups. 
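The kind of API query such an inventory issues can be sketched with requests (the plugin itself does not use requests; the URL, credentials, and filter expression are placeholders):

import requests

response = requests.post(
    'https://localhost:5665/v1/objects/hosts',
    auth=('apiuser', 'secret'),
    headers={'Accept': 'application/json', 'X-HTTP-Method-Override': 'GET'},
    json={'attrs': ['name', 'address', 'state'], 'filter': 'host.vars.os == "Linux"'},
    verify=True,  # mirrors validate_certs
)
for host in response.json()['results']:
    print(host['attrs']['name'], host['attrs']['address'])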
+ type: boolean + default: true + version_added: 8.4.0 +""" -EXAMPLES = r''' +EXAMPLES = r""" # my.icinga2.yml plugin: community.general.icinga2 url: http://localhost:5665 @@ -94,7 +92,7 @@ compose: # set 'ansible_user' and 'ansible_port' from icinga2 host vars ansible_user: icinga2_attributes.vars.ansible_user ansible_port: icinga2_attributes.vars.ansible_port | default(22) -''' +""" import json @@ -292,11 +290,11 @@ class InventoryModule(BaseInventoryPlugin, Constructable): self.group_by_hostgroups = self.get_option('group_by_hostgroups') if self.templar.is_template(self.icinga2_url): - self.icinga2_url = self.templar.template(variable=self.icinga2_url, disable_lookups=False) + self.icinga2_url = self.templar.template(variable=self.icinga2_url) if self.templar.is_template(self.icinga2_user): - self.icinga2_user = self.templar.template(variable=self.icinga2_user, disable_lookups=False) + self.icinga2_user = self.templar.template(variable=self.icinga2_user) if self.templar.is_template(self.icinga2_password): - self.icinga2_password = self.templar.template(variable=self.icinga2_password, disable_lookups=False) + self.icinga2_password = self.templar.template(variable=self.icinga2_password) self.icinga2_url = f"{self.icinga2_url.rstrip('/')}/v1" diff --git a/plugins/inventory/iocage.py b/plugins/inventory/iocage.py index 5dc18b4710..603003d617 100644 --- a/plugins/inventory/iocage.py +++ b/plugins/inventory/iocage.py @@ -4,80 +4,114 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: iocage - short_description: iocage inventory source - version_added: 10.2.0 - author: - - Vladimir Botka (@vbotka) - requirements: - - iocage >= 1.8 +DOCUMENTATION = r""" +name: iocage +short_description: C(iocage) inventory source +version_added: 10.2.0 +author: + - Vladimir Botka (@vbotka) +requirements: + - iocage >= 1.8 +description: + - Get inventory hosts from the C(iocage) jail manager running on O(host). + - By default, O(host) is V(localhost). If O(host) is not V(localhost) it is expected that the user running Ansible on the + controller can connect to the O(host) account O(user) with SSH non-interactively and execute the command C(iocage list). + - Uses a configuration file as an inventory source, it must end in C(.iocage.yml) or C(.iocage.yaml). +extends_documentation_fragment: + - ansible.builtin.constructed + - ansible.builtin.inventory_cache +options: + plugin: description: - - Get inventory hosts from the iocage jail manager running on O(host). - - By default, O(host) is V(localhost). If O(host) is not V(localhost) it - is expected that the user running Ansible on the controller can - connect to the O(host) account O(user) with SSH non-interactively and - execute the command C(iocage list). - - Uses a configuration file as an inventory source, it must end - in C(.iocage.yml) or C(.iocage.yaml). - extends_documentation_fragment: - - ansible.builtin.constructed - - ansible.builtin.inventory_cache - options: - plugin: - description: - - The name of this plugin, it should always be set to - V(community.general.iocage) for this plugin to recognize - it as its own. - required: true - choices: ['community.general.iocage'] - type: str - host: - description: The IP/hostname of the C(iocage) host. 
- type: str - default: localhost - user: - description: - - C(iocage) user. - It is expected that the O(user) is able to connect to the - O(host) with SSH and execute the command C(iocage list). - This option is not required if O(host) is V(localhost). - type: str - get_properties: - description: - - Get jails' properties. - Creates dictionary C(iocage_properties) for each added host. - type: boolean - default: false - env: - description: O(user)'s environment on O(host). - type: dict - default: {} - notes: - - You might want to test the command C(ssh user@host iocage list -l) on - the controller before using this inventory plugin with O(user) specified - and with O(host) other than V(localhost). - - If you run this inventory plugin on V(localhost) C(ssh) is not used. - In this case, test the command C(iocage list -l). - - This inventory plugin creates variables C(iocage_*) for each added host. - - The values of these variables are collected from the output of the - command C(iocage list -l). - - The names of these variables correspond to the output columns. - - The column C(NAME) is used to name the added host. -''' + - The name of this plugin, it should always be set to V(community.general.iocage) for this plugin to recognize it as + its own. + required: true + choices: ['community.general.iocage'] + type: str + host: + description: The IP/hostname of the C(iocage) host. + type: str + default: localhost + user: + description: + - C(iocage) user. It is expected that the O(user) is able to connect to the O(host) with SSH and execute the command + C(iocage list). This option is not required if O(host=localhost). + type: str + sudo: + description: + - Enable execution as root. + - This requires passwordless sudo of the command C(iocage list*). + type: bool + default: false + version_added: 10.3.0 + sudo_preserve_env: + description: + - Preserve environment if O(sudo) is enabled. + - This requires C(SETENV) sudoers tag. + type: bool + default: false + version_added: 10.3.0 + get_properties: + description: + - Get jails' properties. Creates dictionary C(iocage_properties) for each added host. + type: bool + default: false + env: + description: + - O(user)'s environment on O(host). + - Enable O(sudo_preserve_env) if O(sudo) is enabled. + type: dict + default: {} + hooks_results: + description: + - List of paths to the files in a jail. + - Content of the files is stored in the items of the list C(iocage_hooks). + - If a file is not available the item keeps the dash character C(-). + - The variable C(iocage_hooks) is not created if O(hooks_results) is empty. + type: list + elements: path + version_added: 10.4.0 + inventory_hostname_tag: + description: + - The name of the tag in the C(iocage properties notes) that contains the jails alias. + - By default, the C(iocage list -l) column C(NAME) is used to name the jail. + - This option requires the notes format C("t1=v1 t2=v2 ..."). + - The option O(get_properties) must be enabled. + type: str + version_added: 11.0.0 + inventory_hostname_required: + description: + - If enabled, the tag declared in O(inventory_hostname_tag) is required. + type: bool + default: false + version_added: 11.0.0 +notes: + - You might want to test the command C(ssh user@host iocage list -l) on the controller before using this inventory plugin + with O(user) specified and with O(host) other than V(localhost). + - If you run this inventory plugin on V(localhost) C(ssh) is not used. In this case, test the command C(iocage list -l). 
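Pulling the options above together, the remote command line is assembled roughly like this; a sketch whose values mirror the EXAMPLES further down, including the /usr/local/bin/iocage path from the sudoers example:

cmd = ['ssh', 'admin@10.1.0.73']                    # only when host is remote
cmd += ['CRYPTOGRAPHY_OPENSSL_NO_LEGACY=1']         # env entries
cmd += ['sudo', '--preserve-env']                   # only with sudo/sudo_preserve_env
cmd += ['/usr/local/bin/iocage', 'list', '--long']
# subprocess.Popen(cmd, ...) then parses the table that iocage prints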
+ - This inventory plugin creates variables C(iocage_*) for each added host. + - The values of these variables are collected from the output of the command C(iocage list -l). + - The names of these variables correspond to the output columns. + - The column C(NAME) is used to name the added host. + - The option O(hooks_results) expects the C(poolname) of a jail is mounted to C(/poolname). For example, if you activate + the pool C(iocage) this plugin expects to find the O(hooks_results) items in the path C(/iocage/iocage/jails//root). + If you mount the C(poolname) to a different path the easiest remedy is to create a symlink. +""" -EXAMPLES = ''' +EXAMPLES = r""" +--- # file name must end with iocage.yaml or iocage.yml plugin: community.general.iocage host: 10.1.0.73 user: admin +--- # user is not required if iocage is running on localhost (default) plugin: community.general.iocage +--- # run cryptography without legacy algorithms plugin: community.general.iocage host: 10.1.0.73 @@ -85,6 +119,18 @@ user: admin env: CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1 +--- +# execute as root +# sudoers example 'admin ALL=(ALL) NOPASSWD:SETENV: /usr/local/bin/iocage list*' +plugin: community.general.iocage +host: 10.1.0.73 +user: admin +sudo: true +sudo_preserve_env: true +env: + CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1 + +--- # enable cache plugin: community.general.iocage host: 10.1.0.73 @@ -93,6 +139,7 @@ env: CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1 cache: true +--- # see inventory plugin ansible.builtin.constructed plugin: community.general.iocage host: 10.1.0.73 @@ -111,7 +158,19 @@ keyed_groups: key: iocage_release - prefix: state key: iocage_state -''' + +--- +# Read the file /var/db/dhclient-hook.address.epair0b in the jails and use it as ansible_host +plugin: community.general.iocage +host: 10.1.0.73 +user: admin +hooks_results: + - /var/db/dhclient-hook.address.epair0b +compose: + ansible_host: iocage_hooks.0 +groups: + test: inventory_hostname.startswith('test') +""" import re import os @@ -126,9 +185,27 @@ display = Display() def _parse_ip4(ip4): - if ip4 == '-': - return ip4 - return re.split('\\||/', ip4)[1] + ''' Return dictionary iocage_ip4_dict. default = {ip4: [], msg: ''}. + If item matches ifc|IP or ifc|CIDR parse ifc, ip, and mask. + Otherwise, append item to msg. 
+ ''' + + iocage_ip4_dict = {} + iocage_ip4_dict['ip4'] = [] + iocage_ip4_dict['msg'] = '' + + items = ip4.split(',') + for item in items: + if re.match('^\\w+\\|(?:\\d{1,3}\\.){3}\\d{1,3}.*$', item): + i = re.split('\\||/', item) + if len(i) == 3: + iocage_ip4_dict['ip4'].append({'ifc': i[0], 'ip': i[1], 'mask': i[2]}) + else: + iocage_ip4_dict['ip4'].append({'ifc': i[0], 'ip': i[1], 'mask': '-'}) + else: + iocage_ip4_dict['msg'] += item + + return iocage_ip4_dict class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): @@ -173,8 +250,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): def get_inventory(self, path): host = self.get_option('host') + sudo = self.get_option('sudo') + sudo_preserve_env = self.get_option('sudo_preserve_env') env = self.get_option('env') get_properties = self.get_option('get_properties') + hooks_results = self.get_option('hooks_results') + inventory_hostname_tag = self.get_option('inventory_hostname_tag') + inventory_hostname_required = self.get_option('inventory_hostname_required') cmd = [] my_env = os.environ.copy() @@ -185,11 +267,14 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): cmd.append("ssh") cmd.append(f"{user}@{host}") cmd.extend([f"{k}={v}" for k, v in env.items()]) - cmd.append(self.IOCAGE) cmd_list = cmd.copy() + if sudo: + cmd_list.append('sudo') + if sudo_preserve_env: + cmd_list.append('--preserve-env') + cmd_list.append(self.IOCAGE) cmd_list.append('list') - cmd_list.append('--header') cmd_list.append('--long') try: p = Popen(cmd_list, stdout=PIPE, stderr=PIPE, env=my_env) @@ -211,6 +296,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): if get_properties: for hostname, host_vars in results['_meta']['hostvars'].items(): cmd_get_properties = cmd.copy() + cmd_get_properties.append(self.IOCAGE) cmd_get_properties.append("get") cmd_get_properties.append("--all") cmd_get_properties.append(f"{hostname}") @@ -231,19 +317,88 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): self.get_properties(t_stdout, results, hostname) + if hooks_results: + cmd_get_pool = cmd.copy() + cmd_get_pool.append(self.IOCAGE) + cmd_get_pool.append('get') + cmd_get_pool.append('--pool') + try: + p = Popen(cmd_get_pool, stdout=PIPE, stderr=PIPE, env=my_env) + stdout, stderr = p.communicate() + if p.returncode != 0: + raise AnsibleError( + f'Failed to run cmd={cmd_get_pool}, rc={p.returncode}, stderr={to_native(stderr)}') + try: + iocage_pool = to_text(stdout, errors='surrogate_or_strict').strip() + except UnicodeError as e: + raise AnsibleError(f'Invalid (non unicode) input returned: {e}') from e + except Exception as e: + raise AnsibleError(f'Failed to get pool: {e}') from e + + for hostname, host_vars in results['_meta']['hostvars'].items(): + iocage_hooks = [] + for hook in hooks_results: + path = f"/{iocage_pool}/iocage/jails/{hostname}/root{hook}" + cmd_cat_hook = cmd.copy() + cmd_cat_hook.append('cat') + cmd_cat_hook.append(path) + try: + p = Popen(cmd_cat_hook, stdout=PIPE, stderr=PIPE, env=my_env) + stdout, stderr = p.communicate() + if p.returncode != 0: + iocage_hooks.append('-') + continue + + try: + iocage_hook = to_text(stdout, errors='surrogate_or_strict').strip() + except UnicodeError as e: + raise AnsibleError(f'Invalid (non unicode) input returned: {e}') from e + + except Exception: + iocage_hooks.append('-') + else: + iocage_hooks.append(iocage_hook) + + results['_meta']['hostvars'][hostname]['iocage_hooks'] = iocage_hooks + + # Optionally, get the 
jails names from the properties notes. + # Requires the notes format "t1=v1 t2=v2 ..." + if inventory_hostname_tag: + if not get_properties: + raise AnsibleError('Jail properties are needed to use inventory_hostname_tag. Enable get_properties') + update = {} + for hostname, host_vars in results['_meta']['hostvars'].items(): + tags = dict(tag.split('=', 1) for tag in host_vars['iocage_properties']['notes'].split() if '=' in tag) + if inventory_hostname_tag in tags: + update[hostname] = tags[inventory_hostname_tag] + elif inventory_hostname_required: + raise AnsibleError(f'Mandatory tag {inventory_hostname_tag!r} is missing in the properties notes.') + for hostname, alias in update.items(): + results['_meta']['hostvars'][alias] = results['_meta']['hostvars'].pop(hostname) + return results def get_jails(self, t_stdout, results): - jails = [x.split() for x in t_stdout.splitlines()] - for jail in jails: + lines = t_stdout.splitlines() + if len(lines) < 5: + return + indices = [i for i, val in enumerate(lines[1]) if val == '|'] + for line in lines[3::2]: + jail = [line[i + 1:j].strip() for i, j in zip(indices[:-1], indices[1:])] iocage_name = jail[1] + iocage_ip4_dict = _parse_ip4(jail[6]) + if iocage_ip4_dict['ip4']: + iocage_ip4 = ','.join([d['ip'] for d in iocage_ip4_dict['ip4']]) + else: + iocage_ip4 = '-' results['_meta']['hostvars'][iocage_name] = {} results['_meta']['hostvars'][iocage_name]['iocage_jid'] = jail[0] results['_meta']['hostvars'][iocage_name]['iocage_boot'] = jail[2] results['_meta']['hostvars'][iocage_name]['iocage_state'] = jail[3] results['_meta']['hostvars'][iocage_name]['iocage_type'] = jail[4] results['_meta']['hostvars'][iocage_name]['iocage_release'] = jail[5] - results['_meta']['hostvars'][iocage_name]['iocage_ip4'] = _parse_ip4(jail[6]) + results['_meta']['hostvars'][iocage_name]['iocage_ip4_dict'] = iocage_ip4_dict + results['_meta']['hostvars'][iocage_name]['iocage_ip4'] = iocage_ip4 results['_meta']['hostvars'][iocage_name]['iocage_ip6'] = jail[7] results['_meta']['hostvars'][iocage_name]['iocage_template'] = jail[8] results['_meta']['hostvars'][iocage_name]['iocage_basejail'] = jail[9] diff --git a/plugins/inventory/linode.py b/plugins/inventory/linode.py index 46f2faeace..bf6faba07a 100644 --- a/plugins/inventory/linode.py +++ b/plugins/inventory/linode.py @@ -3,91 +3,92 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' - name: linode - author: - - Luke Murphy (@decentral1se) - short_description: Ansible dynamic inventory plugin for Linode. - requirements: - - linode_api4 >= 2.0.0 - description: - - Reads inventories from the Linode API v4. - - Uses a YAML configuration file that ends with linode.(yml|yaml). - - Linode labels are used by default as the hostnames. - - The default inventory groups are built from groups (deprecated by - Linode) and not tags. - extends_documentation_fragment: - - constructed - - inventory_cache - options: - cache: - version_added: 4.5.0 - cache_plugin: - version_added: 4.5.0 - cache_timeout: - version_added: 4.5.0 - cache_connection: - version_added: 4.5.0 - cache_prefix: - version_added: 4.5.0 - plugin: - description: Marks this as an instance of the 'linode' plugin. 
- type: string - required: true - choices: ['linode', 'community.general.linode'] - ip_style: - description: Populate hostvars with all information available from the Linode APIv4. - type: string - default: plain - choices: - - plain - - api - version_added: 3.6.0 - access_token: - description: The Linode account personal access token. - type: string - required: true - env: - - name: LINODE_ACCESS_TOKEN - regions: - description: Populate inventory with instances in this region. - default: [] - type: list - elements: string - tags: - description: Populate inventory only with instances which have at least one of the tags listed here. - default: [] - type: list - elements: string - version_added: 2.0.0 - types: - description: Populate inventory with instances with this type. - default: [] - type: list - elements: string - strict: - version_added: 2.0.0 - compose: - version_added: 2.0.0 - groups: - version_added: 2.0.0 - keyed_groups: - version_added: 2.0.0 -''' +DOCUMENTATION = r""" +name: linode +author: + - Luke Murphy (@decentral1se) +short_description: Ansible dynamic inventory plugin for Linode +requirements: + - linode_api4 >= 2.0.0 +description: + - Reads inventories from the Linode API v4. + - Uses a YAML configuration file that ends with linode.(yml|yaml). + - Linode labels are used by default as the hostnames. + - The default inventory groups are built from groups (deprecated by Linode) and not tags. +extends_documentation_fragment: + - constructed + - inventory_cache +options: + cache: + version_added: 4.5.0 + cache_plugin: + version_added: 4.5.0 + cache_timeout: + version_added: 4.5.0 + cache_connection: + version_added: 4.5.0 + cache_prefix: + version_added: 4.5.0 + plugin: + description: Marks this as an instance of the 'linode' plugin. + type: string + required: true + choices: ['linode', 'community.general.linode'] + ip_style: + description: Populate hostvars with all information available from the Linode APIv4. + type: string + default: plain + choices: + - plain + - api + version_added: 3.6.0 + access_token: + description: The Linode account personal access token. + type: string + required: true + env: + - name: LINODE_ACCESS_TOKEN + regions: + description: Populate inventory with instances in this region. + default: [] + type: list + elements: string + tags: + description: Populate inventory only with instances which have at least one of the tags listed here. + default: [] + type: list + elements: string + version_added: 2.0.0 + types: + description: Populate inventory with instances with this type. + default: [] + type: list + elements: string + strict: + version_added: 2.0.0 + compose: + version_added: 2.0.0 + groups: + version_added: 2.0.0 + keyed_groups: + version_added: 2.0.0 +""" -EXAMPLES = r''' +EXAMPLES = r""" +--- # Minimal example. `LINODE_ACCESS_TOKEN` is exposed in environment. plugin: community.general.linode +--- # You can use Jinja to template the access token. 
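# Note: the template is rendered on the controller when the inventory file is parsed.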
plugin: community.general.linode access_token: "{{ lookup('ini', 'token', section='your_username', file='~/.config/linode-cli') }}" # For older Ansible versions, you need to write this as: # access_token: "{{ lookup('ini', 'token section=your_username file=~/.config/linode-cli') }}" +--- # Example with regions, types, groups and access token plugin: community.general.linode access_token: foobar @@ -96,6 +97,7 @@ regions: types: - g5-standard-2 +--- # Example with keyed_groups, groups, and compose plugin: community.general.linode access_token: foobar @@ -114,13 +116,14 @@ compose: ansible_ssh_host: ipv4[0] ansible_port: 2222 +--- # Example where control traffic limited to internal network plugin: community.general.linode access_token: foobar ip_style: api compose: ansible_host: "ipv4 | community.general.json_query('[?public==`false`].address') | first" -''' +""" from ansible.errors import AnsibleError from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable @@ -146,7 +149,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): access_token = self.get_option('access_token') if self.templar.is_template(access_token): - access_token = self.templar.template(variable=access_token, disable_lookups=False) + access_token = self.templar.template(variable=access_token) if access_token is None: raise AnsibleError(( diff --git a/plugins/inventory/lxd.py b/plugins/inventory/lxd.py index d7b942c1f7..efdca6563e 100644 --- a/plugins/inventory/lxd.py +++ b/plugins/inventory/lxd.py @@ -3,120 +3,122 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' - name: lxd - short_description: Returns Ansible inventory from lxd host +DOCUMENTATION = r""" +name: lxd +short_description: Returns Ansible inventory from lxd host +description: + - Get inventory from the lxd. + - Uses a YAML configuration file that ends with 'lxd.(yml|yaml)'. +version_added: "3.0.0" +author: "Frank Dornheim (@conloos)" +requirements: + - ipaddress + - lxd >= 4.0 +options: + plugin: + description: Token that ensures this is a source file for the 'lxd' plugin. + type: string + required: true + choices: ['community.general.lxd'] + url: description: - - Get inventory from the lxd. - - Uses a YAML configuration file that ends with 'lxd.(yml|yaml)'. - version_added: "3.0.0" - author: "Frank Dornheim (@conloos)" - requirements: - - ipaddress - - lxd >= 4.0 - options: - plugin: - description: Token that ensures this is a source file for the 'lxd' plugin. - type: string - required: true - choices: [ 'community.general.lxd' ] - url: - description: - - The unix domain socket path or the https URL for the lxd server. - - Sockets in filesystem have to start with C(unix:). - - Mostly C(unix:/var/lib/lxd/unix.socket) or C(unix:/var/snap/lxd/common/lxd/unix.socket). - type: string - default: unix:/var/snap/lxd/common/lxd/unix.socket - client_key: - description: - - The client certificate key file path. - aliases: [ key_file ] - default: $HOME/.config/lxc/client.key - type: path - client_cert: - description: - - The client certificate file path. - aliases: [ cert_file ] - default: $HOME/.config/lxc/client.crt - type: path - server_cert: - description: - - The server certificate file path. 
- type: path - version_added: 8.0.0 - server_check_hostname: - description: - - This option controls if the server's hostname is checked as part of the HTTPS connection verification. - This can be useful to disable, if for example, the server certificate provided (see O(server_cert) option) - does not cover a name matching the one used to communicate with the server. Such mismatch is common as LXD - generates self-signed server certificates by default. - type: bool - default: true - version_added: 8.0.0 - trust_password: - description: - - The client trusted password. - - You need to set this password on the lxd server before - running this module using the following command - C(lxc config set core.trust_password ) - See U(https://documentation.ubuntu.com/lxd/en/latest/authentication/#adding-client-certificates-using-a-trust-password). - - If O(trust_password) is set, this module send a request for authentication before sending any requests. - type: str - state: - description: Filter the instance according to the current status. - type: str - default: none - choices: [ 'STOPPED', 'STARTING', 'RUNNING', 'none' ] - project: - description: Filter the instance according to the given project. - type: str - default: default - version_added: 6.2.0 - type_filter: - description: - - Filter the instances by type V(virtual-machine), V(container) or V(both). - - The first version of the inventory only supported containers. - type: str - default: container - choices: [ 'virtual-machine', 'container', 'both' ] - version_added: 4.2.0 - prefered_instance_network_interface: - description: - - If an instance has multiple network interfaces, select which one is the preferred as pattern. - - Combined with the first number that can be found e.g. 'eth' + 0. - - The option has been renamed from O(prefered_container_network_interface) to O(prefered_instance_network_interface) - in community.general 3.8.0. The old name still works as an alias. - type: str - default: eth - aliases: - - prefered_container_network_interface - prefered_instance_network_family: - description: - - If an instance has multiple network interfaces, which one is the preferred by family. - - Specify V(inet) for IPv4 and V(inet6) for IPv6. - type: str - default: inet - choices: [ 'inet', 'inet6' ] - groupby: - description: - - Create groups by the following keywords C(location), C(network_range), C(os), C(pattern), C(profile), C(release), C(type), C(vlanid). - - See example for syntax. - type: dict -''' + - The unix domain socket path or the https URL for the lxd server. + - Sockets in filesystem have to start with C(unix:). + - Mostly C(unix:/var/lib/lxd/unix.socket) or C(unix:/var/snap/lxd/common/lxd/unix.socket). + type: string + default: unix:/var/snap/lxd/common/lxd/unix.socket + client_key: + description: + - The client certificate key file path. + aliases: [key_file] + default: $HOME/.config/lxc/client.key + type: path + client_cert: + description: + - The client certificate file path. + aliases: [cert_file] + default: $HOME/.config/lxc/client.crt + type: path + server_cert: + description: + - The server certificate file path. + type: path + version_added: 8.0.0 + server_check_hostname: + description: + - This option controls if the server's hostname is checked as part of the HTTPS connection verification. This can be + useful to disable, if for example, the server certificate provided (see O(server_cert) option) does not cover a name + matching the one used to communicate with the server. 
Such mismatch is common as LXD generates self-signed server
+        certificates by default.
+    type: bool
+    default: true
+    version_added: 8.0.0
+  trust_password:
+    description:
+      - The client trusted password.
+      - You need to set this password on the lxd server before running this module using the following command C(lxc config
+        set core.trust_password <password>). See
+        U(https://documentation.ubuntu.com/lxd/en/latest/authentication/#adding-client-certificates-using-a-trust-password).
+      - If O(trust_password) is set, this plugin sends a request for authentication before sending any requests.
+    type: str
+  state:
+    description: Filter the instance according to the current status.
+    type: str
+    default: none
+    choices: ['STOPPED', 'STARTING', 'RUNNING', 'none']
+  project:
+    description: Filter the instance according to the given project.
+    type: str
+    default: default
+    version_added: 6.2.0
+  type_filter:
+    description:
+      - Filter the instances by type V(virtual-machine), V(container) or V(both).
+      - The first version of the inventory only supported containers.
+    type: str
+    default: container
+    choices: ['virtual-machine', 'container', 'both']
+    version_added: 4.2.0
+  prefered_instance_network_interface:
+    description:
+      - If an instance has multiple network interfaces, select which one is preferred, given as a pattern.
+      - Combined with the first number that can be found, for example C(eth) + C(0).
+      - The option has been renamed from O(prefered_container_network_interface) to O(prefered_instance_network_interface)
+        in community.general 3.8.0. The old name still works as an alias.
+    type: str
+    default: eth
+    aliases:
+      - prefered_container_network_interface
+  prefered_instance_network_family:
+    description:
+      - If an instance has multiple network interfaces, select which one is preferred by address family.
+      - Specify V(inet) for IPv4 and V(inet6) for IPv6.
+    type: str
+    default: inet
+    choices: ['inet', 'inet6']
+  groupby:
+    description:
+      - Create groups by the following keywords C(location), C(network_range), C(os), C(pattern), C(profile), C(release),
+        C(type), C(vlanid).
+      - See example for syntax.
+    type: dict
+"""

-EXAMPLES = '''
+EXAMPLES = r"""
+---
# simple lxd.yml
plugin: community.general.lxd
url: unix:/var/snap/lxd/common/lxd/unix.socket

+---
# simple lxd.yml including filter
plugin: community.general.lxd
url: unix:/var/snap/lxd/common/lxd/unix.socket
state: RUNNING

+---
# simple lxd.yml including virtual machines and containers
plugin: community.general.lxd
url: unix:/var/snap/lxd/common/lxd/unix.socket
@@ -163,7 +165,7 @@ groupby:
   projectInternals:
     type: project
     attribute: internals
-'''
+"""

 import json
 import re
diff --git a/plugins/inventory/nmap.py b/plugins/inventory/nmap.py
index f40f33b972..3339d66b46 100644
--- a/plugins/inventory/nmap.py
+++ b/plugins/inventory/nmap.py
@@ -3,112 +3,118 @@
 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
 # SPDX-License-Identifier: GPL-3.0-or-later

-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations

-DOCUMENTATION = '''
-  author: Unknown (!UNKNOWN)
-  name: nmap
-  short_description: Uses nmap to find hosts to target
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: nmap
+short_description: Uses nmap to find hosts to target
+description:
+  - Uses a YAML configuration file with a valid YAML extension.
+extends_documentation_fragment: + - constructed + - inventory_cache +requirements: + - nmap CLI installed +options: + plugin: + description: Token that ensures this is a source file for the P(community.general.nmap#inventory) plugin. + type: string + required: true + choices: ['nmap', 'community.general.nmap'] + sudo: + description: Set to V(true) to execute a C(sudo nmap) plugin scan. + version_added: 4.8.0 + default: false + type: boolean + address: + description: Network IP or range of IPs to scan, you can use a simple range (10.2.2.15-25) or CIDR notation. + type: string + required: true + env: + - name: ANSIBLE_NMAP_ADDRESS + version_added: 6.6.0 + exclude: description: - - Uses a YAML configuration file with a valid YAML extension. - extends_documentation_fragment: - - constructed - - inventory_cache - requirements: - - nmap CLI installed - options: - plugin: - description: token that ensures this is a source file for the 'nmap' plugin. - type: string - required: true - choices: ['nmap', 'community.general.nmap'] - sudo: - description: Set to V(true) to execute a C(sudo nmap) plugin scan. - version_added: 4.8.0 - default: false - type: boolean - address: - description: Network IP or range of IPs to scan, you can use a simple range (10.2.2.15-25) or CIDR notation. - type: string - required: true - env: - - name: ANSIBLE_NMAP_ADDRESS - version_added: 6.6.0 - exclude: - description: - - List of addresses to exclude. - - For example V(10.2.2.15-25) or V(10.2.2.15,10.2.2.16). - type: list - elements: string - env: - - name: ANSIBLE_NMAP_EXCLUDE - version_added: 6.6.0 - port: - description: - - Only scan specific port or port range (C(-p)). - - For example, you could pass V(22) for a single port, V(1-65535) for a range of ports, - or V(U:53,137,T:21-25,139,8080,S:9) to check port 53 with UDP, ports 21-25 with TCP, port 9 with SCTP, and ports 137, 139, and 8080 with all. - type: string - version_added: 6.5.0 - ports: - description: Enable/disable scanning ports. - type: boolean - default: true - ipv4: - description: use IPv4 type addresses - type: boolean - default: true - ipv6: - description: use IPv6 type addresses - type: boolean - default: true - udp_scan: - description: - - Scan via UDP. - - Depending on your system you might need O(sudo=true) for this to work. - type: boolean - default: false - version_added: 6.1.0 - icmp_timestamp: - description: - - Scan via ICMP Timestamp (C(-PP)). - - Depending on your system you might need O(sudo=true) for this to work. - type: boolean - default: false - version_added: 6.1.0 - open: - description: Only scan for open (or possibly open) ports. - type: boolean - default: false - version_added: 6.5.0 - dns_resolve: - description: Whether to always (V(true)) or never (V(false)) do DNS resolution. - type: boolean - default: false - version_added: 6.1.0 - use_arp_ping: - description: Whether to always (V(true)) use the quick ARP ping or (V(false)) a slower but more reliable method. - type: boolean - default: true - version_added: 7.4.0 - notes: - - At least one of O(ipv4) or O(ipv6) is required to be V(true); both can be V(true), but they cannot both be V(false). - - 'TODO: add OS fingerprinting' -''' -EXAMPLES = ''' + - List of addresses to exclude. + - For example V(10.2.2.15-25) or V(10.2.2.15,10.2.2.16). + type: list + elements: string + env: + - name: ANSIBLE_NMAP_EXCLUDE + version_added: 6.6.0 + port: + description: + - Only scan specific port or port range (C(-p)). 
+ - For example, you could pass V(22) for a single port, V(1-65535) for a range of ports, or V(U:53,137,T:21-25,139,8080,S:9) + to check port 53 with UDP, ports 21-25 with TCP, port 9 with SCTP, and ports 137, 139, and 8080 with all. + type: string + version_added: 6.5.0 + ports: + description: Enable/disable scanning ports. + type: boolean + default: true + ipv4: + description: Use IPv4 type addresses. + type: boolean + default: true + ipv6: + description: Use IPv6 type addresses. + type: boolean + default: true + udp_scan: + description: + - Scan using UDP. + - Depending on your system you might need O(sudo=true) for this to work. + type: boolean + default: false + version_added: 6.1.0 + icmp_timestamp: + description: + - Scan using ICMP Timestamp (C(-PP)). + - Depending on your system you might need O(sudo=true) for this to work. + type: boolean + default: false + version_added: 6.1.0 + open: + description: Only scan for open (or possibly open) ports. + type: boolean + default: false + version_added: 6.5.0 + dns_resolve: + description: Whether to always (V(true)) or never (V(false)) do DNS resolution. + type: boolean + default: false + version_added: 6.1.0 + dns_servers: + description: Specify which DNS servers to use for name resolution. + type: list + elements: string + version_added: 10.5.0 + use_arp_ping: + description: Whether to always (V(true)) use the quick ARP ping or (V(false)) a slower but more reliable method. + type: boolean + default: true + version_added: 7.4.0 +notes: + - At least one of O(ipv4) or O(ipv6) is required to be V(true); both can be V(true), but they cannot both be V(false). + - 'TODO: add OS fingerprinting.' +""" +EXAMPLES = r""" +--- # inventory.config file in YAML format plugin: community.general.nmap strict: false address: 192.168.0.0/24 - +--- # a sudo nmap scan to fully use nmap scan power. plugin: community.general.nmap sudo: true strict: false address: 192.168.0.0/24 +--- # an nmap scan specifying ports and classifying results to an inventory group plugin: community.general.nmap address: 192.168.0.0/24 @@ -116,7 +122,7 @@ exclude: 192.168.0.1, web.example.com port: 22, 443 groups: web_servers: "ports | selectattr('port', 'equalto', '443')" -''' +""" import os import re @@ -230,6 +236,10 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): if self.get_option('dns_resolve'): cmd.append('-n') + if self.get_option('dns_servers'): + cmd.append('--dns-servers') + cmd.append(','.join(self.get_option('dns_servers'))) + if self.get_option('udp_scan'): cmd.append('-sU') diff --git a/plugins/inventory/online.py b/plugins/inventory/online.py index 9475049c08..8b4821a009 100644 --- a/plugins/inventory/online.py +++ b/plugins/inventory/online.py @@ -3,52 +3,51 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' - name: online - author: - - Remy Leone (@remyleone) - short_description: Scaleway (previously Online SAS or Online.net) inventory source - description: - - Get inventory hosts from Scaleway (previously Online SAS or Online.net). - options: - plugin: - description: token that ensures this is a source file for the 'online' plugin. - type: string - required: true - choices: ['online', 'community.general.online'] - oauth_token: - required: true - description: Online OAuth token. 
- type: string - env: - # in order of precedence - - name: ONLINE_TOKEN - - name: ONLINE_API_KEY - - name: ONLINE_OAUTH_TOKEN - hostnames: - description: List of preference about what to use as an hostname. - type: list - elements: string - default: - - public_ipv4 - choices: - - public_ipv4 - - private_ipv4 - - hostname - groups: - description: List of groups. - type: list - elements: string - choices: - - location - - offer - - rpn -''' +DOCUMENTATION = r""" +name: online +author: + - Remy Leone (@remyleone) +short_description: Scaleway (previously Online SAS or Online.net) inventory source +description: + - Get inventory hosts from Scaleway (previously Online SAS or Online.net). +options: + plugin: + description: Token that ensures this is a source file for the P(community.general.online#inventory) plugin. + type: string + required: true + choices: ['online', 'community.general.online'] + oauth_token: + required: true + description: Online OAuth token. + type: string + env: + # in order of precedence + - name: ONLINE_TOKEN + - name: ONLINE_API_KEY + - name: ONLINE_OAUTH_TOKEN + hostnames: + description: List of preference about what to use as an hostname. + type: list + elements: string + default: + - public_ipv4 + choices: + - public_ipv4 + - private_ipv4 + - hostname + groups: + description: List of groups. + type: list + elements: string + choices: + - location + - offer + - rpn +""" -EXAMPLES = r''' +EXAMPLES = r""" # online_inventory.yml file in YAML format # Example command line: ansible-inventory --list -i online_inventory.yml @@ -59,7 +58,7 @@ groups: - location - offer - rpn -''' +""" import json from sys import version as python_version diff --git a/plugins/inventory/opennebula.py b/plugins/inventory/opennebula.py index 7fc320f326..8ced301dd1 100644 --- a/plugins/inventory/opennebula.py +++ b/plugins/inventory/opennebula.py @@ -3,81 +3,78 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = r''' - name: opennebula - author: - - Kristian Feldsam (@feldsam) - short_description: OpenNebula inventory source - version_added: "3.8.0" - extends_documentation_fragment: - - constructed +DOCUMENTATION = r""" +name: opennebula +author: + - Kristian Feldsam (@feldsam) +short_description: OpenNebula inventory source +version_added: "3.8.0" +extends_documentation_fragment: + - constructed +description: + - Get inventory hosts from OpenNebula cloud. + - Uses an YAML configuration file ending with either C(opennebula.yml) or C(opennebula.yaml) to set parameter values. + - Uses O(api_authfile), C(~/.one/one_auth), or E(ONE_AUTH) pointing to a OpenNebula credentials file. +options: + plugin: + description: Token that ensures this is a source file for the 'opennebula' plugin. + type: string + required: true + choices: [community.general.opennebula] + api_url: description: - - Get inventory hosts from OpenNebula cloud. - - Uses an YAML configuration file ending with either C(opennebula.yml) or C(opennebula.yaml) - to set parameter values. - - Uses O(api_authfile), C(~/.one/one_auth), or E(ONE_AUTH) pointing to a OpenNebula credentials file. - options: - plugin: - description: Token that ensures this is a source file for the 'opennebula' plugin. 
-        type: string
-        required: true
-        choices: [ community.general.opennebula ]
-      api_url:
-        description:
-          - URL of the OpenNebula RPC server.
-          - It is recommended to use HTTPS so that the username/password are not
-            transferred over the network unencrypted.
-          - If not set then the value of the E(ONE_URL) environment variable is used.
-        env:
-          - name: ONE_URL
-        required: true
-        type: string
-      api_username:
-        description:
-          - Name of the user to login into the OpenNebula RPC server. If not set
-            then the value of the E(ONE_USERNAME) environment variable is used.
-        env:
-          - name: ONE_USERNAME
-        type: string
-      api_password:
-        description:
-          - Password or a token of the user to login into OpenNebula RPC server.
-          - If not set, the value of the E(ONE_PASSWORD) environment variable is used.
-        env:
-          - name: ONE_PASSWORD
-        required: false
-        type: string
-      api_authfile:
-        description:
-          - If both O(api_username) or O(api_password) are not set, then it will try
-            authenticate with ONE auth file. Default path is C(~/.one/one_auth).
-          - Set environment variable E(ONE_AUTH) to override this path.
-        env:
-          - name: ONE_AUTH
-        required: false
-        type: string
-      hostname:
-        description: Field to match the hostname. Note V(v4_first_ip) corresponds to the first IPv4 found on VM.
-        type: string
-        default: v4_first_ip
-        choices:
-          - v4_first_ip
-          - v6_first_ip
-          - name
-      filter_by_label:
-        description: Only return servers filtered by this label.
-        type: string
-      group_by_labels:
-        description: Create host groups by vm labels
-        type: bool
-        default: true
-'''
+      - URL of the OpenNebula RPC server.
+      - It is recommended to use HTTPS so that the username/password are not transferred over the network unencrypted.
+      - If not set, the value of the E(ONE_URL) environment variable is used.
+    env:
+      - name: ONE_URL
+    required: true
+    type: string
+  api_username:
+    description:
+      - Name of the user to log in to the OpenNebula RPC server. If not set, the value of the E(ONE_USERNAME) environment
+        variable is used.
+    env:
+      - name: ONE_USERNAME
+    type: string
+  api_password:
+    description:
+      - Password or a token of the user to log in to the OpenNebula RPC server.
+      - If not set, the value of the E(ONE_PASSWORD) environment variable is used.
+    env:
+      - name: ONE_PASSWORD
+    required: false
+    type: string
+  api_authfile:
+    description:
+      - If neither O(api_username) nor O(api_password) is set, it tries to authenticate with the ONE auth file. Default
+        path is C(~/.one/one_auth).
+      - Set environment variable E(ONE_AUTH) to override this path.
+    env:
+      - name: ONE_AUTH
+    required: false
+    type: string
+  hostname:
+    description: Field to match the hostname. Note V(v4_first_ip) corresponds to the first IPv4 found on the VM.
+    type: string
+    default: v4_first_ip
+    choices:
+      - v4_first_ip
+      - v6_first_ip
+      - name
+  filter_by_label:
+    description: Only return servers filtered by this label.
+    type: string
+  group_by_labels:
+    description: Create host groups by VM labels.
+ type: bool + default: true +""" -EXAMPLES = r''' +EXAMPLES = r""" # inventory_opennebula.yml file in YAML format # Example command line: ansible-inventory --list -i inventory_opennebula.yml @@ -85,7 +82,7 @@ EXAMPLES = r''' plugin: community.general.opennebula api_url: https://opennebula:2633/RPC2 filter_by_label: Cache -''' +""" try: import pyone diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py deleted file mode 100644 index cf25efc58c..0000000000 --- a/plugins/inventory/proxmox.py +++ /dev/null @@ -1,685 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (C) 2016 Guido Günther , Daniel Lobato Garcia -# Copyright (c) 2018 Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) - -__metaclass__ = type - -DOCUMENTATION = ''' - name: proxmox - short_description: Proxmox inventory source - version_added: "1.2.0" - author: - - Jeffrey van Pelt (@Thulium-Drake) - requirements: - - requests >= 1.1 - description: - - Get inventory hosts from a Proxmox PVE cluster. - - "Uses a configuration file as an inventory source, it must end in C(.proxmox.yml) or C(.proxmox.yaml)" - - Will retrieve the first network interface with an IP for Proxmox nodes. - - Can retrieve LXC/QEMU configuration as facts. - extends_documentation_fragment: - - constructed - - inventory_cache - options: - plugin: - description: The name of this plugin, it should always be set to V(community.general.proxmox) for this plugin to recognize it as its own. - required: true - choices: ['community.general.proxmox'] - type: str - url: - description: - - URL to Proxmox cluster. - - If the value is not specified in the inventory configuration, the value of environment variable E(PROXMOX_URL) will be used instead. - - Since community.general 4.7.0 you can also use templating to specify the value of the O(url). - default: 'http://localhost:8006' - type: str - env: - - name: PROXMOX_URL - version_added: 2.0.0 - user: - description: - - Proxmox authentication user. - - If the value is not specified in the inventory configuration, the value of environment variable E(PROXMOX_USER) will be used instead. - - Since community.general 4.7.0 you can also use templating to specify the value of the O(user). - required: true - type: str - env: - - name: PROXMOX_USER - version_added: 2.0.0 - password: - description: - - Proxmox authentication password. - - If the value is not specified in the inventory configuration, the value of environment variable E(PROXMOX_PASSWORD) will be used instead. - - Since community.general 4.7.0 you can also use templating to specify the value of the O(password). - - If you do not specify a password, you must set O(token_id) and O(token_secret) instead. - type: str - env: - - name: PROXMOX_PASSWORD - version_added: 2.0.0 - token_id: - description: - - Proxmox authentication token ID. - - If the value is not specified in the inventory configuration, the value of environment variable E(PROXMOX_TOKEN_ID) will be used instead. - - To use token authentication, you must also specify O(token_secret). If you do not specify O(token_id) and O(token_secret), - you must set a password instead. - - Make sure to grant explicit pve permissions to the token or disable 'privilege separation' to use the users' privileges instead. 
- version_added: 4.8.0 - type: str - env: - - name: PROXMOX_TOKEN_ID - token_secret: - description: - - Proxmox authentication token secret. - - If the value is not specified in the inventory configuration, the value of environment variable E(PROXMOX_TOKEN_SECRET) will be used instead. - - To use token authentication, you must also specify O(token_id). If you do not specify O(token_id) and O(token_secret), - you must set a password instead. - version_added: 4.8.0 - type: str - env: - - name: PROXMOX_TOKEN_SECRET - validate_certs: - description: Verify SSL certificate if using HTTPS. - type: boolean - default: true - group_prefix: - description: Prefix to apply to Proxmox groups. - default: proxmox_ - type: str - facts_prefix: - description: Prefix to apply to LXC/QEMU config facts. - default: proxmox_ - type: str - want_facts: - description: - - Gather LXC/QEMU configuration facts. - - When O(want_facts) is set to V(true) more details about QEMU VM status are possible, besides the running and stopped states. - Currently if the VM is running and it is suspended, the status will be running and the machine will be in C(running) group, - but its actual state will be paused. See O(qemu_extended_statuses) for how to retrieve the real status. - default: false - type: bool - qemu_extended_statuses: - description: - - Requires O(want_facts) to be set to V(true) to function. This will allow you to differentiate between C(paused) and C(prelaunch) - statuses of the QEMU VMs. - - This introduces multiple groups [prefixed with O(group_prefix)] C(prelaunch) and C(paused). - default: false - type: bool - version_added: 5.1.0 - want_proxmox_nodes_ansible_host: - version_added: 3.0.0 - description: - - Whether to set C(ansible_host) for proxmox nodes. - - When set to V(true) (default), will use the first available interface. This can be different from what you expect. - - The default of this option changed from V(true) to V(false) in community.general 6.0.0. - type: bool - default: false - exclude_nodes: - description: Exclude proxmox nodes and the nodes-group from the inventory output. - type: bool - default: false - version_added: 8.1.0 - filters: - version_added: 4.6.0 - description: A list of Jinja templates that allow filtering hosts. - type: list - elements: str - default: [] - strict: - version_added: 2.5.0 - compose: - version_added: 2.5.0 - groups: - version_added: 2.5.0 - keyed_groups: - version_added: 2.5.0 -''' - -EXAMPLES = ''' -# Minimal example which will not gather additional facts for QEMU/LXC guests -# By not specifying a URL the plugin will attempt to connect to the controller host on port 8006 -# my.proxmox.yml -plugin: community.general.proxmox -user: ansible@pve -password: secure -# Note that this can easily give you wrong values as ansible_host. See further below for -# an example where this is set to `false` and where ansible_host is set with `compose`. -want_proxmox_nodes_ansible_host: true - -# Instead of login with password, proxmox supports api token authentication since release 6.2. -plugin: community.general.proxmox -user: ci@pve -token_id: gitlab-1 -token_secret: fa256e9c-26ab-41ec-82da-707a2c079829 - -# The secret can also be a vault string or passed via the environment variable TOKEN_SECRET. 
-token_secret: !vault | - $ANSIBLE_VAULT;1.1;AES256 - 62353634333163633336343265623632626339313032653563653165313262343931643431656138 - 6134333736323265656466646539663134306166666237630a653363623262636663333762316136 - 34616361326263383766366663393837626437316462313332663736623066656237386531663731 - 3037646432383064630a663165303564623338666131353366373630656661333437393937343331 - 32643131386134396336623736393634373936356332623632306561356361323737313663633633 - 6231313333666361656537343562333337323030623732323833 - -# More complete example demonstrating the use of 'want_facts' and the constructed options -# Note that using facts returned by 'want_facts' in constructed options requires 'want_facts=true' -# my.proxmox.yml -plugin: community.general.proxmox -url: http://pve.domain.com:8006 -user: ansible@pve -password: secure -want_facts: true -keyed_groups: - # proxmox_tags_parsed is an example of a fact only returned when 'want_facts=true' - - key: proxmox_tags_parsed - separator: "" - prefix: group -groups: - webservers: "'web' in (proxmox_tags_parsed|list)" - mailservers: "'mail' in (proxmox_tags_parsed|list)" -compose: - ansible_port: 2222 -# Note that this can easily give you wrong values as ansible_host. See further below for -# an example where this is set to `false` and where ansible_host is set with `compose`. -want_proxmox_nodes_ansible_host: true - -# Using the inventory to allow ansible to connect via the first IP address of the VM / Container -# (Default is connection by name of QEMU/LXC guests) -# Note: my_inv_var demonstrates how to add a string variable to every host used by the inventory. -# my.proxmox.yml -plugin: community.general.proxmox -url: http://192.168.1.2:8006 -user: ansible@pve -password: secure -validate_certs: false # only do this when you trust the network! -want_facts: true -want_proxmox_nodes_ansible_host: false -compose: - ansible_host: proxmox_ipconfig0.ip | default(proxmox_net0.ip) | ipaddr('address') - my_inv_var_1: "'my_var1_value'" - my_inv_var_2: > - "my_var_2_value" - -# Specify the url, user and password using templating -# my.proxmox.yml -plugin: community.general.proxmox -url: "{{ lookup('ansible.builtin.ini', 'url', section='proxmox', file='file.ini') }}" -user: "{{ lookup('ansible.builtin.env','PM_USER') | default('ansible@pve') }}" -password: "{{ lookup('community.general.random_string', base64=True) }}" -# Note that this can easily give you wrong values as ansible_host. See further up for -# an example where this is set to `false` and where ansible_host is set with `compose`. 
-want_proxmox_nodes_ansible_host: true - -''' - -import itertools -import re - -from ansible.module_utils.common._collections_compat import MutableMapping - -from ansible.errors import AnsibleError -from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable -from ansible.module_utils.six import string_types -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.utils.display import Display - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion -from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe - -# 3rd party imports -try: - import requests - if LooseVersion(requests.__version__) < LooseVersion('1.1.0'): - raise ImportError - HAS_REQUESTS = True -except ImportError: - HAS_REQUESTS = False - -display = Display() - - -class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): - ''' Host inventory parser for ansible using Proxmox as source. ''' - - NAME = 'community.general.proxmox' - - def __init__(self): - - super(InventoryModule, self).__init__() - - # from config - self.proxmox_url = None - - self.session = None - self.cache_key = None - self.use_cache = None - - def verify_file(self, path): - - valid = False - if super(InventoryModule, self).verify_file(path): - if path.endswith(('proxmox.yaml', 'proxmox.yml')): - valid = True - else: - self.display.vvv('Skipping due to inventory source not ending in "proxmox.yaml" nor "proxmox.yml"') - return valid - - def _get_session(self): - if not self.session: - self.session = requests.session() - self.session.verify = self.get_option('validate_certs') - return self.session - - def _get_auth(self): - validate_certs = self.get_option('validate_certs') - - if validate_certs is False: - from requests.packages.urllib3 import disable_warnings - disable_warnings() - - if self.proxmox_password: - credentials = urlencode({'username': self.proxmox_user, 'password': self.proxmox_password}) - a = self._get_session() - ret = a.post(f'{self.proxmox_url}/api2/json/access/ticket', data=credentials) - json = ret.json() - self.headers = { - # only required for POST/PUT/DELETE methods, which we are not using currently - # 'CSRFPreventionToken': json['data']['CSRFPreventionToken'], - 'Cookie': f"PVEAuthCookie={json['data']['ticket']}" - } - else: - # Clean and format token components - user = self.proxmox_user.strip() - token_id = self.proxmox_token_id.strip() - token_secret = self.proxmox_token_secret.strip() - - # Build token string without newlines - token = f'{user}!{token_id}={token_secret}' - - # Set headers with clean token - self.headers = {'Authorization': f'PVEAPIToken={token}'} - - def _get_json(self, url, ignore_errors=None): - - if not self.use_cache or url not in self._cache.get(self.cache_key, {}): - - if self.cache_key not in self._cache: - self._cache[self.cache_key] = {'url': ''} - - data = [] - s = self._get_session() - while True: - ret = s.get(url, headers=self.headers) - if ignore_errors and ret.status_code in ignore_errors: - break - ret.raise_for_status() - json = ret.json() - - # process results - # FIXME: This assumes 'return type' matches a specific query, - # it will break if we expand the queries and they dont have different types - if 'data' not in json: - # /hosts/:id does not have a 'data' key - data = json - break - elif isinstance(json['data'], MutableMapping): - # /facts are returned as dict in 'data' - data = json['data'] - break - else: - if json['data']: - # /hosts 's 'results' is a list of 
all hosts, returned is paginated - data = data + json['data'] - break - - self._cache[self.cache_key][url] = data - - return make_unsafe(self._cache[self.cache_key][url]) - - def _get_nodes(self): - return self._get_json(f"{self.proxmox_url}/api2/json/nodes") - - def _get_pools(self): - return self._get_json(f"{self.proxmox_url}/api2/json/pools") - - def _get_lxc_per_node(self, node): - return self._get_json(f"{self.proxmox_url}/api2/json/nodes/{node}/lxc") - - def _get_qemu_per_node(self, node): - return self._get_json(f"{self.proxmox_url}/api2/json/nodes/{node}/qemu") - - def _get_members_per_pool(self, pool): - ret = self._get_json(f"{self.proxmox_url}/api2/json/pools/{pool}") - return ret['members'] - - def _get_node_ip(self, node): - ret = self._get_json(f"{self.proxmox_url}/api2/json/nodes/{node}/network") - - for iface in ret: - try: - return iface['address'] - except Exception: - return None - - def _get_lxc_interfaces(self, properties, node, vmid): - status_key = self._fact('status') - - if status_key not in properties or not properties[status_key] == 'running': - return - - ret = self._get_json(f"{self.proxmox_url}/api2/json/nodes/{node}/lxc/{vmid}/interfaces", ignore_errors=[501]) - if not ret: - return - - result = [] - - for iface in ret: - result_iface = { - 'name': iface['name'], - 'hwaddr': iface['hwaddr'] - } - - if 'inet' in iface: - result_iface['inet'] = iface['inet'] - - if 'inet6' in iface: - result_iface['inet6'] = iface['inet6'] - - result.append(result_iface) - - properties[self._fact('lxc_interfaces')] = result - - def _get_agent_network_interfaces(self, node, vmid, vmtype): - result = [] - - try: - ifaces = self._get_json( - f"{self.proxmox_url}/api2/json/nodes/{node}/{vmtype}/{vmid}/agent/network-get-interfaces" - )['result'] - - if "error" in ifaces: - if "class" in ifaces["error"]: - # This happens on Windows, even though qemu agent is running, the IP address - # cannot be fetched, as it is unsupported, also a command disabled can happen. 
- errorClass = ifaces["error"]["class"] - if errorClass in ["Unsupported"]: - self.display.v("Retrieving network interfaces from guest agents on windows with older qemu-guest-agents is not supported") - elif errorClass in ["CommandDisabled"]: - self.display.v("Retrieving network interfaces from guest agents has been disabled") - return result - - for iface in ifaces: - result.append({ - 'name': iface['name'], - 'mac-address': iface['hardware-address'] if 'hardware-address' in iface else '', - 'ip-addresses': [f"{ip['ip-address']}/{ip['prefix']}" for ip in iface['ip-addresses']] if 'ip-addresses' in iface else [] - }) - except requests.HTTPError: - pass - - return result - - def _get_vm_config(self, properties, node, vmid, vmtype, name): - ret = self._get_json(f"{self.proxmox_url}/api2/json/nodes/{node}/{vmtype}/{vmid}/config") - - properties[self._fact('node')] = node - properties[self._fact('vmid')] = vmid - properties[self._fact('vmtype')] = vmtype - - plaintext_configs = [ - 'description', - ] - - for config in ret: - key = self._fact(config) - value = ret[config] - try: - # fixup disk images as they have no key - if config == 'rootfs' or config.startswith(('virtio', 'sata', 'ide', 'scsi')): - value = f"disk_image={value}" - - # Additional field containing parsed tags as list - if config == 'tags': - stripped_value = value.strip() - if stripped_value: - parsed_key = f"{key}_parsed" - properties[parsed_key] = [tag.strip() for tag in stripped_value.replace(',', ';').split(";")] - - # The first field in the agent string tells you whether the agent is enabled - # the rest of the comma separated string is extra config for the agent. - # In some (newer versions of proxmox) instances it can be 'enabled=1'. - if config == 'agent': - agent_enabled = 0 - try: - agent_enabled = int(value.split(',')[0]) - except ValueError: - if value.split(',')[0] == "enabled=1": - agent_enabled = 1 - if agent_enabled: - agent_iface_value = self._get_agent_network_interfaces(node, vmid, vmtype) - if agent_iface_value: - agent_iface_key = self.to_safe(f'{key}_interfaces') - properties[agent_iface_key] = agent_iface_value - - if config == 'lxc': - out_val = {} - for k, v in value: - if k.startswith('lxc.'): - k = k[len('lxc.'):] - out_val[k] = v - value = out_val - - if config not in plaintext_configs and isinstance(value, string_types) \ - and all("=" in v for v in value.split(",")): - # split off strings with commas to a dict - # skip over any keys that cannot be processed - try: - value = dict(key.split("=", 1) for key in value.split(",")) - except Exception: - continue - - properties[key] = value - except NameError: - return None - - def _get_vm_status(self, properties, node, vmid, vmtype, name): - ret = self._get_json(f"{self.proxmox_url}/api2/json/nodes/{node}/{vmtype}/{vmid}/status/current") - properties[self._fact('status')] = ret['status'] - if vmtype == 'qemu': - properties[self._fact('qmpstatus')] = ret['qmpstatus'] - - def _get_vm_snapshots(self, properties, node, vmid, vmtype, name): - ret = self._get_json(f"{self.proxmox_url}/api2/json/nodes/{node}/{vmtype}/{vmid}/snapshot") - snapshots = [snapshot['name'] for snapshot in ret if snapshot['name'] != 'current'] - properties[self._fact('snapshots')] = snapshots - - def to_safe(self, word): - '''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups - #> ProxmoxInventory.to_safe("foo-bar baz") - 'foo_barbaz' - ''' - regex = r"[^A-Za-z0-9\_]" - return re.sub(regex, "_", word.replace(" ", "")) - - def _fact(self, name): - 
'''Generate a fact's full name from the common prefix and a name.''' - return self.to_safe(f'{self.facts_prefix}{name.lower()}') - - def _group(self, name): - '''Generate a group's full name from the common prefix and a name.''' - return self.to_safe(f'{self.group_prefix}{name.lower()}') - - def _can_add_host(self, name, properties): - '''Ensure that a host satisfies all defined hosts filters. If strict mode is - enabled, any error during host filter compositing will lead to an AnsibleError - being raised, otherwise the filter will be ignored. - ''' - for host_filter in self.host_filters: - try: - if not self._compose(host_filter, properties): - return False - except Exception as e: # pylint: disable=broad-except - message = f"Could not evaluate host filter {host_filter} for host {name} - {e}" - if self.strict: - raise AnsibleError(message) - display.warning(message) - return True - - def _add_host(self, name, variables): - self.inventory.add_host(name) - for k, v in variables.items(): - self.inventory.set_variable(name, k, v) - variables = self.inventory.get_host(name).get_vars() - self._set_composite_vars(self.get_option('compose'), variables, name, strict=self.strict) - self._add_host_to_composed_groups(self.get_option('groups'), variables, name, strict=self.strict) - self._add_host_to_keyed_groups(self.get_option('keyed_groups'), variables, name, strict=self.strict) - - def _handle_item(self, node, ittype, item): - '''Handle an item from the list of LXC containers and Qemu VM. The - return value will be either None if the item was skipped or the name of - the item if it was added to the inventory.''' - if item.get('template'): - return None - - properties = dict() - name, vmid = item['name'], item['vmid'] - - # get status, config and snapshots if want_facts == True - want_facts = self.get_option('want_facts') - if want_facts: - self._get_vm_status(properties, node, vmid, ittype, name) - self._get_vm_config(properties, node, vmid, ittype, name) - self._get_vm_snapshots(properties, node, vmid, ittype, name) - - if ittype == 'lxc': - self._get_lxc_interfaces(properties, node, vmid) - - # ensure the host satisfies filters - if not self._can_add_host(name, properties): - return None - - # add the host to the inventory - self._add_host(name, properties) - node_type_group = self._group(f'{node}_{ittype}') - self.inventory.add_child(self._group(f"all_{ittype}"), name) - self.inventory.add_child(node_type_group, name) - - item_status = item['status'] - if item_status == 'running': - if want_facts and ittype == 'qemu' and self.get_option('qemu_extended_statuses'): - # get more details about the status of the qemu VM - item_status = properties.get(self._fact('qmpstatus'), item_status) - self.inventory.add_child(self._group(f'all_{item_status}'), name) - - return name - - def _populate_pool_groups(self, added_hosts): - '''Generate groups from Proxmox resource pools, ignoring VMs and - containers that were skipped.''' - for pool in self._get_pools(): - poolid = pool.get('poolid') - if not poolid: - continue - pool_group = self._group(f"pool_{poolid}") - self.inventory.add_group(pool_group) - - for member in self._get_members_per_pool(poolid): - name = member.get('name') - if name and name in added_hosts: - self.inventory.add_child(pool_group, name) - - def _populate(self): - - # create common groups - default_groups = ['lxc', 'qemu', 'running', 'stopped'] - - if self.get_option('qemu_extended_statuses'): - default_groups.extend(['prelaunch', 'paused']) - - for group in default_groups: - 
self.inventory.add_group(self._group(f'all_{group}')) - nodes_group = self._group('nodes') - if not self.exclude_nodes: - self.inventory.add_group(nodes_group) - - want_proxmox_nodes_ansible_host = self.get_option("want_proxmox_nodes_ansible_host") - - # gather vm's on nodes - self._get_auth() - hosts = [] - for node in self._get_nodes(): - if not node.get('node'): - continue - if not self.exclude_nodes: - self.inventory.add_host(node['node']) - if node['type'] == 'node' and not self.exclude_nodes: - self.inventory.add_child(nodes_group, node['node']) - - if node['status'] == 'offline': - continue - - # get node IP address - if want_proxmox_nodes_ansible_host and not self.exclude_nodes: - ip = self._get_node_ip(node['node']) - self.inventory.set_variable(node['node'], 'ansible_host', ip) - - # Setting composite variables - if not self.exclude_nodes: - variables = self.inventory.get_host(node['node']).get_vars() - self._set_composite_vars(self.get_option('compose'), variables, node['node'], strict=self.strict) - - # add LXC/Qemu groups for the node - for ittype in ('lxc', 'qemu'): - node_type_group = self._group(f"{node['node']}_{ittype}") - self.inventory.add_group(node_type_group) - - # get LXC containers and Qemu VMs for this node - lxc_objects = zip(itertools.repeat('lxc'), self._get_lxc_per_node(node['node'])) - qemu_objects = zip(itertools.repeat('qemu'), self._get_qemu_per_node(node['node'])) - for ittype, item in itertools.chain(lxc_objects, qemu_objects): - name = self._handle_item(node['node'], ittype, item) - if name is not None: - hosts.append(name) - - # gather vm's in pools - self._populate_pool_groups(hosts) - - def parse(self, inventory, loader, path, cache=True): - if not HAS_REQUESTS: - raise AnsibleError('This module requires Python Requests 1.1.0 or higher: ' - 'https://github.com/psf/requests.') - - super(InventoryModule, self).parse(inventory, loader, path) - - # read config from file, this sets 'options' - self._read_config_data(path) - - # read and template auth options - for o in ('url', 'user', 'password', 'token_id', 'token_secret'): - v = self.get_option(o) - if self.templar.is_template(v): - v = self.templar.template(v, disable_lookups=False) - setattr(self, f'proxmox_{o}', v) - - # some more cleanup and validation - self.proxmox_url = self.proxmox_url.rstrip('/') - - if self.proxmox_password is None and (self.proxmox_token_id is None or self.proxmox_token_secret is None): - raise AnsibleError('You must specify either a password or both token_id and token_secret.') - - if self.get_option('qemu_extended_statuses') and not self.get_option('want_facts'): - raise AnsibleError('You must set want_facts to True if you want to use qemu_extended_statuses.') - # read rest of options - self.exclude_nodes = self.get_option('exclude_nodes') - self.cache_key = self.get_cache_key(path) - self.use_cache = cache and self.get_option('cache') - self.host_filters = self.get_option('filters') - self.group_prefix = self.get_option('group_prefix') - self.facts_prefix = self.get_option('facts_prefix') - self.strict = self.get_option('strict') - - # actually populate inventory - self._populate() diff --git a/plugins/inventory/scaleway.py b/plugins/inventory/scaleway.py index 9a40243ee7..c730049833 100644 --- a/plugins/inventory/scaleway.py +++ b/plugins/inventory/scaleway.py @@ -3,80 +3,79 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import 
(absolute_import, division, print_function)
+from __future__ import annotations

-__metaclass__ = type
-DOCUMENTATION = r'''
-    name: scaleway
-    author:
-      - Remy Leone (@remyleone)
-    short_description: Scaleway inventory source
+DOCUMENTATION = r"""
+name: scaleway
+author:
+  - Remy Leone (@remyleone)
+short_description: Scaleway inventory source
+description:
+  - Get inventory hosts from Scaleway.
+requirements:
+  - PyYAML
+options:
+  plugin:
+    description: Token that ensures this is a source file for the 'scaleway' plugin.
+    required: true
+    type: string
+    choices: ['scaleway', 'community.general.scaleway']
+  regions:
+    description: Filter results on a specific Scaleway region.
+    type: list
+    elements: string
+    default:
+      - ams1
+      - par1
+      - par2
+      - waw1
+  tags:
+    description: Filter results on a specific tag.
+    type: list
+    elements: string
+  scw_profile:
    description:
-      - Get inventory hosts from Scaleway.
-    requirements:
-      - PyYAML
-    options:
-      plugin:
-        description: Token that ensures this is a source file for the 'scaleway' plugin.
-        required: true
-        type: string
-        choices: ['scaleway', 'community.general.scaleway']
-      regions:
-        description: Filter results on a specific Scaleway region.
-        type: list
-        elements: string
-        default:
-          - ams1
-          - par1
-          - par2
-          - waw1
-      tags:
-        description: Filter results on a specific tag.
-        type: list
-        elements: string
-      scw_profile:
-        description:
-          - The config profile to use in config file.
-          - By default uses the one specified as C(active_profile) in the config file, or falls back to V(default) if that is not defined.
-        type: string
-        version_added: 4.4.0
-      oauth_token:
-        description:
-          - Scaleway OAuth token.
-          - If not explicitly defined or in environment variables, it will try to lookup in the scaleway-cli configuration file
-            (C($SCW_CONFIG_PATH), C($XDG_CONFIG_HOME/scw/config.yaml), or C(~/.config/scw/config.yaml)).
-          - More details on L(how to generate token, https://www.scaleway.com/en/docs/generate-api-keys/).
-        type: string
-        env:
-          # in order of precedence
-          - name: SCW_TOKEN
-          - name: SCW_API_KEY
-          - name: SCW_OAUTH_TOKEN
-      hostnames:
-        description: List of preference about what to use as an hostname.
-        type: list
-        elements: string
-        default:
-          - public_ipv4
-        choices:
-          - public_ipv4
-          - private_ipv4
-          - public_ipv6
-          - hostname
-          - id
-      variables:
-        description: 'Set individual variables: keys are variable names and
-            values are templates. Any value returned by the
-            L(Scaleway API, https://developer.scaleway.com/#servers-server-get)
-            can be used.'
-        type: dict
-'''
+      - The config profile to use in the config file.
+      - By default uses the one specified as C(active_profile) in the config file, or falls back to V(default) if that is
+        not defined.
+    type: string
+    version_added: 4.4.0
+  oauth_token:
+    description:
+      - Scaleway OAuth token.
+      - If not explicitly defined or in environment variables, it tries to look it up in the C(scaleway-cli) configuration file
+        (C($SCW_CONFIG_PATH), C($XDG_CONFIG_HOME/scw/config.yaml), or C(~/.config/scw/config.yaml)).
+      - More details on L(how to generate token, https://www.scaleway.com/en/docs/generate-api-keys/).
+    type: string
+    env:
+      # in order of precedence
+      - name: SCW_TOKEN
+      - name: SCW_API_KEY
+      - name: SCW_OAUTH_TOKEN
+  hostnames:
+    description: List of preferences about what to use as a hostname.
+ type: list + elements: string + default: + - public_ipv4 + choices: + - public_ipv4 + - private_ipv4 + - public_ipv6 + - hostname + - id + variables: + description: 'Set individual variables: keys are variable names and values are templates. Any value returned by the L(Scaleway + API, https://developer.scaleway.com/#servers-server-get) can be used.' + type: dict +""" -EXAMPLES = r''' +EXAMPLES = r""" # scaleway_inventory.yml file in YAML format # Example command line: ansible-inventory --list -i scaleway_inventory.yml +--- # use hostname as inventory_hostname # use the private IP address to connect to the host plugin: community.general.scaleway @@ -91,6 +90,7 @@ variables: ansible_host: private_ip state: state +--- # use hostname as inventory_hostname and public IP address to connect to the host plugin: community.general.scaleway hostnames: @@ -100,6 +100,7 @@ regions: variables: ansible_host: public_ip.address +--- # Using static strings as variables plugin: community.general.scaleway hostnames: @@ -108,7 +109,7 @@ variables: ansible_host: public_ip.address ansible_connection: "'ssh'" ansible_user: "'admin'" -''' +""" import os import json diff --git a/plugins/inventory/stackpath_compute.py b/plugins/inventory/stackpath_compute.py deleted file mode 100644 index c87d0e5277..0000000000 --- a/plugins/inventory/stackpath_compute.py +++ /dev/null @@ -1,286 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2020 Shay Rybak -# Copyright (c) 2020 Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - name: stackpath_compute - short_description: StackPath Edge Computing inventory source - version_added: 1.2.0 - author: - - UNKNOWN (@shayrybak) - extends_documentation_fragment: - - inventory_cache - - constructed - description: - - Get inventory hosts from StackPath Edge Computing. - - Uses a YAML configuration file that ends with stackpath_compute.(yml|yaml). - options: - plugin: - description: - - A token that ensures this is a source file for the plugin. - required: true - type: string - choices: ['community.general.stackpath_compute'] - client_id: - description: - - An OAuth client ID generated from the API Management section of the StackPath customer portal - U(https://control.stackpath.net/api-management). - required: true - type: str - client_secret: - description: - - An OAuth client secret generated from the API Management section of the StackPath customer portal - U(https://control.stackpath.net/api-management). - required: true - type: str - stack_slugs: - description: - - A list of Stack slugs to query instances in. If no entry then get instances in all stacks on the account. - type: list - elements: str - use_internal_ip: - description: - - Whether or not to use internal IP addresses, If false, uses external IP addresses, internal otherwise. - - If an instance doesn't have an external IP it will not be returned when this option is set to false. - type: bool -''' - -EXAMPLES = ''' -# Example using credentials to fetch all workload instances in a stack. 
---- -plugin: community.general.stackpath_compute -client_id: my_client_id -client_secret: my_client_secret -stack_slugs: -- my_first_stack_slug -- my_other_stack_slug -use_internal_ip: false -''' - -import traceback -import json - -from ansible.errors import AnsibleError -from ansible.module_utils.urls import open_url -from ansible.plugins.inventory import ( - BaseInventoryPlugin, - Constructable, - Cacheable -) -from ansible.utils.display import Display - -from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe - - -display = Display() - - -class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): - - NAME = 'community.general.stackpath_compute' - - def __init__(self): - super(InventoryModule, self).__init__() - - # credentials - self.client_id = None - self.client_secret = None - self.stack_slug = None - self.api_host = "https://gateway.stackpath.com" - self.group_keys = [ - "stackSlug", - "workloadId", - "cityCode", - "countryCode", - "continent", - "target", - "name", - "workloadSlug" - ] - - def _validate_config(self, config): - if config['plugin'] != 'community.general.stackpath_compute': - raise AnsibleError("plugin doesn't match this plugin") - try: - client_id = config['client_id'] - if len(client_id) != 32: - raise AnsibleError("client_id must be 32 characters long") - except KeyError: - raise AnsibleError("config missing client_id, a required option") - try: - client_secret = config['client_secret'] - if len(client_secret) != 64: - raise AnsibleError("client_secret must be 64 characters long") - except KeyError: - raise AnsibleError("config missing client_id, a required option") - return True - - def _set_credentials(self): - ''' - :param config_data: contents of the inventory config file - ''' - self.client_id = self.get_option('client_id') - self.client_secret = self.get_option('client_secret') - - def _authenticate(self): - payload = json.dumps( - { - "client_id": self.client_id, - "client_secret": self.client_secret, - "grant_type": "client_credentials", - } - ) - headers = { - "Content-Type": "application/json", - } - resp = open_url( - f"{self.api_host}/identity/v1/oauth2/token", - headers=headers, - data=payload, - method="POST" - ) - status_code = resp.code - if status_code == 200: - body = resp.read() - self.auth_token = json.loads(body)["access_token"] - - def _query(self): - results = [] - workloads = [] - self._authenticate() - for stack_slug in self.stack_slugs: - try: - workloads = self._stackpath_query_get_list(f"{self.api_host}/workload/v1/stacks/{stack_slug}/workloads") - except Exception: - raise AnsibleError(f"Failed to get workloads from the StackPath API: {traceback.format_exc()}") - for workload in workloads: - try: - workload_instances = self._stackpath_query_get_list( - f"{self.api_host}/workload/v1/stacks/{stack_slug}/workloads/{workload['id']}/instances" - ) - except Exception: - raise AnsibleError(f"Failed to get workload instances from the StackPath API: {traceback.format_exc()}") - for instance in workload_instances: - if instance["phase"] == "RUNNING": - instance["stackSlug"] = stack_slug - instance["workloadId"] = workload["id"] - instance["workloadSlug"] = workload["slug"] - instance["cityCode"] = instance["location"]["cityCode"] - instance["countryCode"] = instance["location"]["countryCode"] - instance["continent"] = instance["location"]["continent"] - instance["target"] = instance["metadata"]["labels"]["workload.platform.stackpath.net/target-name"] - try: - if instance[self.hostname_key]: - 
results.append(instance) - except KeyError: - pass - return results - - def _populate(self, instances): - for instance in instances: - for group_key in self.group_keys: - group = f"{group_key}_{instance[group_key]}" - group = group.lower().replace(" ", "_").replace("-", "_") - self.inventory.add_group(group) - self.inventory.add_host(instance[self.hostname_key], - group=group) - - def _stackpath_query_get_list(self, url): - self._authenticate() - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {self.auth_token}", - } - next_page = True - result = [] - cursor = '-1' - while next_page: - resp = open_url( - f"{url}?page_request.first=10&page_request.after={cursor}", - headers=headers, - method="GET" - ) - status_code = resp.code - if status_code == 200: - body = resp.read() - body_json = json.loads(body) - result.extend(body_json["results"]) - next_page = body_json["pageInfo"]["hasNextPage"] - if next_page: - cursor = body_json["pageInfo"]["endCursor"] - return result - - def _get_stack_slugs(self, stacks): - self.stack_slugs = [stack["slug"] for stack in stacks] - - def verify_file(self, path): - ''' - :param loader: an ansible.parsing.dataloader.DataLoader object - :param path: the path to the inventory config file - :return the contents of the config file - ''' - if super(InventoryModule, self).verify_file(path): - if path.endswith(('stackpath_compute.yml', 'stackpath_compute.yaml')): - return True - display.debug( - "stackpath_compute inventory filename must end with \ - 'stackpath_compute.yml' or 'stackpath_compute.yaml'" - ) - return False - - def parse(self, inventory, loader, path, cache=True): - - super(InventoryModule, self).parse(inventory, loader, path) - - config = self._read_config_data(path) - self._validate_config(config) - self._set_credentials() - - # get user specifications - self.use_internal_ip = self.get_option('use_internal_ip') - if self.use_internal_ip: - self.hostname_key = "ipAddress" - else: - self.hostname_key = "externalIpAddress" - - self.stack_slugs = self.get_option('stack_slugs') - if not self.stack_slugs: - try: - stacks = self._stackpath_query_get_list(f"{self.api_host}/stack/v1/stacks") - self._get_stack_slugs(stacks) - except Exception: - raise AnsibleError(f"Failed to get stack IDs from the Stackpath API: {traceback.format_exc()}") - - cache_key = self.get_cache_key(path) - # false when refresh_cache or --flush-cache is used - if cache: - # get the user-specified directive - cache = self.get_option('cache') - - # Generate inventory - cache_needs_update = False - if cache: - try: - results = self._cache[cache_key] - except KeyError: - # if cache expires or cache file doesn't exist - cache_needs_update = True - - if not cache or cache_needs_update: - results = self._query() - - self._populate(make_unsafe(results)) - - # If the cache has expired/doesn't exist or - # if refresh_inventory/flush cache is used - # when the user is using caching, update the cached inventory - try: - if cache_needs_update or (not cache and self.get_option('cache')): - self._cache[cache_key] = results - except Exception: - raise AnsibleError(f"Failed to populate data: {traceback.format_exc()}") diff --git a/plugins/inventory/virtualbox.py b/plugins/inventory/virtualbox.py index b0545319a1..2eb52a617c 100644 --- a/plugins/inventory/virtualbox.py +++ b/plugins/inventory/virtualbox.py @@ -3,73 +3,74 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: 
GPL-3.0-or-later

-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations

-DOCUMENTATION = '''
-    author: Unknown (!UNKNOWN)
-    name: virtualbox
-    short_description: virtualbox inventory source
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: virtualbox
+short_description: VirtualBox inventory source
+description:
+  - Get inventory hosts from the local VirtualBox installation.
+  - Uses a YAML configuration file that ends with virtualbox.(yml|yaml) or vbox.(yml|yaml).
+  - The inventory_hostname is always the 'Name' of the VirtualBox instance.
+  - Groups can be assigned to the VMs using C(VBoxManage). Multiple groups can be assigned by using V(/) as a delimiter.
+  - A separate parameter, O(enable_advanced_group_parsing), is exposed to change grouping behaviour. See the parameter documentation
+    for details.
+extends_documentation_fragment:
+  - constructed
+  - inventory_cache
+options:
+  plugin:
+    description: Token that ensures this is a source file for the P(community.general.virtualbox#inventory) plugin.
+    type: string
+    required: true
+    choices: ['virtualbox', 'community.general.virtualbox']
+  running_only:
+    description: Toggles showing all VMs instead of only those currently running.
+    type: boolean
+    default: false
+  settings_password_file:
+    description: Provide a file containing the settings password (equivalent to C(--settingspwfile)).
+    type: string
+  network_info_path:
+    description: Property path to query for network information (C(ansible_host)).
+    type: string
+    default: "/VirtualBox/GuestInfo/Net/0/V4/IP"
+  query:
+    description: Create vars from VirtualBox properties.
+    type: dictionary
+    default: {}
+  enable_advanced_group_parsing:
    description:
-      - Get inventory hosts from the local virtualbox installation.
-      - Uses a YAML configuration file that ends with virtualbox.(yml|yaml) or vbox.(yml|yaml).
-      - The inventory_hostname is always the 'Name' of the virtualbox instance.
-      - Groups can be assigned to the VMs using C(VBoxManage). Multiple groups can be assigned by using V(/) as a delimeter.
-      - A separate parameter, O(enable_advanced_group_parsing) is exposed to change grouping behaviour. See the parameter documentation for details.
-    extends_documentation_fragment:
-      - constructed
-      - inventory_cache
-    options:
-      plugin:
-        description: token that ensures this is a source file for the 'virtualbox' plugin
-        type: string
-        required: true
-        choices: ['virtualbox', 'community.general.virtualbox']
-      running_only:
-        description: toggles showing all vms vs only those currently running
-        type: boolean
-        default: false
-      settings_password_file:
-        description: provide a file containing the settings password (equivalent to --settingspwfile)
-        type: string
-      network_info_path:
-        description: property path to query for network information (ansible_host)
-        type: string
-        default: "/VirtualBox/GuestInfo/Net/0/V4/IP"
-      query:
-        description: create vars from virtualbox properties
-        type: dictionary
-        default: {}
-      enable_advanced_group_parsing:
-        description:
-          - The default group parsing rule (when this setting is set to V(false)) is to split the VirtualBox VM's group based on the V(/) character and
-            assign the resulting list elements as an Ansible Group.
-          - Setting O(enable_advanced_group_parsing=true) changes this behaviour to match VirtualBox's interpretation of groups according to
-            U(https://www.virtualbox.org/manual/UserManual.html#gui-vmgroups).
- Groups are now split using the V(,) character, and the V(/) character indicates nested groups. - - When enabled, a VM that's been configured using V(VBoxManage modifyvm "vm01" --groups "/TestGroup/TestGroup2,/TestGroup3") will result in - the group C(TestGroup2) being a child group of C(TestGroup); and - the VM being a part of C(TestGroup2) and C(TestGroup3). - default: false - type: bool - version_added: 9.2.0 -''' + - The default group parsing rule (when this setting is set to V(false)) is to split the VirtualBox VM's group based + on the V(/) character and assign the resulting list elements as an Ansible Group. + - Setting O(enable_advanced_group_parsing=true) changes this behaviour to match VirtualBox's interpretation of groups + according to U(https://www.virtualbox.org/manual/UserManual.html#gui-vmgroups). Groups are now split using the V(,) + character, and the V(/) character indicates nested groups. + - When enabled, a VM that's been configured using V(VBoxManage modifyvm "vm01" --groups "/TestGroup/TestGroup2,/TestGroup3") + results in the group C(TestGroup2) being a child group of C(TestGroup); and the VM being a part of C(TestGroup2) + and C(TestGroup3). + default: false + type: bool + version_added: 9.2.0 +""" -EXAMPLES = ''' +EXAMPLES = r""" +--- # file must be named vbox.yaml or vbox.yml -simple_config_file: - plugin: community.general.virtualbox - settings_password_file: /etc/virtulbox/secrets - query: - logged_in_users: /VirtualBox/GuestInfo/OS/LoggedInUsersList - compose: - ansible_connection: ('indows' in vbox_Guest_OS)|ternary('winrm', 'ssh') +plugin: community.general.virtualbox +settings_password_file: /etc/virtualbox/secrets +query: + logged_in_users: /VirtualBox/GuestInfo/OS/LoggedInUsersList +compose: + ansible_connection: ('indows' in vbox_Guest_OS)|ternary('winrm', 'ssh') +--- # add hosts (all match with minishift vm) to the group container if any of the vms are in ansible_inventory' plugin: community.general.virtualbox groups: container: "'minis' in (inventory_hostname)" -''' +""" import os diff --git a/plugins/inventory/xen_orchestra.py b/plugins/inventory/xen_orchestra.py index 0a050d0bf9..e6d828845a 100644 --- a/plugins/inventory/xen_orchestra.py +++ b/plugins/inventory/xen_orchestra.py @@ -3,65 +3,83 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: xen_orchestra - short_description: Xen Orchestra inventory source - version_added: 4.1.0 - author: - - Dom Del Nano (@ddelnano) - - Samori Gorse (@shinuza) - requirements: - - websocket-client >= 1.0.0 +DOCUMENTATION = r""" +name: xen_orchestra +short_description: Xen Orchestra inventory source +version_added: 4.1.0 +author: + - Dom Del Nano (@ddelnano) + - Samori Gorse (@shinuza) +requirements: + - websocket-client >= 1.0.0 +description: + - Get inventory hosts from a Xen Orchestra deployment. + - Uses a configuration file as an inventory source, it must end in C(.xen_orchestra.yml) or C(.xen_orchestra.yaml). +extends_documentation_fragment: + - constructed + - inventory_cache +options: + plugin: + description: The name of this plugin, it should always be set to V(community.general.xen_orchestra) for this plugin to + recognize it as its own. 
+ required: true + choices: ['community.general.xen_orchestra'] + type: str + api_host: description: - - Get inventory hosts from a Xen Orchestra deployment. - - 'Uses a configuration file as an inventory source, it must end in C(.xen_orchestra.yml) or C(.xen_orchestra.yaml).' - extends_documentation_fragment: - - constructed - - inventory_cache - options: - plugin: - description: The name of this plugin, it should always be set to V(community.general.xen_orchestra) for this plugin to recognize it as its own. - required: true - choices: ['community.general.xen_orchestra'] - type: str - api_host: - description: - - API host to XOA API. - - If the value is not specified in the inventory configuration, the value of environment variable E(ANSIBLE_XO_HOST) will be used instead. - type: str - env: - - name: ANSIBLE_XO_HOST - user: - description: - - Xen Orchestra user. - - If the value is not specified in the inventory configuration, the value of environment variable E(ANSIBLE_XO_USER) will be used instead. - required: true - type: str - env: - - name: ANSIBLE_XO_USER - password: - description: - - Xen Orchestra password. - - If the value is not specified in the inventory configuration, the value of environment variable E(ANSIBLE_XO_PASSWORD) will be used instead. - required: true - type: str - env: - - name: ANSIBLE_XO_PASSWORD - validate_certs: - description: Verify TLS certificate if using HTTPS. - type: boolean - default: true - use_ssl: - description: Use wss when connecting to the Xen Orchestra API - type: boolean - default: true -''' + - API host to XOA API. + - If the value is not specified in the inventory configuration, the value of environment variable E(ANSIBLE_XO_HOST) + is used instead. + type: str + env: + - name: ANSIBLE_XO_HOST + user: + description: + - Xen Orchestra user. + - If the value is not specified in the inventory configuration, the value of environment variable E(ANSIBLE_XO_USER) + is used instead. + required: true + type: str + env: + - name: ANSIBLE_XO_USER + password: + description: + - Xen Orchestra password. + - If the value is not specified in the inventory configuration, the value of environment variable E(ANSIBLE_XO_PASSWORD) + is used instead. + required: true + type: str + env: + - name: ANSIBLE_XO_PASSWORD + validate_certs: + description: Verify TLS certificate if using HTTPS. + type: boolean + default: true + use_ssl: + description: Use wss when connecting to the Xen Orchestra API. + type: boolean + default: true + use_vm_uuid: + description: + - Import Xen VMs to inventory using their UUID as the VM entry name. + - If set to V(false) use VM name labels instead of UUIDs. + type: boolean + default: true + version_added: 10.4.0 + use_host_uuid: + description: + - Import Xen Hosts to inventory using their UUID as the Host entry name. + - If set to V(false) use Host name labels instead of UUIDs. 
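For illustration, a minimal inventory sketch exercising the two entry-name options documented above (the host address and credentials are placeholders, not taken from this patch; the duplicate-suffix behaviour follows from the _add_vms/_add_hosts changes further down):

  # lab.xen_orchestra.yml
  plugin: community.general.xen_orchestra
  api_host: 192.168.1.255
  user: xo
  password: xo_pwd
  use_vm_uuid: false   # key VM entries by name label; duplicate labels get a numeric suffix
  use_host_uuid: true  # keep host entries keyed by UUID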
+ type: boolean + default: true + version_added: 10.4.0 +""" -EXAMPLES = ''' +EXAMPLES = r""" +--- # file must be named xen_orchestra.yaml or xen_orchestra.yml plugin: community.general.xen_orchestra api_host: 192.168.1.255 @@ -70,11 +88,12 @@ password: xo_pwd validate_certs: true use_ssl: true groups: - kube_nodes: "'kube_node' in tags" + kube_nodes: "'kube_node' in tags" compose: - ansible_port: 2222 - -''' + ansible_port: 2222 +use_vm_uuid: false +use_host_uuid: true +""" import json import ssl @@ -197,10 +216,20 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): self._set_composite_vars(self.get_option('compose'), variables, name, strict=strict) def _add_vms(self, vms, hosts, pools): + vm_name_list = [] for uuid, vm in vms.items(): + if self.vm_entry_name_type == 'name_label': + if vm['name_label'] not in vm_name_list: + entry_name = vm['name_label'] + vm_name_list.append(vm['name_label']) + else: + vm_duplicate_count = vm_name_list.count(vm['name_label']) + entry_name = f"{vm['name_label']}_{vm_duplicate_count}" + vm_name_list.append(vm['name_label']) + else: + entry_name = uuid group = 'with_ip' ip = vm.get('mainIpAddress') - entry_name = uuid power_state = vm['power_state'].lower() pool_name = self._pool_group_name_for_uuid(pools, vm['$poolId']) host_name = self._host_group_name_for_uuid(hosts, vm['$container']) @@ -247,8 +276,19 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): self._apply_constructable(entry_name, self.inventory.get_host(entry_name).get_vars()) def _add_hosts(self, hosts, pools): + host_name_list = [] for host in hosts.values(): - entry_name = host['uuid'] + if self.host_entry_name_type == 'name_label': + if host['name_label'] not in host_name_list: + entry_name = host['name_label'] + host_name_list.append(host['name_label']) + else: + host_duplicate_count = host_name_list.count(host['name_label']) + entry_name = f"{host['name_label']}_{host_duplicate_count}" + host_name_list.append(host['name_label']) + else: + entry_name = host['uuid'] + group_name = f"xo_host_{clean_group_name(host['name_label'])}" pool_name = self._pool_group_name_for_uuid(pools, host['$poolId']) @@ -338,5 +378,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): if not self.get_option('use_ssl'): self.protocol = 'ws' + self.vm_entry_name_type = 'uuid' + if not self.get_option('use_vm_uuid'): + self.vm_entry_name_type = 'name_label' + + self.host_entry_name_type = 'uuid' + if not self.get_option('use_host_uuid'): + self.host_entry_name_type = 'name_label' + objects = self._get_objects() self._populate(make_unsafe(objects)) diff --git a/plugins/lookup/bitwarden.py b/plugins/lookup/bitwarden.py index 9a8b5749c2..7d65792b7f 100644 --- a/plugins/lookup/bitwarden.py +++ b/plugins/lookup/bitwarden.py @@ -5,52 +5,65 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = """ - name: bitwarden - author: - - Jonathan Lung (@lungj) - requirements: - - bw (command line utility) - - be logged into bitwarden - - bitwarden vault unlocked - - E(BW_SESSION) environment variable set - short_description: Retrieve secrets from Bitwarden - version_added: 5.4.0 +DOCUMENTATION = r""" +name: bitwarden +author: + - Jonathan Lung (@lungj) +requirements: + - bw (command line utility) + - be logged into bitwarden + - bitwarden vault unlocked + - E(BW_SESSION) environment variable set +short_description: Retrieve secrets from Bitwarden +version_added: 5.4.0 +description: + - Retrieve secrets from 
Bitwarden. +options: + _terms: + description: Key(s) to fetch values for from login info. + required: true + type: list + elements: str + search: description: - - Retrieve secrets from Bitwarden. - options: - _terms: - description: Key(s) to fetch values for from login info. - required: true - type: list - elements: str - search: - description: - - Field to retrieve, for example V(name) or V(id). - - If set to V(id), only zero or one element can be returned. - Use the Jinja C(first) filter to get the only list element. - - If set to V(None) or V(''), or if O(_terms) is empty, records are not filtered by fields. - type: str - default: name - version_added: 5.7.0 - field: - description: Field to fetch. Leave unset to fetch whole response. - type: str - collection_id: - description: Collection ID to filter results by collection. Leave unset to skip filtering. - type: str - version_added: 6.3.0 - organization_id: - description: Organization ID to filter results by organization. Leave unset to skip filtering. - type: str - version_added: 8.5.0 - bw_session: - description: Pass session key instead of reading from env. - type: str - version_added: 8.4.0 + - Field to retrieve, for example V(name) or V(id). + - If set to V(id), only zero or one element can be returned. Use the Jinja C(first) filter to get the only list element. + - If set to V(None) or V(''), or if O(_terms) is empty, records are not filtered by fields. + type: str + default: name + version_added: 5.7.0 + field: + description: Field to fetch. Leave unset to fetch whole response. + type: str + collection_id: + description: + - Collection ID to filter results by collection. Leave unset to skip filtering. + - O(collection_id) and O(collection_name) are mutually exclusive. + type: str + version_added: 6.3.0 + collection_name: + description: + - Collection name to filter results by collection. Leave unset to skip filtering. + - O(collection_id) and O(collection_name) are mutually exclusive. + type: str + version_added: 10.4.0 + organization_id: + description: Organization ID to filter results by organization. Leave unset to skip filtering. + type: str + version_added: 8.5.0 + bw_session: + description: Pass session key instead of reading from env. + type: str + version_added: 8.4.0 + result_count: + description: + - Number of results expected for the lookup query. Task fails if O(result_count) is set but does not match the number + of query results. Leave empty to skip this check. + type: int + version_added: 10.4.0 """ -EXAMPLES = """ +EXAMPLES = r""" - name: "Get 'password' from all Bitwarden records named 'a_test'" ansible.builtin.debug: msg: >- @@ -85,21 +98,31 @@ EXAMPLES = """ ansible.builtin.debug: msg: >- {{ lookup('community.general.bitwarden', None, collection_id='bafba515-af11-47e6-abe3-af1200cd18b2') }} + +- name: "Get all Bitwarden records from collection" + ansible.builtin.debug: + msg: >- + {{ lookup('community.general.bitwarden', None, collection_name='my_collections/test_collection') }} + +- name: "Get Bitwarden record named 'a_test', ensure there is exactly one match" + ansible.builtin.debug: + msg: >- + {{ lookup('community.general.bitwarden', 'a_test', result_count=1) }} """ -RETURN = """ - _raw: - description: - - A one-element list that contains a list of requested fields or JSON objects of matches. - - If you use C(query), you get a list of lists. If you use C(lookup) without C(wantlist=true), - this always gets reduced to a list of field values or JSON objects. 
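The C(query) versus C(lookup) shape described in this RETURN block is easiest to see side by side; a sketch, assuming a vault holding two records named C(a_test):

  - name: query keeps one inner list per term
    ansible.builtin.debug:
      msg: "{{ query('community.general.bitwarden', 'a_test', field='password') }}"  # a list of lists

  - name: lookup without wantlist=true reduces to the field values
    ansible.builtin.debug:
      msg: "{{ lookup('community.general.bitwarden', 'a_test', field='password') }}"  # one flat list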
- type: list - elements: list +RETURN = r""" +_raw: + description: + - A one-element list that contains a list of requested fields or JSON objects of matches. + - If you use C(query), you get a list of lists. If you use C(lookup) without C(wantlist=true), this always gets reduced + to a list of field values or JSON objects. + type: list + elements: list """ from subprocess import Popen, PIPE -from ansible.errors import AnsibleError +from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.module_utils.common.text.converters import to_bytes, to_text from ansible.parsing.ajson import AnsibleJSONDecoder from ansible.plugins.lookup import LookupBase @@ -211,6 +234,24 @@ class Bitwarden(object): return field_matches + def get_collection_ids(self, collection_name: str, organization_id=None) -> list[str]: + """Return matching IDs of collections whose name is equal to collection_name.""" + + # Prepare set of params for Bitwarden CLI + params = ['list', 'collections', '--search', collection_name] + + if organization_id: + params.extend(['--organizationid', organization_id]) + + out, err = self._run(params) + + # This includes things that matched in different fields. + initial_matches = AnsibleJSONDecoder().raw_decode(out)[0] + + # Filter to only return the ID of a collections with exactly matching name + return [item['id'] for item in initial_matches + if str(item.get('name')).lower() == collection_name.lower()] + class LookupModule(LookupBase): @@ -219,7 +260,9 @@ class LookupModule(LookupBase): field = self.get_option('field') search_field = self.get_option('search') collection_id = self.get_option('collection_id') + collection_name = self.get_option('collection_name') organization_id = self.get_option('organization_id') + result_count = self.get_option('result_count') _bitwarden.session = self.get_option('bw_session') if not _bitwarden.unlocked: @@ -228,7 +271,27 @@ class LookupModule(LookupBase): if not terms: terms = [None] - return [_bitwarden.get_field(field, term, search_field, collection_id, organization_id) for term in terms] + if collection_name and collection_id: + raise AnsibleOptionsError("'collection_name' and 'collection_id' are mutually exclusive!") + elif collection_name: + collection_ids = _bitwarden.get_collection_ids(collection_name, organization_id) + if not collection_ids: + raise BitwardenException("No matching collections found!") + else: + collection_ids = [collection_id] + + results = [ + _bitwarden.get_field(field, term, search_field, collection_id, organization_id) + for collection_id in collection_ids + for term in terms + ] + + for result in results: + if result_count is not None and len(result) != result_count: + raise BitwardenException( + f"Number of results doesn't match result_count! ({len(result)} != {result_count})") + + return results _bitwarden = Bitwarden() diff --git a/plugins/lookup/bitwarden_secrets_manager.py b/plugins/lookup/bitwarden_secrets_manager.py index 3d08067105..431384c079 100644 --- a/plugins/lookup/bitwarden_secrets_manager.py +++ b/plugins/lookup/bitwarden_secrets_manager.py @@ -6,31 +6,31 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = """ - name: bitwarden_secrets_manager - author: - - jantari (@jantari) - requirements: - - bws (command line utility) - short_description: Retrieve secrets from Bitwarden Secrets Manager - version_added: 7.2.0 - description: - - Retrieve secrets from Bitwarden Secrets Manager. 
- options: - _terms: - description: Secret ID(s) to fetch values for. - required: true - type: list - elements: str - bws_access_token: - description: The BWS access token to use for this lookup. - env: - - name: BWS_ACCESS_TOKEN - required: true - type: str +DOCUMENTATION = r""" +name: bitwarden_secrets_manager +author: + - jantari (@jantari) +requirements: + - bws (command line utility) +short_description: Retrieve secrets from Bitwarden Secrets Manager +version_added: 7.2.0 +description: + - Retrieve secrets from Bitwarden Secrets Manager. +options: + _terms: + description: Secret ID(s) to fetch values for. + required: true + type: list + elements: str + bws_access_token: + description: The BWS access token to use for this lookup. + env: + - name: BWS_ACCESS_TOKEN + required: true + type: str """ -EXAMPLES = """ +EXAMPLES = r""" - name: Get a secret relying on the BWS_ACCESS_TOKEN environment variable for authentication ansible.builtin.debug: msg: >- @@ -62,11 +62,11 @@ EXAMPLES = """ {{ lookup("community.general.bitwarden_secrets_manager", "2bc23e48-4932-40de-a047-5524b7ddc972").value }} """ -RETURN = """ - _raw: - description: List containing one or more secrets. - type: list - elements: dict +RETURN = r""" +_raw: + description: List containing one or more secrets. + type: list + elements: dict """ from subprocess import Popen, PIPE diff --git a/plugins/lookup/cartesian.py b/plugins/lookup/cartesian.py index d63f3943b0..f2ad576907 100644 --- a/plugins/lookup/cartesian.py +++ b/plugins/lookup/cartesian.py @@ -6,24 +6,24 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: cartesian - short_description: returns the cartesian product of lists +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: cartesian +short_description: Returns the cartesian product of lists +description: + - Takes the input lists and returns a list that represents the product of the input lists. + - It is clearer with an example, it turns [1, 2, 3], [a, b] into [1, a], [1, b], [2, a], [2, b], [3, a], [3, b]. + - You can see the exact syntax in the examples section. +options: + _terms: description: - - Takes the input lists and returns a list that represents the product of the input lists. - - It is clearer with an example, it turns [1, 2, 3], [a, b] into [1, a], [1, b], [2, a], [2, b], [3, a], [3, b]. - You can see the exact syntax in the examples section. - options: - _terms: - description: - - a set of lists - type: list - elements: list - required: true -''' + - A set of lists. + type: list + elements: list + required: true +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Example of the change in the description ansible.builtin.debug: msg: "{{ lookup('community.general.cartesian', [1,2,3], [a, b])}}" @@ -34,15 +34,15 @@ EXAMPLES = """ with_community.general.cartesian: - "{{list1}}" - "{{list2}}" - - [1,2,3,4,5,6] + - [1, 2, 3, 4, 5, 6] """ -RETURN = """ - _list: - description: - - list of lists composed of elements of the input lists - type: list - elements: list +RETURN = r""" +_list: + description: + - List of lists composed of elements of the input lists. + type: list + elements: list """ from itertools import product @@ -66,13 +66,7 @@ class LookupModule(LookupBase): """ results = [] for x in terms: - try: - intermediate = listify_lookup_plugin_terms(x, templar=self._templar) - except TypeError: - # The loader argument is deprecated in ansible-core 2.14+. 
Fall back to - # pre-2.14 behavior for older ansible-core versions. - intermediate = listify_lookup_plugin_terms(x, templar=self._templar, loader=self._loader) - results.append(intermediate) + results.append(listify_lookup_plugin_terms(x, templar=self._templar)) return results def run(self, terms, variables=None, **kwargs): diff --git a/plugins/lookup/chef_databag.py b/plugins/lookup/chef_databag.py index eaa6a1aefa..8fe53744ee 100644 --- a/plugins/lookup/chef_databag.py +++ b/plugins/lookup/chef_databag.py @@ -6,42 +6,41 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: chef_databag - short_description: fetches data from a Chef Databag +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: chef_databag +short_description: Fetches data from a Chef Databag +description: + - 'This is a lookup plugin to provide access to chef data bags using the pychef package. It interfaces with the chef server + API using the same methods to find a knife or chef-client config file to load parameters from, starting from either the + given base path or the current working directory. The lookup order mirrors the one from Chef, all folders in the base + path are walked back looking for the following configuration file in order: C(.chef/knife.rb), C(~/.chef/knife.rb), C(/etc/chef/client.rb).' +requirements: + - "pychef (L(Python library, https://pychef.readthedocs.io), C(pip install pychef))" +options: + name: description: - - "This is a lookup plugin to provide access to chef data bags using the pychef package. - It interfaces with the chef server api using the same methods to find a knife or chef-client config file to load parameters from, - starting from either the given base path or the current working directory. - The lookup order mirrors the one from Chef, all folders in the base path are walked back looking for the following configuration - file in order : .chef/knife.rb, ~/.chef/knife.rb, /etc/chef/client.rb" - requirements: - - "pychef (L(Python library, https://pychef.readthedocs.io), C(pip install pychef))" - options: - name: - description: - - Name of the databag - type: string - required: true - item: - description: - - Item to fetch - type: string - required: true -''' - -EXAMPLES = """ - - ansible.builtin.debug: - msg: "{{ lookup('community.general.chef_databag', 'name=data_bag_name item=data_bag_item') }}" + - Name of the databag. + type: string + required: true + item: + description: + - Item to fetch. + type: string + required: true """ -RETURN = """ - _raw: - description: - - The value from the databag. - type: list - elements: dict +EXAMPLES = r""" +- ansible.builtin.debug: + msg: "{{ lookup('community.general.chef_databag', 'name=data_bag_name item=data_bag_item') }}" +""" + +RETURN = r""" +_raw: + description: + - The value from the databag. 
+ type: list + elements: dict """ from ansible.errors import AnsibleError diff --git a/plugins/lookup/collection_version.py b/plugins/lookup/collection_version.py index 28a9c34420..142c516df5 100644 --- a/plugins/lookup/collection_version.py +++ b/plugins/lookup/collection_version.py @@ -5,18 +5,17 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = """ +DOCUMENTATION = r""" name: collection_version author: Felix Fontein (@felixfontein) version_added: "4.0.0" short_description: Retrieves the version of an installed collection description: - - This lookup allows to query the version of an installed collection, and to determine whether a - collection is installed at all. - - By default it returns V(none) for non-existing collections and V(*) for collections without a - version number. The latter should only happen in development environments, or when installing - a collection from git which has no version in its C(galaxy.yml). This behavior can be adjusted - by providing other values with O(result_not_found) and O(result_no_version). + - This lookup allows to query the version of an installed collection, and to determine whether a collection is installed + at all. + - By default it returns V(none) for non-existing collections and V(*) for collections without a version number. The latter + should only happen in development environments, or when installing a collection from git which has no version in its C(galaxy.yml). + This behavior can be adjusted by providing other values with O(result_not_found) and O(result_no_version). options: _terms: description: @@ -34,30 +33,27 @@ options: result_no_version: description: - The value to return when the collection has no version number. - - This can happen for collections installed from git which do not have a version number - in C(galaxy.yml). + - This can happen for collections installed from git which do not have a version number in C(galaxy.yml). - By default, V(*) is returned. type: string default: '*' """ -EXAMPLES = """ +EXAMPLES = r""" - name: Check version of community.general ansible.builtin.debug: msg: "community.general version {{ lookup('community.general.collection_version', 'community.general') }}" """ -RETURN = """ - _raw: - description: - - The version number of the collections listed as input. - - If a collection can not be found, it will return the value provided in O(result_not_found). - By default, this is V(none). - - If a collection can be found, but the version not identified, it will return the value provided in - O(result_no_version). By default, this is V(*). This can happen for collections installed - from git which do not have a version number in V(galaxy.yml). - type: list - elements: str +RETURN = r""" +_raw: + description: + - The version number of the collections listed as input. + - If a collection can not be found, it returns the value provided in O(result_not_found). By default, this is V(none). + - If a collection can be found, but the version not identified, it returns the value provided in O(result_no_version). + By default, this is V(*). This can happen for collections installed from git which do not have a version number in V(galaxy.yml). 
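A short sketch of how the two fallback options described above interact (the replacement values are chosen arbitrarily):

  - name: Tell 'not installed' apart from 'installed without a version'
    ansible.builtin.debug:
      msg: >-
        {{ lookup('community.general.collection_version', 'community.general',
                  result_not_found='missing', result_no_version='unversioned') }}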
+ type: list + elements: str """ import json diff --git a/plugins/lookup/consul_kv.py b/plugins/lookup/consul_kv.py index cf7226d579..f57b3da891 100644 --- a/plugins/lookup/consul_kv.py +++ b/plugins/lookup/consul_kv.py @@ -7,109 +7,109 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: consul_kv - short_description: Fetch metadata from a Consul key value store. +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: consul_kv +short_description: Fetch metadata from a Consul key value store +description: + - Lookup metadata for a playbook from the key value store in a Consul cluster. Values can be easily set in the kv store + with simple rest commands. + - C(curl -X PUT -d 'some-value' http://localhost:8500/v1/kv/ansible/somedata). +requirements: + - 'python-consul python library U(https://python-consul.readthedocs.io/en/latest/#installation)' +options: + _raw: + description: List of key(s) to retrieve. + type: list + elements: string + recurse: + type: boolean + description: If V(true), retrieves all the values that have the given key as prefix. + default: false + index: description: - - Lookup metadata for a playbook from the key value store in a Consul cluster. - Values can be easily set in the kv store with simple rest commands - - C(curl -X PUT -d 'some-value' http://localhost:8500/v1/kv/ansible/somedata) - requirements: - - 'python-consul python library U(https://python-consul.readthedocs.io/en/latest/#installation)' - options: - _raw: - description: List of key(s) to retrieve. - type: list - elements: string - recurse: - type: boolean - description: If true, will retrieve all the values that have the given key as prefix. - default: false - index: - description: - - If the key has a value with the specified index then this is returned allowing access to historical values. - type: int - datacenter: - description: - - Retrieve the key from a consul datacenter other than the default for the consul host. - type: str - token: - description: The acl token to allow access to restricted values. - type: str - host: - default: localhost - type: str - description: - - The target to connect to, must be a resolvable address. - - Will be determined from E(ANSIBLE_CONSUL_URL) if that is set. - ini: - - section: lookup_consul - key: host - port: - description: - - The port of the target host to connect to. - - If you use E(ANSIBLE_CONSUL_URL) this value will be used from there. - type: int - default: 8500 - scheme: - default: http - type: str - description: - - Whether to use http or https. - - If you use E(ANSIBLE_CONSUL_URL) this value will be used from there. - validate_certs: - default: true - description: Whether to verify the TLS connection or not. - type: bool - env: - - name: ANSIBLE_CONSUL_VALIDATE_CERTS - ini: - - section: lookup_consul - key: validate_certs - client_cert: - description: The client cert to verify the TLS connection. - type: str - env: - - name: ANSIBLE_CONSUL_CLIENT_CERT - ini: - - section: lookup_consul - key: client_cert - url: - description: - - The target to connect to. - - "Should look like this: V(https://my.consul.server:8500)." 
-        type: str
-        version_added: 1.0.0
-        env:
-          - name: ANSIBLE_CONSUL_URL
-        ini:
-          - section: lookup_consul
-            key: url
-'''
-
-EXAMPLES = """
-  - ansible.builtin.debug:
-      msg: 'key contains {{item}}'
-    with_community.general.consul_kv:
-      - 'key/to/retrieve'
-
-  - name: Parameters can be provided after the key be more specific about what to retrieve
-    ansible.builtin.debug:
-      msg: 'key contains {{item}}'
-    with_community.general.consul_kv:
-      - 'key/to recurse=true token=E6C060A9-26FB-407A-B83E-12DDAFCB4D98'
-
-  - name: retrieving a KV from a remote cluster on non default port
-    ansible.builtin.debug:
-      msg: "{{ lookup('community.general.consul_kv', 'my/key', host='10.10.10.10', port=2000) }}"
+      - If the key has a value with the specified index then this is returned allowing access to historical values.
+    type: int
+  datacenter:
+    description:
+      - Retrieve the key from a Consul datacenter other than the default for the Consul host.
+    type: str
+  token:
+    description: The ACL token to allow access to restricted values.
+    type: str
+  host:
+    default: localhost
+    type: str
+    description:
+      - The target to connect to, must be a resolvable address.
+      - It is determined from E(ANSIBLE_CONSUL_URL) if that is set.
+    ini:
+      - section: lookup_consul
+        key: host
+  port:
+    description:
+      - The port of the target host to connect to.
+      - If you use E(ANSIBLE_CONSUL_URL) this value is used from there.
+    type: int
+    default: 8500
+  scheme:
+    default: http
+    type: str
+    description:
+      - Whether to use http or https.
+      - If you use E(ANSIBLE_CONSUL_URL) this value is used from there.
+  validate_certs:
+    default: true
+    description: Whether to verify the TLS connection or not.
+    type: bool
+    env:
+      - name: ANSIBLE_CONSUL_VALIDATE_CERTS
+    ini:
+      - section: lookup_consul
+        key: validate_certs
+  client_cert:
+    description: The client cert to verify the TLS connection.
+    type: str
+    env:
+      - name: ANSIBLE_CONSUL_CLIENT_CERT
+    ini:
+      - section: lookup_consul
+        key: client_cert
+  url:
+    description:
+      - The target to connect to.
+      - 'Should look like this: V(https://my.consul.server:8500).'
+    type: str
+    version_added: 1.0.0
+    env:
+      - name: ANSIBLE_CONSUL_URL
+    ini:
+      - section: lookup_consul
+        key: url
 """

-RETURN = """
-  _raw:
-    description:
-      - Value(s) stored in consul.
-    type: dict
+EXAMPLES = r"""
+- ansible.builtin.debug:
+    msg: 'key contains {{item}}'
+  with_community.general.consul_kv:
+    - 'key/to/retrieve'
+
+- name: Parameters can be provided after the key to be more specific about what to retrieve
+  ansible.builtin.debug:
+    msg: 'key contains {{item}}'
+  with_community.general.consul_kv:
+    - 'key/to recurse=true token=E6C060A9-26FB-407A-B83E-12DDAFCB4D98'
+
+- name: Retrieving a KV from a remote cluster on a non-default port
+  ansible.builtin.debug:
+    msg: "{{ lookup('community.general.consul_kv', 'my/key', host='10.10.10.10', port=2000) }}"
+"""
+
+RETURN = r"""
+_raw:
+  description:
+    - Value(s) stored in Consul.
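To complement the examples above, a sketch of the O(url) route that supplies scheme, host, and port in one setting (the server address is a placeholder):

  - name: Connect using a single url option instead of host/port/scheme
    ansible.builtin.debug:
      msg: "{{ lookup('community.general.consul_kv', 'my/key', url='https://my.consul.server:8500') }}"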
+ type: dict """ from ansible.module_utils.six.moves.urllib.parse import urlparse diff --git a/plugins/lookup/credstash.py b/plugins/lookup/credstash.py index 0700a5ddcb..a170b13d03 100644 --- a/plugins/lookup/credstash.py +++ b/plugins/lookup/credstash.py @@ -6,54 +6,54 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: credstash - short_description: retrieve secrets from Credstash on AWS - requirements: - - credstash (python library) - description: - - "Credstash is a small utility for managing secrets using AWS's KMS and DynamoDB: https://github.com/fugue/credstash" - options: - _terms: - description: term or list of terms to lookup in the credit store - type: list - elements: string - required: true - table: - description: name of the credstash table to query - type: str - default: 'credential-store' - version: - description: Credstash version - type: str - default: '' - region: - description: AWS region - type: str - profile_name: - description: AWS profile to use for authentication - type: str - env: - - name: AWS_PROFILE - aws_access_key_id: - description: AWS access key ID - type: str - env: - - name: AWS_ACCESS_KEY_ID - aws_secret_access_key: - description: AWS access key - type: str - env: - - name: AWS_SECRET_ACCESS_KEY - aws_session_token: - description: AWS session token - type: str - env: - - name: AWS_SESSION_TOKEN -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: credstash +short_description: Retrieve secrets from Credstash on AWS +requirements: + - credstash (python library) +description: + - "Credstash is a small utility for managing secrets using AWS's KMS and DynamoDB: https://github.com/fugue/credstash." +options: + _terms: + description: Term or list of terms to lookup in the credit store. + type: list + elements: string + required: true + table: + description: Name of the credstash table to query. + type: str + default: 'credential-store' + version: + description: Credstash version. + type: str + default: '' + region: + description: AWS region. + type: str + profile_name: + description: AWS profile to use for authentication. + type: str + env: + - name: AWS_PROFILE + aws_access_key_id: + description: AWS access key ID. + type: str + env: + - name: AWS_ACCESS_KEY_ID + aws_secret_access_key: + description: AWS access key. + type: str + env: + - name: AWS_SECRET_ACCESS_KEY + aws_session_token: + description: AWS session token. 
+ type: str + env: + - name: AWS_SESSION_TOKEN +""" -EXAMPLES = """ +EXAMPLES = r""" - name: first use credstash to store your secrets ansible.builtin.shell: credstash put my-github-password secure123 @@ -77,20 +77,20 @@ EXAMPLES = """ environment: production tasks: - - name: "Test credstash lookup plugin -- get the password with a context passed as a variable" - ansible.builtin.debug: - msg: "{{ lookup('community.general.credstash', 'some-password', context=context) }}" + - name: "Test credstash lookup plugin -- get the password with a context passed as a variable" + ansible.builtin.debug: + msg: "{{ lookup('community.general.credstash', 'some-password', context=context) }}" - - name: "Test credstash lookup plugin -- get the password with a context defined here" - ansible.builtin.debug: - msg: "{{ lookup('community.general.credstash', 'some-password', context=dict(app='my_app', environment='production')) }}" + - name: "Test credstash lookup plugin -- get the password with a context defined here" + ansible.builtin.debug: + msg: "{{ lookup('community.general.credstash', 'some-password', context=dict(app='my_app', environment='production')) }}" """ -RETURN = """ - _raw: - description: - - Value(s) stored in Credstash. - type: str +RETURN = r""" +_raw: + description: + - Value(s) stored in Credstash. + type: str """ from ansible.errors import AnsibleError diff --git a/plugins/lookup/cyberarkpassword.py b/plugins/lookup/cyberarkpassword.py index 4ed040dc6d..63834dce9b 100644 --- a/plugins/lookup/cyberarkpassword.py +++ b/plugins/lookup/cyberarkpassword.py @@ -6,62 +6,64 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: cyberarkpassword - short_description: get secrets from CyberArk AIM - requirements: - - CyberArk AIM tool installed +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: cyberarkpassword +short_description: Get secrets from CyberArk AIM +requirements: + - CyberArk AIM tool installed +description: + - Get secrets from CyberArk AIM. +options: + _command: + description: Cyberark CLI utility. + type: string + env: + - name: AIM_CLIPASSWORDSDK_CMD + default: '/opt/CARKaim/sdk/clipasswordsdk' + appid: + description: Defines the unique ID of the application that is issuing the password request. + type: string + required: true + query: + description: Describes the filter criteria for the password retrieval. + type: string + required: true + output: description: - - Get secrets from CyberArk AIM. - options : - _command: - description: Cyberark CLI utility. - type: string - env: - - name: AIM_CLIPASSWORDSDK_CMD - default: '/opt/CARKaim/sdk/clipasswordsdk' - appid: - description: Defines the unique ID of the application that is issuing the password request. - type: string - required: true - query: - description: Describes the filter criteria for the password retrieval. - type: string - required: true - output: - description: - - Specifies the desired output fields separated by commas. - - "They could be: Password, PassProps., PasswordChangeInProcess" - type: string - default: 'password' - _extra: - description: for extra_params values please check parameters for clipasswordsdk in CyberArk's "Credential Provider and ASCP Implementation Guide" - notes: - - For Ansible on Windows, please change the -parameters (-p, -d, and -o) to /parameters (/p, /d, and /o) and change the location of CLIPasswordSDK.exe. 
-''' - -EXAMPLES = """ - - name: passing options to the lookup - ansible.builtin.debug: - msg: '{{ lookup("community.general.cyberarkpassword", cyquery) }}' - vars: - cyquery: - appid: "app_ansible" - query: "safe=CyberArk_Passwords;folder=root;object=AdminPass" - output: "Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess" - - - - name: used in a loop - ansible.builtin.debug: - msg: "{{item}}" - with_community.general.cyberarkpassword: - appid: 'app_ansible' - query: 'safe=CyberArk_Passwords;folder=root;object=AdminPass' - output: 'Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess' + - Specifies the desired output fields separated by commas. + - 'They could be: Password, PassProps., PasswordChangeInProcess.' + type: string + default: 'password' + _extra: + description: For extra_params values please check parameters for clipasswordsdk in CyberArk's "Credential Provider and + ASCP Implementation Guide". +notes: + - For Ansible on Windows, please change the -parameters (C(-p), C(-d), and C(-o)) to /parameters (C(/p), C(/d), and C(/o)) + and change the location of C(CLIPasswordSDK.exe). """ -RETURN = """ +EXAMPLES = r""" +- name: passing options to the lookup + ansible.builtin.debug: + msg: '{{ lookup("community.general.cyberarkpassword", cyquery) }}' + vars: + cyquery: + appid: "app_ansible" + query: "safe=CyberArk_Passwords;folder=root;object=AdminPass" + output: "Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess" + + +- name: used in a loop + ansible.builtin.debug: + msg: "{{item}}" + with_community.general.cyberarkpassword: + appid: 'app_ansible' + query: 'safe=CyberArk_Passwords;folder=root;object=AdminPass' + output: 'Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess' +""" + +RETURN = r""" _result: description: A list containing one dictionary. type: list @@ -69,12 +71,12 @@ _result: contains: password: description: - - The actual value stored + - The actual value stored. passprops: - description: properties assigned to the entry + description: Properties assigned to the entry. type: dictionary passwordchangeinprocess: - description: did the password change? + description: Did the password change? """ import os diff --git a/plugins/lookup/dependent.py b/plugins/lookup/dependent.py index 1ec4369b32..18d2a000d9 100644 --- a/plugins/lookup/dependent.py +++ b/plugins/lookup/dependent.py @@ -6,31 +6,30 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = """ +DOCUMENTATION = r""" name: dependent short_description: Composes a list with nested elements of other lists or dicts which can depend on previous loop variables author: Felix Fontein (@felixfontein) version_added: 3.1.0 description: - - "Takes the input lists and returns a list with elements that are lists, dictionaries, - or template expressions which evaluate to lists or dicts, composed of the elements of - the input evaluated lists and dictionaries." + - Takes the input lists and returns a list with elements that are lists, dictionaries, or template expressions which evaluate + to lists or dicts, composed of the elements of the input evaluated lists and dictionaries. options: _terms: description: - - A list where the elements are one-element dictionaries, mapping a name to a string, list, or dictionary. - The name is the index that is used in the result object. The value is iterated over as described below. 
+ - A list where the elements are one-element dictionaries, mapping a name to a string, list, or dictionary. The name + is the index that is used in the result object. The value is iterated over as described below. - If the value is a list, it is simply iterated over. - - If the value is a dictionary, it is iterated over and returned as if they would be processed by the - P(ansible.builtin.dict2items#filter) filter. - - If the value is a string, it is evaluated as Jinja2 expressions which can access the previously chosen - elements with C(item.). The result must be a list or a dictionary. + - If the value is a dictionary, it is iterated over and returned as if they would be processed by the P(ansible.builtin.dict2items#filter) + filter. + - If the value is a string, it is evaluated as Jinja2 expressions which can access the previously chosen elements with + C(item.). The result must be a list or a dictionary. type: list elements: dict required: true """ -EXAMPLES = """ +EXAMPLES = r""" - name: Install/remove public keys for active admin users ansible.posix.authorized_key: user: "{{ item.admin.key }}" @@ -76,9 +75,9 @@ EXAMPLES = """ loop_control: # Makes the output readable, so that it doesn't contain the whole subdictionaries and lists label: |- - {{ [item.zone.key, item.prefix.key, item.entry.key, - item.entry.value.ttl | default(3600), - item.entry.value.absent | default(False), item.entry.value.value] }} + {{ [item.zone.key, item.prefix.key, item.entry.key, + item.entry.value.ttl | default(3600), + item.entry.value.absent | default(False), item.entry.value.value] }} with_community.general.dependent: - zone: dns_setup - prefix: item.zone.value @@ -89,51 +88,55 @@ EXAMPLES = """ '': A: value: - - 1.2.3.4 + - 1.2.3.4 AAAA: value: - - "2a01:1:2:3::1" + - "2a01:1:2:3::1" 'test._domainkey': TXT: ttl: 300 value: - - '"k=rsa; t=s; p=MIGfMA..."' + - '"k=rsa; t=s; p=MIGfMA..."' example.org: 'www': A: value: - - 1.2.3.4 - - 5.6.7.8 + - 1.2.3.4 + - 5.6.7.8 """ -RETURN = """ - _list: - description: - - A list composed of dictionaries whose keys are the variable names from the input list. - type: list - elements: dict - sample: - - key1: a - key2: test - - key1: a - key2: foo - - key1: b - key2: bar +RETURN = r""" +_list: + description: + - A list composed of dictionaries whose keys are the variable names from the input list. + type: list + elements: dict + sample: + - key1: a + key2: test + - key1: a + key2: foo + - key1: b + key2: bar """ from ansible.errors import AnsibleLookupError from ansible.module_utils.common._collections_compat import Mapping, Sequence from ansible.module_utils.six import string_types from ansible.plugins.lookup import LookupBase -from ansible.release import __version__ as ansible_version from ansible.template import Templar -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion +try: + from ansible.template import trust_as_template as _trust_as_template + HAS_DATATAGGING = True +except ImportError: + HAS_DATATAGGING = False -# Whether Templar has a cache, which can be controlled by Templar.template()'s cache option. 
-# The cache was removed for ansible-core 2.14 (https://github.com/ansible/ansible/pull/78419)
-_TEMPLAR_HAS_TEMPLATE_CACHE = LooseVersion(ansible_version) < LooseVersion('2.14.0')
+def _make_safe(value):
+    if HAS_DATATAGGING and isinstance(value, str):
+        return _trust_as_template(value)
+    return value
 
 
 class LookupModule(LookupBase):
@@ -144,10 +147,11 @@ class LookupModule(LookupBase):
         ``variables`` are the variables to use.
         """
         templar.available_variables = variables or {}
-        expression = "{0}{1}{2}".format("{{", expression, "}}")
-        if _TEMPLAR_HAS_TEMPLATE_CACHE:
-            return templar.template(expression, cache=False)
-        return templar.template(expression)
+        quoted_expression = "{0}{1}{2}".format("{{", expression, "}}")
+        if hasattr(templar, 'evaluate_expression'):
+            # This is available since the Data Tagging PR has been merged
+            return templar.evaluate_expression(_make_safe(expression))
+        return templar.template(quoted_expression)
 
     def __process(self, result, terms, index, current, templar, variables):
         """Fills ``result`` list with evaluated items.
@@ -193,7 +197,10 @@ class LookupModule(LookupBase):
 
         result = []
         if len(terms) > 0:
-            templar = Templar(loader=self._templar._loader)
+            if HAS_DATATAGGING:
+                templar = self._templar.copy_with_new_env(available_variables={})
+            else:
+                templar = Templar(loader=self._templar._loader)
             data = []
             vars_so_far = set()
             for index, term in enumerate(terms):
diff --git a/plugins/lookup/dig.py b/plugins/lookup/dig.py
index cbb597b7b5..07fc287d71 100644
--- a/plugins/lookup/dig.py
+++ b/plugins/lookup/dig.py
@@ -6,89 +6,113 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
-DOCUMENTATION = '''
-    name: dig
-    author: Jan-Piet Mens (@jpmens)
-    short_description: query DNS using the dnspython library
-    requirements:
-      - dnspython (python library, http://www.dnspython.org/)
+DOCUMENTATION = r"""
+name: dig
+author: Jan-Piet Mens (@jpmens)
+short_description: Query DNS using the dnspython library
+requirements:
  - dnspython (python library, http://www.dnspython.org/)
+description:
+  - The dig lookup runs queries against DNS servers to retrieve DNS records for a specific name (FQDN - fully qualified domain
+    name). It is possible to look up any DNS record in this manner.
+  - There are a couple of different syntaxes that can be used to specify what record should be retrieved, and for which name.
+    It is also possible to explicitly specify the DNS server(s) to use for lookups.
+  - In its simplest form, the dig lookup plugin can be used to retrieve an IPv4 address (DNS A record) associated with an FQDN.
+  - In addition to the (default) A record, it is also possible to specify a different record type that should be queried. This
+    can be done by either passing in an additional parameter of the format qtype=TYPE to the dig lookup, or by appending /TYPE to
+    the FQDN being queried.
+  - If multiple values are associated with the requested record, the results are returned as a comma-separated list. In
+    such cases you may want to pass option C(wantlist=true) to the lookup call, or alternatively use C(query) instead of C(lookup),
+    which results in the record values being returned as a list over which you can iterate later on.
+  - By default, the lookup relies on system-wide configured DNS servers for performing the query. It is also possible to
+    explicitly specify DNS servers to query using the @DNS_SERVER_1,DNS_SERVER_2,...,DNS_SERVER_N notation. This needs to
+    be passed in as an additional parameter to the lookup.
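A minimal sketch of the three query syntaxes just described, side by side (the domain and the resolver address are placeholders):

- name: Same MX query via qtype=, via /TYPE, and against an explicit resolver
  ansible.builtin.debug:
    msg:
      - "{{ lookup('community.general.dig', 'example.com.', qtype='MX') }}"
      - "{{ lookup('community.general.dig', 'example.com./MX') }}"
      - "{{ lookup('community.general.dig', 'example.com./MX', '@8.8.8.8') }}"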
+options: + _terms: + description: Domain(s) to query. + type: list + elements: str + qtype: description: - - The dig lookup runs queries against DNS servers to retrieve DNS records for a specific name (FQDN - fully qualified domain name). - It is possible to lookup any DNS record in this manner. - - There is a couple of different syntaxes that can be used to specify what record should be retrieved, and for which name. - It is also possible to explicitly specify the DNS server(s) to use for lookups. - - In its simplest form, the dig lookup plugin can be used to retrieve an IPv4 address (DNS A record) associated with FQDN - - In addition to (default) A record, it is also possible to specify a different record type that should be queried. - This can be done by either passing-in additional parameter of format qtype=TYPE to the dig lookup, or by appending /TYPE to the FQDN being queried. - - If multiple values are associated with the requested record, the results will be returned as a comma-separated list. - In such cases you may want to pass option C(wantlist=true) to the lookup call, or alternatively use C(query) instead of C(lookup), - which will result in the record values being returned as a list over which you can iterate later on. - - By default, the lookup will rely on system-wide configured DNS servers for performing the query. - It is also possible to explicitly specify DNS servers to query using the @DNS_SERVER_1,DNS_SERVER_2,...,DNS_SERVER_N notation. - This needs to be passed-in as an additional parameter to the lookup - options: - _terms: - description: Domain(s) to query. - type: list - elements: str - qtype: - description: - - Record type to query. - - V(DLV) has been removed in community.general 6.0.0. - - V(CAA) has been added in community.general 6.3.0. - type: str - default: 'A' - choices: [A, ALL, AAAA, CAA, CNAME, DNAME, DNSKEY, DS, HINFO, LOC, MX, NAPTR, NS, NSEC3PARAM, PTR, RP, RRSIG, SOA, SPF, SRV, SSHFP, TLSA, TXT] - flat: - description: If 0 each record is returned as a dictionary, otherwise a string. - type: int - default: 1 - retry_servfail: - description: Retry a nameserver if it returns SERVFAIL. - default: false - type: bool - version_added: 3.6.0 - fail_on_error: - description: - - Abort execution on lookup errors. - - The default for this option will likely change to V(true) in the future. - The current default, V(false), is used for backwards compatibility, and will result in empty strings - or the string V(NXDOMAIN) in the result in case of errors. - default: false - type: bool - version_added: 5.4.0 - real_empty: - description: - - Return empty result without empty strings, and return empty list instead of V(NXDOMAIN). - - The default for this option will likely change to V(true) in the future. - - This option will be forced to V(true) if multiple domains to be queried are specified. - default: false - type: bool - version_added: 6.0.0 - class: - description: - - "Class." - type: str - default: 'IN' - tcp: - description: Use TCP to lookup DNS records. - default: false - type: bool - version_added: 7.5.0 - port: - description: Use port as target port when looking up DNS records. - default: 53 - type: int - version_added: 9.5.0 - notes: - - ALL is not a record per-se, merely the listed fields are available for any record results you retrieve in the form of a dictionary. - - While the 'dig' lookup plugin supports anything which dnspython supports out of the box, only a subset can be converted into a dictionary. 
-      - If you need to obtain the AAAA record (IPv6 address), you must specify the record type explicitly.
-        Syntax for specifying the record type is shown in the examples below.
-      - The trailing dot in most of the examples listed is purely optional, but is specified for completeness/correctness sake.
-'''
+      - Record type to query.
+      - V(DLV) has been removed in community.general 6.0.0.
+      - V(CAA) has been added in community.general 6.3.0.
+    type: str
+    default: 'A'
+    choices:
+      - A
+      - ALL
+      - AAAA
+      - CAA
+      - CNAME
+      - DNAME
+      - DNSKEY
+      - DS
+      - HINFO
+      - LOC
+      - MX
+      - NAPTR
+      - NS
+      - NSEC3PARAM
+      - PTR
+      - RP
+      - RRSIG
+      - SOA
+      - SPF
+      - SRV
+      - SSHFP
+      - TLSA
+      - TXT
+  flat:
+    description: If V(0), each record is returned as a dictionary, otherwise as a string.
+    type: int
+    default: 1
+  retry_servfail:
+    description: Retry a nameserver if it returns SERVFAIL.
+    default: false
+    type: bool
+    version_added: 3.6.0
+  fail_on_error:
+    description:
+      - Abort execution on lookup errors.
+      - The default for this option is likely to change to V(true) in the future. The current default, V(false), is used for
+        backwards compatibility, and results in empty strings or the string V(NXDOMAIN) in the result in case of errors.
+    default: false
+    type: bool
+    version_added: 5.4.0
+  real_empty:
+    description:
+      - Return empty result without empty strings, and return empty list instead of V(NXDOMAIN).
+      - The default for this option is likely to change to V(true) in the future.
+      - This option is forced to V(true) if multiple domains to be queried are specified.
+    default: false
+    type: bool
+    version_added: 6.0.0
+  class:
+    description:
+      - Class.
+    type: str
+    default: 'IN'
+  tcp:
+    description: Use TCP to look up DNS records.
+    default: false
+    type: bool
+    version_added: 7.5.0
+  port:
+    description: Use port as target port when looking up DNS records.
+    default: 53
+    type: int
+    version_added: 9.5.0
+notes:
+  - V(ALL) is not a record in itself; it merely means that the listed fields are available for any record results you retrieve
+    in the form of a dictionary.
+  - While the plugin supports anything which C(dnspython) supports out of the box, only a subset can be converted into a dictionary.
+  - If you need to obtain the AAAA record (IPv6 address), you must specify the record type explicitly. Syntax for specifying
    the record type is shown in the examples below.
+  - The trailing dot in most of the examples listed is purely optional, but is specified for the sake of completeness/correctness.
+""" -EXAMPLES = """ +EXAMPLES = r""" - name: Simple A record (IPV4 address) lookup for example.com ansible.builtin.debug: msg: "{{ lookup('community.general.dig', 'example.com.')}}" @@ -139,83 +163,83 @@ EXAMPLES = """ msg: "{{ lookup('community.general.dig', 'example.org./A', retry_servfail=true) }}" """ -RETURN = """ - _list: - description: - - List of composed strings or dictionaries with key and value - If a dictionary, fields shows the keys returned depending on query type - type: list - elements: raw - contains: - ALL: - description: - - owner, ttl, type - A: - description: - - address - AAAA: - description: - - address - CAA: - description: - - flags - - tag - - value - version_added: 6.3.0 - CNAME: - description: - - target - DNAME: - description: - - target - DNSKEY: - description: - - flags, algorithm, protocol, key - DS: - description: - - algorithm, digest_type, key_tag, digest - HINFO: - description: - - cpu, os - LOC: - description: - - latitude, longitude, altitude, size, horizontal_precision, vertical_precision - MX: - description: - - preference, exchange - NAPTR: - description: - - order, preference, flags, service, regexp, replacement - NS: - description: - - target - NSEC3PARAM: - description: - - algorithm, flags, iterations, salt - PTR: - description: - - target - RP: - description: - - mbox, txt - SOA: - description: - - mname, rname, serial, refresh, retry, expire, minimum - SPF: - description: - - strings - SRV: - description: - - priority, weight, port, target - SSHFP: - description: - - algorithm, fp_type, fingerprint - TLSA: - description: - - usage, selector, mtype, cert - TXT: - description: - - strings +RETURN = r""" +_list: + description: + - List of composed strings or of dictionaries, with fields depending + on query type. + type: list + elements: raw + contains: + ALL: + description: + - C(owner), C(ttl), C(type). + A: + description: + - C(address). + AAAA: + description: + - C(address). + CAA: + description: + - C(flags). + - C(tag). + - C(value). + version_added: 6.3.0 + CNAME: + description: + - C(target). + DNAME: + description: + - C(target). + DNSKEY: + description: + - C(flags), C(algorithm), C(protocol), C(key). + DS: + description: + - C(algorithm), C(digest_type), C(key_tag), C(digest). + HINFO: + description: + - C(cpu), C(os). + LOC: + description: + - C(latitude), C(longitude), C(altitude), C(size), C(horizontal_precision), C(vertical_precision). + MX: + description: + - C(preference), C(exchange). + NAPTR: + description: + - C(order), C(preference), C(flags), C(service), C(regexp), C(replacement). + NS: + description: + - C(target). + NSEC3PARAM: + description: + - C(algorithm), C(flags), C(iterations), C(salt). + PTR: + description: + - C(target). + RP: + description: + - C(mbox), C(txt). + SOA: + description: + - C(mname), C(rname), C(serial), C(refresh), C(retry), C(expire), C(minimum). + SPF: + description: + - C(strings). + SRV: + description: + - C(priority), C(weight), C(port), C(target). + SSHFP: + description: + - C(algorithm), C(fp_type), C(fingerprint). + TLSA: + description: + - C(usage), C(selector), C(mtype), C(cert). + TXT: + description: + - C(strings). 
""" from ansible.errors import AnsibleError diff --git a/plugins/lookup/dnstxt.py b/plugins/lookup/dnstxt.py index baaa63aa98..fb0a5d5138 100644 --- a/plugins/lookup/dnstxt.py +++ b/plugins/lookup/dnstxt.py @@ -6,30 +6,30 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - name: dnstxt - author: Jan-Piet Mens (@jpmens) - short_description: query a domain(s)'s DNS txt fields - requirements: - - dns/dns.resolver (python library) +DOCUMENTATION = r""" +name: dnstxt +author: Jan-Piet Mens (@jpmens) +short_description: Query a domain(s)'s DNS txt fields +requirements: + - dns/dns.resolver (python library) +description: + - Uses a python library to return the DNS TXT record for a domain. +options: + _terms: + description: Domain or list of domains to query TXT records from. + required: true + type: list + elements: string + real_empty: description: - - Uses a python library to return the DNS TXT record for a domain. - options: - _terms: - description: domain or list of domains to query TXT records from - required: true - type: list - elements: string - real_empty: - description: - - Return empty result without empty strings, and return empty list instead of V(NXDOMAIN). - - The default for this option will likely change to V(true) in the future. - default: false - type: bool - version_added: 6.0.0 -''' + - Return empty result without empty strings, and return empty list instead of V(NXDOMAIN). + - The default for this option is likely to change to V(true) in the future. + default: false + type: bool + version_added: 6.0.0 +""" -EXAMPLES = """ +EXAMPLES = r""" - name: show txt entry ansible.builtin.debug: msg: "{{lookup('community.general.dnstxt', ['test.example.com'])}}" @@ -48,11 +48,11 @@ EXAMPLES = """ with_community.general.dnstxt: "{{lookup('community.general.dnstxt', ['test.example.com']).split(',')}}" """ -RETURN = """ - _list: - description: - - values returned by the DNS TXT record. - type: list +RETURN = r""" +_list: + description: + - Values returned by the DNS TXT record. + type: list """ HAVE_DNS = False diff --git a/plugins/lookup/dsv.py b/plugins/lookup/dsv.py index eba3e36368..0b34b3ce31 100644 --- a/plugins/lookup/dsv.py +++ b/plugins/lookup/dsv.py @@ -12,81 +12,78 @@ author: Adam Migus (@amigus) short_description: Get secrets from Thycotic DevOps Secrets Vault version_added: 1.0.0 description: - - Uses the Thycotic DevOps Secrets Vault Python SDK to get Secrets from a - DSV O(tenant) using a O(client_id) and O(client_secret). + - Uses the Thycotic DevOps Secrets Vault Python SDK to get Secrets from a DSV O(tenant) using a O(client_id) and O(client_secret). requirements: - - python-dsv-sdk - https://pypi.org/project/python-dsv-sdk/ + - python-dsv-sdk - https://pypi.org/project/python-dsv-sdk/ options: - _terms: - description: The path to the secret, for example V(/staging/servers/web1). - required: true - tenant: - description: The first format parameter in the default O(url_template). - type: string - env: - - name: DSV_TENANT - ini: - - section: dsv_lookup - key: tenant - required: true - tld: - default: com - description: The top-level domain of the tenant; the second format - parameter in the default O(url_template). - type: string - env: - - name: DSV_TLD - ini: - - section: dsv_lookup - key: tld - required: false - client_id: - description: The client_id with which to request the Access Grant. 
- type: string - env: - - name: DSV_CLIENT_ID - ini: - - section: dsv_lookup - key: client_id - required: true - client_secret: - description: The client secret associated with the specific O(client_id). - type: string - env: - - name: DSV_CLIENT_SECRET - ini: - - section: dsv_lookup - key: client_secret - required: true - url_template: - default: https://{}.secretsvaultcloud.{}/v1 - description: The path to prepend to the base URL to form a valid REST - API request. - type: string - env: - - name: DSV_URL_TEMPLATE - ini: - - section: dsv_lookup - key: url_template - required: false + _terms: + description: The path to the secret, for example V(/staging/servers/web1). + required: true + tenant: + description: The first format parameter in the default O(url_template). + type: string + env: + - name: DSV_TENANT + ini: + - section: dsv_lookup + key: tenant + required: true + tld: + default: com + description: The top-level domain of the tenant; the second format parameter in the default O(url_template). + type: string + env: + - name: DSV_TLD + ini: + - section: dsv_lookup + key: tld + required: false + client_id: + description: The client_id with which to request the Access Grant. + type: string + env: + - name: DSV_CLIENT_ID + ini: + - section: dsv_lookup + key: client_id + required: true + client_secret: + description: The client secret associated with the specific O(client_id). + type: string + env: + - name: DSV_CLIENT_SECRET + ini: + - section: dsv_lookup + key: client_secret + required: true + url_template: + default: https://{}.secretsvaultcloud.{}/v1 + description: The path to prepend to the base URL to form a valid REST API request. + type: string + env: + - name: DSV_URL_TEMPLATE + ini: + - section: dsv_lookup + key: url_template + required: false """ RETURN = r""" _list: - description: - - One or more JSON responses to C(GET /secrets/{path}). - - See U(https://dsv.thycotic.com/api/index.html#operation/getSecret). - type: list - elements: dict + description: + - One or more JSON responses to C(GET /secrets/{path}). + - See U(https://dsv.thycotic.com/api/index.html#operation/getSecret). + type: list + elements: dict """ EXAMPLES = r""" - hosts: localhost vars: - secret: "{{ lookup('community.general.dsv', '/test/secret') }}" + secret: "{{ lookup('community.general.dsv', '/test/secret') }}" tasks: - - ansible.builtin.debug: - msg: 'the password is {{ secret["data"]["password"] }}' + - ansible.builtin.debug: + msg: 'the password is {{ secret["data"]["password"] }}' """ from ansible.errors import AnsibleError, AnsibleOptionsError diff --git a/plugins/lookup/etcd.py b/plugins/lookup/etcd.py index 1e7dc3c960..d8d992e79f 100644 --- a/plugins/lookup/etcd.py +++ b/plugins/lookup/etcd.py @@ -8,46 +8,46 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - author: - - Jan-Piet Mens (@jpmens) - name: etcd - short_description: get info from an etcd server +DOCUMENTATION = r""" +author: + - Jan-Piet Mens (@jpmens) +name: etcd +short_description: Get info from an etcd server +description: + - Retrieves data from an etcd server. 
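Since O(_terms) accepts a list of keys, several values can be fetched in one call; a minimal sketch (key names are placeholders):

- name: Read two keys from the local etcd in one call
  ansible.builtin.debug:
    msg: "{{ lookup('community.general.etcd', 'foo/bar', 'baz') }}"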
+options: + _terms: description: - - Retrieves data from an etcd server - options: - _terms: - description: - - the list of keys to lookup on the etcd server - type: list - elements: string - required: true - url: - description: - - Environment variable with the URL for the etcd server - type: string - default: 'http://127.0.0.1:4001' - env: - - name: ANSIBLE_ETCD_URL - version: - description: - - Environment variable with the etcd protocol version - type: string - default: 'v1' - env: - - name: ANSIBLE_ETCD_VERSION - validate_certs: - description: - - toggle checking that the ssl certificates are valid, you normally only want to turn this off with self-signed certs. - default: true - type: boolean - seealso: - - module: community.general.etcd3 - - plugin: community.general.etcd3 - plugin_type: lookup -''' + - The list of keys to lookup on the etcd server. + type: list + elements: string + required: true + url: + description: + - Environment variable with the URL for the etcd server. + type: string + default: 'http://127.0.0.1:4001' + env: + - name: ANSIBLE_ETCD_URL + version: + description: + - Environment variable with the etcd protocol version. + type: string + default: 'v1' + env: + - name: ANSIBLE_ETCD_VERSION + validate_certs: + description: + - Toggle checking that the ssl certificates are valid, you normally only want to turn this off with self-signed certs. + default: true + type: boolean +seealso: + - module: community.general.etcd3 + - plugin: community.general.etcd3 + plugin_type: lookup +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: "a value from a locally running etcd" ansible.builtin.debug: msg: "{{ lookup('community.general.etcd', 'foo/bar') }}" @@ -59,15 +59,15 @@ EXAMPLES = ''' - name: "you can set server options inline" ansible.builtin.debug: msg: "{{ lookup('community.general.etcd', 'foo', version='v2', url='http://192.168.0.27:4001') }}" -''' +""" -RETURN = ''' - _raw: - description: - - List of values associated with input keys. - type: list - elements: string -''' +RETURN = r""" +_raw: + description: + - List of values associated with input keys. + type: list + elements: string +""" import json diff --git a/plugins/lookup/etcd3.py b/plugins/lookup/etcd3.py index c67e975b97..2af1e9052b 100644 --- a/plugins/lookup/etcd3.py +++ b/plugins/lookup/etcd3.py @@ -7,101 +7,101 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' - author: - - Eric Belhomme (@eric-belhomme) - version_added: '0.2.0' - name: etcd3 - short_description: Get key values from etcd3 server +DOCUMENTATION = r""" +author: + - Eric Belhomme (@eric-belhomme) +version_added: '0.2.0' +name: etcd3 +short_description: Get key values from etcd3 server +description: + - Retrieves key values and/or key prefixes from etcd3 server using its native gRPC API. + - Try to reuse M(community.general.etcd3) options for connection parameters, but add support for some E(ETCDCTL_*) environment + variables. + - See U(https://github.com/etcd-io/etcd/tree/master/Documentation/op-guide) for etcd overview. +options: + _terms: description: - - Retrieves key values and/or key prefixes from etcd3 server using its native gRPC API. - - Try to reuse M(community.general.etcd3) options for connection parameters, but add support for some C(ETCDCTL_*) environment variables. - - See U(https://github.com/etcd-io/etcd/tree/master/Documentation/op-guide) for etcd overview. + - The list of keys (or key prefixes) to look up on the etcd3 server. 
+      type: list
+      elements: str
+      required: true
+  prefix:
+    description:
+      - Look for key or prefix key.
+    type: bool
+    default: false
+  endpoints:
+    description:
+      - Counterpart of the E(ETCDCTL_ENDPOINTS) environment variable. Specify the etcd3 connection with a URL form, for example
+        V(https://hostname:2379), or V(<host>:<port>) form.
+      - The V(host) part is overwritten by O(host) option, if defined.
+      - The V(port) part is overwritten by O(port) option, if defined.
+    env:
+      - name: ETCDCTL_ENDPOINTS
+    default: '127.0.0.1:2379'
+    type: str
+  host:
+    description:
+      - Etcd3 listening client host.
+      - Takes precedence over O(endpoints).
+    type: str
+  port:
+    description:
+      - Etcd3 listening client port.
+      - Takes precedence over O(endpoints).
+    type: int
+  ca_cert:
+    description:
+      - Etcd3 CA authority.
+    env:
+      - name: ETCDCTL_CACERT
+    type: str
+  cert_cert:
+    description:
+      - Etcd3 client certificate.
+    env:
+      - name: ETCDCTL_CERT
+    type: str
+  cert_key:
+    description:
+      - Etcd3 client private key.
+    env:
+      - name: ETCDCTL_KEY
+    type: str
+  timeout:
+    description:
+      - Client timeout.
+    default: 60
+    env:
+      - name: ETCDCTL_DIAL_TIMEOUT
+    type: int
+  user:
+    description:
+      - Authenticated user name.
+    env:
+      - name: ETCDCTL_USER
+    type: str
+  password:
+    description:
+      - Authenticated user password.
    env:
+      - name: ETCDCTL_PASSWORD
+    type: str
 
-    options:
-        _terms:
-            description:
-                - The list of keys (or key prefixes) to look up on the etcd3 server.
-            type: list
-            elements: str
-            required: true
-        prefix:
-            description:
-                - Look for key or prefix key.
-            type: bool
-            default: false
-        endpoints:
-            description:
-                - Counterpart of E(ETCDCTL_ENDPOINTS) environment variable.
-                  Specify the etcd3 connection with and URL form, for example V(https://hostname:2379), or V(<host>:<port>) form.
-                - The V(host) part is overwritten by O(host) option, if defined.
-                - The V(port) part is overwritten by O(port) option, if defined.
-            env:
-                - name: ETCDCTL_ENDPOINTS
-            default: '127.0.0.1:2379'
-            type: str
-        host:
-            description:
-                - etcd3 listening client host.
-                - Takes precedence over O(endpoints).
-            type: str
-        port:
-            description:
-                - etcd3 listening client port.
-                - Takes precedence over O(endpoints).
-            type: int
-        ca_cert:
-            description:
-                - etcd3 CA authority.
-            env:
-                - name: ETCDCTL_CACERT
-            type: str
-        cert_cert:
-            description:
-                - etcd3 client certificate.
-            env:
-                - name: ETCDCTL_CERT
-            type: str
-        cert_key:
-            description:
-                - etcd3 client private key.
-            env:
-                - name: ETCDCTL_KEY
-            type: str
-        timeout:
-            description:
-                - Client timeout.
-            default: 60
-            env:
-                - name: ETCDCTL_DIAL_TIMEOUT
-            type: int
-        user:
-            description:
-                - Authenticated user name.
-            env:
-                - name: ETCDCTL_USER
-            type: str
-        password:
-            description:
-                - Authenticated user password.
-            env:
-                - name: ETCDCTL_PASSWORD
-            type: str
+notes:
+  - O(host) and O(port) options take precedence over the O(endpoints) option.
+  - The recommended way to connect to an etcd3 server is to use the E(ETCDCTL_ENDPOINTS) environment variable and keep O(endpoints),
+    O(host), and O(port) unused.
+seealso:
+  - module: community.general.etcd3
+  - plugin: community.general.etcd
+    plugin_type: lookup
 
-    notes:
-        - O(host) and O(port) options take precedence over (endpoints) option.
-        - The recommended way to connect to etcd3 server is using E(ETCDCTL_ENDPOINT)
-          environment variable and keep O(endpoints), O(host), and O(port) unused.
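A short sketch of the O(prefix) option described above (the key prefix is a placeholder); each returned element is a dictionary with C(key) and C(value) fields:

- name: List every key below /foo
  ansible.builtin.debug:
    msg: "{{ item.key }} = {{ item.value }}"
  loop: "{{ query('community.general.etcd3', '/foo/', prefix=true) }}"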
- seealso: - - module: community.general.etcd3 - - plugin: community.general.etcd - plugin_type: lookup +requirements: + - "etcd3 >= 0.10" +""" - requirements: - - "etcd3 >= 0.10" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: "a value from a locally running etcd" ansible.builtin.debug: msg: "{{ lookup('community.general.etcd3', 'foo/bar') }}" @@ -117,22 +117,22 @@ EXAMPLES = ''' - name: "connect to etcd3 with a client certificate" ansible.builtin.debug: msg: "{{ lookup('community.general.etcd3', 'foo/bar', cert_cert='/etc/ssl/etcd/client.pem', cert_key='/etc/ssl/etcd/client.key') }}" -''' +""" -RETURN = ''' - _raw: - description: - - List of keys and associated values. - type: list - elements: dict - contains: - key: - description: The element's key. - type: str - value: - description: The element's value. - type: str -''' +RETURN = r""" +_raw: + description: + - List of keys and associated values. + type: list + elements: dict + contains: + key: + description: The element's key. + type: str + value: + description: The element's value. + type: str +""" import re diff --git a/plugins/lookup/filetree.py b/plugins/lookup/filetree.py index 3036e152c2..24e0c20eea 100644 --- a/plugins/lookup/filetree.py +++ b/plugins/lookup/filetree.py @@ -6,22 +6,23 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' +DOCUMENTATION = r""" name: filetree author: Dag Wieers (@dagwieers) -short_description: recursively match all files in a directory tree +short_description: Recursively match all files in a directory tree description: -- This lookup enables you to template a complete tree of files on a target system while retaining permissions and ownership. -- Supports directories, files and symlinks, including SELinux and other file properties. -- If you provide more than one path, it will implement a first_found logic, and will not process entries it already processed in previous paths. - This enables merging different trees in order of importance, or add role_vars to specific paths to influence different instances of the same role. + - This lookup enables you to template a complete tree of files on a target system while retaining permissions and ownership. + - Supports directories, files and symlinks, including SELinux and other file properties. + - If you provide more than one path, it implements a first_found logic, and does not process entries it already processed + in previous paths. This enables merging different trees in order of importance, or add role_vars to specific paths to + influence different instances of the same role. options: _terms: description: Path(s) of files to read. required: true type: list elements: string -''' +""" EXAMPLES = r""" - name: Create directories @@ -59,61 +60,61 @@ EXAMPLES = r""" """ RETURN = r""" - _raw: - description: List of dictionaries with file information. - type: list - elements: dict - contains: - src: - description: - - Full path to file. - - Not returned when RV(_raw[].state) is set to V(directory). - type: path - root: - description: Allows filtering by original location. - type: path - path: - description: Contains the relative path to root. - type: path - mode: - description: The permissions the resulting file or directory. - type: str - state: - description: TODO - type: str - owner: - description: Name of the user that owns the file/directory. - type: raw - group: - description: Name of the group that owns the file/directory. 
-      type: raw
-    seuser:
-      description: The user part of the SELinux file context.
-      type: raw
-    serole:
-      description: The role part of the SELinux file context.
-      type: raw
-    setype:
-      description: The type part of the SELinux file context.
-      type: raw
-    selevel:
-      description: The level part of the SELinux file context.
-      type: raw
-    uid:
-      description: Owner ID of the file/directory.
-      type: int
-    gid:
-      description: Group ID of the file/directory.
-      type: int
-    size:
-      description: Size of the target.
-      type: int
-    mtime:
-      description: Time of last modification.
-      type: float
-    ctime:
-      description: Time of last metadata update or creation (depends on OS).
-      type: float
+_raw:
+  description: List of dictionaries with file information.
+  type: list
+  elements: dict
+  contains:
+    src:
+      description:
+        - Full path to file.
+        - Not returned when RV(_raw[].state) is set to V(directory).
+      type: path
+    root:
+      description: Allows filtering by original location.
+      type: path
+    path:
+      description: Contains the relative path to root.
+      type: path
+    mode:
+      description: The permissions of the resulting file or directory.
+      type: str
+    state:
+      description: TODO.
+      type: str
+    owner:
+      description: Name of the user that owns the file/directory.
+      type: raw
+    group:
+      description: Name of the group that owns the file/directory.
+      type: raw
+    seuser:
+      description: The user part of the SELinux file context.
+      type: raw
+    serole:
+      description: The role part of the SELinux file context.
+      type: raw
+    setype:
+      description: The type part of the SELinux file context.
+      type: raw
+    selevel:
+      description: The level part of the SELinux file context.
+      type: raw
+    uid:
+      description: Owner ID of the file/directory.
+      type: int
+    gid:
+      description: Group ID of the file/directory.
+      type: int
+    size:
+      description: Size of the target.
+      type: int
+    mtime:
+      description: Time of last modification.
+      type: float
+    ctime:
+      description: Time of last metadata update or creation (depends on OS).
+      type: float
 """
 
 import os
 import pwd
diff --git a/plugins/lookup/flattened.py b/plugins/lookup/flattened.py
index 5365f2ca99..de4a21fbdd 100644
--- a/plugins/lookup/flattened.py
+++ b/plugins/lookup/flattened.py
@@ -6,35 +6,35 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
-DOCUMENTATION = '''
-    name: flattened
-    author: Serge van Ginderachter (!UNKNOWN)
-    short_description: return single list completely flattened
-    description:
-      - Given one or more lists, this lookup will flatten any list elements found recursively until only 1 list is left.
-    options:
-      _terms:
-        description: lists to flatten
-        type: list
-        elements: raw
-        required: true
-    notes:
-      - Unlike the P(ansible.builtin.items#lookup) lookup which only flattens 1 level,
-        this plugin will continue to flatten until it cannot find lists anymore.
-      - Aka highlander plugin, there can only be one (list).
-'''
+DOCUMENTATION = r"""
+name: flattened
+author: Serge van Ginderachter (!UNKNOWN)
+short_description: Return single list completely flattened
+description:
+  - Given one or more lists, this lookup flattens any list elements found recursively until only one list is left.
+options:
+  _terms:
+    description: Lists to flatten.
+    type: list
+    elements: raw
+    required: true
+notes:
+  - Unlike the P(ansible.builtin.items#lookup) lookup which only flattens 1 level, this plugin continues to flatten until
+    it cannot find lists anymore.
+  - Aka highlander plugin, there can only be one (list).
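A self-contained check of the recursive flattening just described; C(wantlist=true) keeps the result a list instead of a comma-joined string (literal data, nothing environment-specific):

- name: Verify that nesting of any depth collapses into one list
  ansible.builtin.assert:
    that:
      - lookup('community.general.flattened', [1, [2, [3, 4]]], wantlist=true) == [1, 2, 3, 4]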
+""" -EXAMPLES = """ +EXAMPLES = r""" - name: "'unnest' all elements into single list" ansible.builtin.debug: msg: "all in one list {{lookup('community.general.flattened', [1,2,3,[5,6]], ['a','b','c'], [[5,6,1,3], [34,'a','b','c']])}}" """ -RETURN = """ - _raw: - description: - - flattened list - type: list +RETURN = r""" +_raw: + description: + - Flattened list. + type: list """ from ansible.errors import AnsibleError from ansible.module_utils.six import string_types @@ -67,12 +67,7 @@ class LookupModule(LookupBase): if isinstance(term, string_types): # convert a variable to a list - try: - term2 = listify_lookup_plugin_terms(term, templar=self._templar) - except TypeError: - # The loader argument is deprecated in ansible-core 2.14+. Fall back to - # pre-2.14 behavior for older ansible-core versions. - term2 = listify_lookup_plugin_terms(term, templar=self._templar, loader=self._loader) + term2 = listify_lookup_plugin_terms(term, templar=self._templar) # but avoid converting a plain string to a list of one string if term2 != [term]: term = term2 diff --git a/plugins/lookup/github_app_access_token.py b/plugins/lookup/github_app_access_token.py index 73fd09a0a9..dbc8cde3b5 100644 --- a/plugins/lookup/github_app_access_token.py +++ b/plugins/lookup/github_app_access_token.py @@ -5,49 +5,49 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - name: github_app_access_token - author: - - Poh Wei Sheng (@weisheng-p) - short_description: Obtain short-lived Github App Access tokens - version_added: '8.2.0' - requirements: - - jwt (https://github.com/GehirnInc/python-jwt) +DOCUMENTATION = r""" +name: github_app_access_token +author: + - Poh Wei Sheng (@weisheng-p) +short_description: Obtain short-lived Github App Access tokens +version_added: '8.2.0' +requirements: + - jwt (https://github.com/GehirnInc/python-jwt) +description: + - This generates a Github access token that can be used with a C(git) command, if you use a Github App. +options: + key_path: description: - - This generates a Github access token that can be used with a C(git) command, if you use a Github App. - options: - key_path: - description: - - Path to your private key. - - Either O(key_path) or O(private_key) must be specified. - type: path - app_id: - description: - - Your GitHub App ID, you can find this in the Settings page. - required: true - type: str - installation_id: - description: - - The installation ID that contains the git repository you would like access to. - - As of 2023-12-24, this can be found via Settings page > Integrations > Application. The last part of the URL in the - configure button is the installation ID. - - Alternatively, you can use PyGithub (U(https://github.com/PyGithub/PyGithub)) to get your installation ID. - required: true - type: str - private_key: - description: - - GitHub App private key in PEM file format as string. - - Either O(key_path) or O(private_key) must be specified. - type: str - version_added: 10.0.0 - token_expiry: - description: - - How long the token should last for in seconds. - default: 600 - type: int -''' + - Path to your private key. + - Either O(key_path) or O(private_key) must be specified. + type: path + app_id: + description: + - Your GitHub App ID, you can find this in the Settings page. + required: true + type: str + installation_id: + description: + - The installation ID that contains the git repository you would like access to. 
+ - As of 2023-12-24, this can be found at Settings page > Integrations > Application. The last part of the URL in the + configure button is the installation ID. + - Alternatively, you can use PyGithub (U(https://github.com/PyGithub/PyGithub)) to get your installation ID. + required: true + type: str + private_key: + description: + - GitHub App private key in PEM file format as string. + - Either O(key_path) or O(private_key) must be specified. + type: str + version_added: 10.0.0 + token_expiry: + description: + - How long the token should last for in seconds. + default: 600 + type: int +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Get access token to be used for git checkout with app_id=123456, installation_id=64209 ansible.builtin.git: repo: >- @@ -57,14 +57,14 @@ EXAMPLES = ''' github_token: >- {{ lookup('community.general.github_app_access_token', key_path='/home/to_your/key', app_id='123456', installation_id='64209') }} -''' +""" -RETURN = ''' - _raw: - description: A one-element list containing your GitHub access token. - type: list - elements: str -''' +RETURN = r""" +_raw: + description: A one-element list containing your GitHub access token. + type: list + elements: str +""" try: diff --git a/plugins/lookup/hiera.py b/plugins/lookup/hiera.py index 8463a8844e..27f133d78a 100644 --- a/plugins/lookup/hiera.py +++ b/plugins/lookup/hiera.py @@ -6,40 +6,40 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - author: - - Juan Manuel Parrilla (@jparrill) - name: hiera - short_description: get info from hiera data - requirements: - - hiera (command line utility) +DOCUMENTATION = r""" +author: + - Juan Manuel Parrilla (@jparrill) +name: hiera +short_description: Get info from hiera data +requirements: + - hiera (command line utility) +description: + - Retrieves data from an Puppetmaster node using Hiera as ENC. +options: + _terms: description: - - Retrieves data from an Puppetmaster node using Hiera as ENC. - options: - _terms: - description: - - The list of keys to lookup on the Puppetmaster. - type: list - elements: string - required: true - executable: - description: - - Binary file to execute Hiera. - type: string - default: '/usr/bin/hiera' - env: - - name: ANSIBLE_HIERA_BIN - config_file: - description: - - File that describes the hierarchy of Hiera. - type: string - default: '/etc/hiera.yaml' - env: - - name: ANSIBLE_HIERA_CFG + - The list of keys to lookup on the Puppetmaster. + type: list + elements: string + required: true + executable: + description: + - Binary file to execute Hiera. + type: string + default: '/usr/bin/hiera' + env: + - name: ANSIBLE_HIERA_BIN + config_file: + description: + - File that describes the hierarchy of Hiera. + type: string + default: '/etc/hiera.yaml' + env: + - name: ANSIBLE_HIERA_CFG # FIXME: incomplete options .. _terms? environment/fqdn? -''' +""" -EXAMPLES = """ +EXAMPLES = r""" # All this examples depends on hiera.yml that describes the hierarchy - name: "a value from Hiera 'DB'" @@ -55,12 +55,12 @@ EXAMPLES = """ msg: "{{ lookup('community.general.hiera', 'foo fqdn=puppet01.localdomain') }}" """ -RETURN = """ - _raw: - description: - - a value associated with input key - type: list - elements: str +RETURN = r""" +_raw: + description: + - A value associated with input key. 
+ type: list + elements: str """ from ansible.plugins.lookup import LookupBase diff --git a/plugins/lookup/keyring.py b/plugins/lookup/keyring.py index ebc35a8ee1..75d808e736 100644 --- a/plugins/lookup/keyring.py +++ b/plugins/lookup/keyring.py @@ -7,18 +7,18 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - name: keyring - author: - - Samuel Boucher (!UNKNOWN) - requirements: - - keyring (python library) - short_description: grab secrets from the OS keyring - description: - - Allows you to access data stored in the OS provided keyring/keychain. -''' +DOCUMENTATION = r""" +name: keyring +author: + - Samuel Boucher (!UNKNOWN) +requirements: + - keyring (python library) +short_description: Grab secrets from the OS keyring +description: + - Allows you to access data stored in the OS provided keyring/keychain. +""" -EXAMPLES = """ +EXAMPLES = r""" - name: output secrets to screen (BAD IDEA) ansible.builtin.debug: msg: "Password: {{item}}" @@ -31,11 +31,11 @@ EXAMPLES = """ login_user: joe """ -RETURN = """ - _raw: - description: Secrets stored. - type: list - elements: str +RETURN = r""" +_raw: + description: Secrets stored. + type: list + elements: str """ HAS_KEYRING = True diff --git a/plugins/lookup/lastpass.py b/plugins/lookup/lastpass.py index 70ef8d1414..2633848937 100644 --- a/plugins/lookup/lastpass.py +++ b/plugins/lookup/lastpass.py @@ -6,39 +6,39 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - name: lastpass - author: - - Andrew Zenk (!UNKNOWN) - requirements: - - lpass (command line utility) - - must have already logged into LastPass - short_description: fetch data from LastPass - description: - - Use the lpass command line utility to fetch specific fields from LastPass. - options: - _terms: - description: Key from which you want to retrieve the field. - required: true - type: list - elements: str - field: - description: Field to return from LastPass. - default: 'password' - type: str -''' +DOCUMENTATION = r""" +name: lastpass +author: + - Andrew Zenk (!UNKNOWN) +requirements: + - lpass (command line utility) + - must have already logged into LastPass +short_description: Fetch data from LastPass +description: + - Use the lpass command line utility to fetch specific fields from LastPass. +options: + _terms: + description: Key from which you want to retrieve the field. + required: true + type: list + elements: str + field: + description: Field to return from LastPass. + default: 'password' + type: str +""" -EXAMPLES = """ +EXAMPLES = r""" - name: get 'custom_field' from LastPass entry 'entry-name' ansible.builtin.debug: msg: "{{ lookup('community.general.lastpass', 'entry-name', field='custom_field') }}" """ -RETURN = """ - _raw: - description: secrets stored - type: list - elements: str +RETURN = r""" +_raw: + description: Secrets stored. + type: list + elements: str """ from subprocess import Popen, PIPE diff --git a/plugins/lookup/lmdb_kv.py b/plugins/lookup/lmdb_kv.py index c09321d081..b3728abb17 100644 --- a/plugins/lookup/lmdb_kv.py +++ b/plugins/lookup/lmdb_kv.py @@ -6,30 +6,30 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - name: lmdb_kv - author: - - Jan-Piet Mens (@jpmens) - version_added: '0.2.0' - short_description: fetch data from LMDB - description: - - This lookup returns a list of results from an LMDB DB corresponding to a list of items given to it. 
- requirements: - - lmdb (Python library U(https://lmdb.readthedocs.io/en/release/)) - options: - _terms: - description: List of keys to query. - type: list - elements: str - db: - description: Path to LMDB database. - type: str - default: 'ansible.mdb' - vars: - - name: lmdb_kv_db -''' +DOCUMENTATION = r""" +name: lmdb_kv +author: + - Jan-Piet Mens (@jpmens) +version_added: '0.2.0' +short_description: Fetch data from LMDB +description: + - This lookup returns a list of results from an LMDB DB corresponding to a list of items given to it. +requirements: + - lmdb (Python library U(https://lmdb.readthedocs.io/en/release/)) +options: + _terms: + description: List of keys to query. + type: list + elements: str + db: + description: Path to LMDB database. + type: str + default: 'ansible.mdb' + vars: + - name: lmdb_kv_db +""" -EXAMPLES = """ +EXAMPLES = r""" - name: query LMDB for a list of country codes ansible.builtin.debug: msg: "{{ query('community.general.lmdb_kv', 'nl', 'be', 'lu', db='jp.mdb') }}" @@ -40,7 +40,7 @@ EXAMPLES = """ vars: - lmdb_kv_db: jp.mdb with_community.general.lmdb_kv: - - "n*" + - "n*" - name: get an item by key ansible.builtin.assert: @@ -52,9 +52,9 @@ EXAMPLES = """ - be """ -RETURN = """ +RETURN = r""" _raw: - description: value(s) stored in LMDB + description: Value(s) stored in LMDB. type: list elements: raw """ diff --git a/plugins/lookup/manifold.py b/plugins/lookup/manifold.py deleted file mode 100644 index 9dbd2e118f..0000000000 --- a/plugins/lookup/manifold.py +++ /dev/null @@ -1,278 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2018, Arigato Machine Inc. -# Copyright (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - author: - - Kyrylo Galanov (!UNKNOWN) - name: manifold - short_description: get credentials from Manifold.co - description: - - Retrieves resources' credentials from Manifold.co - options: - _terms: - description: - - Optional list of resource labels to lookup on Manifold.co. If no resources are specified, all - matched resources will be returned. - type: list - elements: string - required: false - api_token: - description: - - manifold API token - type: string - required: true - env: - - name: MANIFOLD_API_TOKEN - project: - description: - - The project label you want to get the resource for. - type: string - required: false - team: - description: - - The team label you want to get the resource for. - type: string - required: false -''' - -EXAMPLES = ''' - - name: all available resources - ansible.builtin.debug: - msg: "{{ lookup('community.general.manifold', api_token='SecretToken') }}" - - name: all available resources for a specific project in specific team - ansible.builtin.debug: - msg: "{{ lookup('community.general.manifold', api_token='SecretToken', project='poject-1', team='team-2') }}" - - name: two specific resources - ansible.builtin.debug: - msg: "{{ lookup('community.general.manifold', 'resource-1', 'resource-2') }}" -''' - -RETURN = ''' - _raw: - description: - - dictionary of credentials ready to be consumed as environment variables. If multiple resources define - the same environment variable(s), the last one returned by the Manifold API will take precedence. 
- type: dict -''' -from ansible.errors import AnsibleError -from ansible.plugins.lookup import LookupBase -from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError -from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils import six -from ansible.utils.display import Display -from traceback import format_exception -import json -import sys - -display = Display() - - -class ApiError(Exception): - pass - - -class ManifoldApiClient(object): - http_agent = 'python-manifold-ansible-1.0.0' - - def __init__(self, token): - self._token = token - - def _make_url(self, api, endpoint): - return f'https://api.{api}.manifold.co/v1/{endpoint}' - - def request(self, api, endpoint, *args, **kwargs): - """ - Send a request to API backend and pre-process a response. - :param api: API to send a request to - :type api: str - :param endpoint: API endpoint to fetch data from - :type endpoint: str - :param args: other args for open_url - :param kwargs: other kwargs for open_url - :return: server response. JSON response is automatically deserialized. - :rtype: dict | list | str - """ - - default_headers = { - 'Authorization': f"Bearer {self._token}", - 'Accept': "*/*" # Otherwise server doesn't set content-type header - } - - url = self._make_url(api, endpoint) - - headers = default_headers - arg_headers = kwargs.pop('headers', None) - if arg_headers: - headers.update(arg_headers) - - try: - display.vvvv(f'manifold lookup connecting to {url}') - response = open_url(url, headers=headers, http_agent=self.http_agent, *args, **kwargs) - data = response.read() - if response.headers.get('content-type') == 'application/json': - data = json.loads(data) - return data - except ValueError: - raise ApiError(f'JSON response can\'t be parsed while requesting {url}:\n{data}') - except HTTPError as e: - raise ApiError(f'Server returned: {e} while requesting {url}:\n{e.read()}') - except URLError as e: - raise ApiError(f'Failed lookup url for {url} : {e}') - except SSLValidationError as e: - raise ApiError(f'Error validating the server\'s certificate for {url}: {e}') - except ConnectionError as e: - raise ApiError(f'Error connecting to {url}: {e}') - - def get_resources(self, team_id=None, project_id=None, label=None): - """ - Get resources list - :param team_id: ID of the Team to filter resources by - :type team_id: str - :param project_id: ID of the project to filter resources by - :type project_id: str - :param label: filter resources by a label, returns a list with one or zero elements - :type label: str - :return: list of resources - :rtype: list - """ - api = 'marketplace' - endpoint = 'resources' - query_params = {} - - if team_id: - query_params['team_id'] = team_id - if project_id: - query_params['project_id'] = project_id - if label: - query_params['label'] = label - - if query_params: - endpoint += f"?{urlencode(query_params)}" - - return self.request(api, endpoint) - - def get_teams(self, label=None): - """ - Get teams list - :param label: filter teams by a label, returns a list with one or zero elements - :type label: str - :return: list of teams - :rtype: list - """ - api = 'identity' - endpoint = 'teams' - data = self.request(api, endpoint) - # Label filtering is not supported by API, however this function provides uniform interface - if label: - data = list(filter(lambda x: x['body']['label'] == label, data)) - return data - - def get_projects(self, label=None): - """ - Get 
projects list - :param label: filter projects by a label, returns a list with one or zero elements - :type label: str - :return: list of projects - :rtype: list - """ - api = 'marketplace' - endpoint = 'projects' - query_params = {} - - if label: - query_params['label'] = label - - if query_params: - endpoint += f"?{urlencode(query_params)}" - - return self.request(api, endpoint) - - def get_credentials(self, resource_id): - """ - Get resource credentials - :param resource_id: ID of the resource to filter credentials by - :type resource_id: str - :return: - """ - api = 'marketplace' - endpoint = f"credentials?{urlencode({'resource_id': resource_id})}" - return self.request(api, endpoint) - - -class LookupModule(LookupBase): - - def run(self, terms, variables=None, **kwargs): - """ - :param terms: a list of resources lookups to run. - :param variables: ansible variables active at the time of the lookup - :param api_token: API token - :param project: optional project label - :param team: optional team label - :return: a dictionary of resources credentials - """ - - self.set_options(var_options=variables, direct=kwargs) - - api_token = self.get_option('api_token') - project = self.get_option('project') - team = self.get_option('team') - - try: - labels = terms - client = ManifoldApiClient(api_token) - - if team: - team_data = client.get_teams(team) - if len(team_data) == 0: - raise AnsibleError(f"Team '{team}' does not exist") - team_id = team_data[0]['id'] - else: - team_id = None - - if project: - project_data = client.get_projects(project) - if len(project_data) == 0: - raise AnsibleError(f"Project '{project}' does not exist") - project_id = project_data[0]['id'] - else: - project_id = None - - if len(labels) == 1: # Use server-side filtering if one resource is requested - resources_data = client.get_resources(team_id=team_id, project_id=project_id, label=labels[0]) - else: # Get all resources and optionally filter labels - resources_data = client.get_resources(team_id=team_id, project_id=project_id) - if labels: - resources_data = list(filter(lambda x: x['body']['label'] in labels, resources_data)) - - if labels and len(resources_data) < len(labels): - fetched_labels = [r['body']['label'] for r in resources_data] - not_found_labels = [label for label in labels if label not in fetched_labels] - raise AnsibleError(f"Resource(s) {', '.join(not_found_labels)} do not exist") - - credentials = {} - cred_map = {} - for resource in resources_data: - resource_credentials = client.get_credentials(resource['id']) - if len(resource_credentials) and resource_credentials[0]['body']['values']: - for cred_key, cred_val in six.iteritems(resource_credentials[0]['body']['values']): - label = resource['body']['label'] - if cred_key in credentials: - display.warning(f"'{cred_key}' with label '{cred_map[cred_key]}' was replaced by resource data with label '{label}'") - credentials[cred_key] = cred_val - cred_map[cred_key] = label - - ret = [credentials] - return ret - except ApiError as e: - raise AnsibleError(f'API Error: {e}') - except AnsibleError as e: - raise e - except Exception: - exc_type, exc_value, exc_traceback = sys.exc_info() - raise AnsibleError(format_exception(exc_type, exc_value, exc_traceback)) diff --git a/plugins/lookup/merge_variables.py b/plugins/lookup/merge_variables.py index e352524292..ffe76c3ea0 100644 --- a/plugins/lookup/merge_variables.py +++ b/plugins/lookup/merge_variables.py @@ -6,72 +6,71 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = 
type -DOCUMENTATION = """ - author: - - Roy Lenferink (@rlenferink) - - Mark Ettema (@m-a-r-k-e) - - Alexander Petrenz (@alpex8) - name: merge_variables - short_description: merge variables whose names match a given pattern +DOCUMENTATION = r""" +author: + - Roy Lenferink (@rlenferink) + - Mark Ettema (@m-a-r-k-e) + - Alexander Petrenz (@alpex8) +name: merge_variables +short_description: Merge variables whose names match a given pattern +description: + - This lookup returns the merged result of all variables in scope whose names match the given prefixes, suffixes, or regular expressions. +version_added: 6.5.0 +options: + _terms: description: - - This lookup returns the merged result of all variables in scope that match the given prefixes, suffixes, or - regular expressions, optionally. - version_added: 6.5.0 - options: - _terms: - description: - - Depending on the value of O(pattern_type), this is a list of prefixes, suffixes, or regular expressions - that will be used to match all variables that should be merged. - required: true - type: list - elements: str - pattern_type: - description: - - Change the way of searching for the specified pattern. - type: str - default: 'regex' - choices: - - prefix - - suffix - - regex - env: - - name: ANSIBLE_MERGE_VARIABLES_PATTERN_TYPE - ini: - - section: merge_variables_lookup - key: pattern_type - initial_value: - description: - - An initial value to start with. - type: raw - override: - description: - - Return an error, print a warning or ignore it when a key will be overwritten. - - The default behavior V(error) makes the plugin fail when a key would be overwritten. - - When V(warn) and V(ignore) are used, note that it is important to know that the variables - are sorted by name before being merged. Keys for later variables in this order will overwrite - keys of the same name for variables earlier in this order. To avoid potential confusion, - better use O(override=error) whenever possible. - type: str - default: 'error' - choices: - - error - - warn - - ignore - env: - - name: ANSIBLE_MERGE_VARIABLES_OVERRIDE - ini: - - section: merge_variables_lookup - key: override - groups: - description: - - Search for variables accross hosts that belong to the given groups. This allows to collect configuration pieces - accross different hosts (for example a service on a host with its database on another host). - type: list - elements: str - version_added: 8.5.0 + - Depending on the value of O(pattern_type), this is a list of prefixes, suffixes, or regular expressions that is used + to match all variables that should be merged. + required: true + type: list + elements: str + pattern_type: + description: + - Change the way of searching for the specified pattern. + type: str + default: 'regex' + choices: + - prefix + - suffix + - regex + env: + - name: ANSIBLE_MERGE_VARIABLES_PATTERN_TYPE + ini: + - section: merge_variables_lookup + key: pattern_type + initial_value: + description: + - An initial value to start with. + type: raw + override: + description: + - Return an error, print a warning or ignore it when a key is overwritten. + - The default behavior V(error) makes the plugin fail when a key would be overwritten. + - When V(warn) and V(ignore) are used, note that it is important to know that the variables are sorted by name before + being merged. Keys for later variables in this order overwrite keys of the same name for variables earlier in this + order. To avoid potential confusion, better use O(override=error) whenever possible.
+ type: str + default: 'error' + choices: + - error + - warn + - ignore + env: + - name: ANSIBLE_MERGE_VARIABLES_OVERRIDE + ini: + - section: merge_variables_lookup + key: override + groups: + description: + - Search for variables across hosts that belong to the given groups. This allows collecting configuration pieces across + different hosts (for example a service on a host with its database on another host). + type: list + elements: str + version_added: 8.5.0 """ -EXAMPLES = """ +EXAMPLES = r""" # Some example variables, they can be defined anywhere as long as they are in scope test_init_list: - "list init item 1" @@ -91,7 +90,6 @@ testb__test_dict: ports: - 3 - # Merge variables that end with '__test_dict' and store the result in a variable 'example_a' example_a: "{{ lookup('community.general.merge_variables', '__test_dict', pattern_type='suffix') }}" @@ -100,7 +98,6 @@ example_a: "{{ lookup('community.general.merge_variables', '__test_dict', patter # - 1 # - 3 - # Merge variables that match the '^.+__test_list$' regular expression, starting with an initial value and store the # result in a variable 'example_b' example_b: "{{ lookup('community.general.merge_variables', '^.+__test_list$', initial_value=test_init_list) }}" @@ -112,12 +109,11 @@ example_b: "{{ lookup('community.general.merge_variables', '^.+__test_list$', in # - "test b item 1" """ -RETURN = """ - _raw: - description: In case the search matches list items, a list will be returned. In case the search matches dicts, a - dict will be returned. - type: raw - elements: raw +RETURN = r""" +_raw: + description: In case the search matches list items, a list is returned. In case the search matches dicts, a dict is returned. + type: raw + elements: raw """ import re diff --git a/plugins/lookup/onepassword.py b/plugins/lookup/onepassword.py index 60e0b2a69c..3dc589eaaf 100644 --- a/plugins/lookup/onepassword.py +++ b/plugins/lookup/onepassword.py @@ -8,39 +8,39 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - name: onepassword - author: - - Scott Buchanan (@scottsb) - - Andrew Zenk (@azenk) - - Sam Doran (@samdoran) - short_description: Fetch field values from 1Password - description: - - P(community.general.onepassword#lookup) wraps the C(op) command line utility to fetch specific field values from 1Password. - requirements: - - C(op) 1Password command line utility - options: - _terms: - description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve. - required: true - type: list - elements: string - account_id: - version_added: 7.5.0 - domain: - version_added: 3.2.0 - field: - description: Field to return from each matching item (case-insensitive). - default: 'password' - type: str - service_account_token: - version_added: 7.1.0 - extends_documentation_fragment: - - community.general.onepassword - - community.general.onepassword.lookup -''' +DOCUMENTATION = r""" +name: onepassword +author: + - Scott Buchanan (@scottsb) + - Andrew Zenk (@azenk) + - Sam Doran (@samdoran) +short_description: Fetch field values from 1Password +description: + - P(community.general.onepassword#lookup) wraps the C(op) command line utility to fetch specific field values from 1Password. +requirements: + - C(op) 1Password command line utility +options: + _terms: + description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve.
+ required: true + type: list + elements: string + account_id: + version_added: 7.5.0 + domain: + version_added: 3.2.0 + field: + description: Field to return from each matching item (case-insensitive). + default: 'password' + type: str + service_account_token: + version_added: 7.1.0 +extends_documentation_fragment: + - community.general.onepassword + - community.general.onepassword.lookup +""" -EXAMPLES = """ +EXAMPLES = r""" # These examples only work when already signed in to 1Password - name: Retrieve password for KITT when already signed in to 1Password ansible.builtin.debug: @@ -56,32 +56,24 @@ EXAMPLES = """ - name: Retrieve password for HAL when not signed in to 1Password ansible.builtin.debug: - var: lookup('community.general.onepassword', - 'HAL 9000', - subdomain='Discovery', - master_password=vault_master_password) + var: lookup('community.general.onepassword', 'HAL 9000', subdomain='Discovery', master_password=vault_master_password) - name: Retrieve password for HAL when never signed in to 1Password ansible.builtin.debug: - var: lookup('community.general.onepassword', - 'HAL 9000', - subdomain='Discovery', - master_password=vault_master_password, - username='tweety@acme.com', - secret_key=vault_secret_key) + var: >- + lookup('community.general.onepassword', 'HAL 9000', subdomain='Discovery', master_password=vault_master_password, + username='tweety@acme.com', secret_key=vault_secret_key) - name: Retrieve password from specific account ansible.builtin.debug: - var: lookup('community.general.onepassword', - 'HAL 9000', - account_id='abc123') + var: lookup('community.general.onepassword', 'HAL 9000', account_id='abc123') """ -RETURN = """ - _raw: - description: Field data requested. - type: list - elements: str +RETURN = r""" +_raw: + description: Field data requested. + type: list + elements: str """ import abc @@ -553,9 +545,7 @@ class OnePassCLIv2(OnePassCLIBase): environment_update = {"OP_SECRET_KEY": self.secret_key} return self._run(args, command_input=to_bytes(self.master_password), environment_update=environment_update) - def get_raw(self, item_id, vault=None, token=None): - args = ["item", "get", item_id, "--format", "json"] - + def _add_parameters_and_run(self, args, vault=None, token=None): if self.account_id: args.extend(["--account", self.account_id]) @@ -582,6 +572,10 @@ class OnePassCLIv2(OnePassCLIBase): return self._run(args) + def get_raw(self, item_id, vault=None, token=None): + args = ["item", "get", item_id, "--format", "json"] + return self._add_parameters_and_run(args, vault=vault, token=token) + def signin(self): self._check_required_params(['master_password']) diff --git a/plugins/lookup/onepassword_doc.py b/plugins/lookup/onepassword_doc.py index b1728fce89..82ca790a31 100644 --- a/plugins/lookup/onepassword_doc.py +++ b/plugins/lookup/onepassword_doc.py @@ -6,68 +6,53 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - name: onepassword_doc - author: - - Sam Doran (@samdoran) - requirements: - - C(op) 1Password command line utility version 2 or later. - short_description: Fetch documents stored in 1Password - version_added: "8.1.0" - description: - - P(community.general.onepassword_doc#lookup) wraps C(op) command line utility to fetch one or more documents from 1Password. - notes: - - The document contents are a string exactly as stored in 1Password. - - This plugin requires C(op) version 2 or later. 
+DOCUMENTATION = r""" +name: onepassword_doc +author: + - Sam Doran (@samdoran) +requirements: + - C(op) 1Password command line utility version 2 or later. +short_description: Fetch documents stored in 1Password +version_added: "8.1.0" +description: + - P(community.general.onepassword_doc#lookup) wraps C(op) command line utility to fetch one or more documents from 1Password. +notes: + - The document contents are a string exactly as stored in 1Password. + - This plugin requires C(op) version 2 or later. +options: + _terms: + description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve. + required: true + type: list + elements: string - options: - _terms: - description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve. - required: true - type: list - elements: string +extends_documentation_fragment: + - community.general.onepassword + - community.general.onepassword.lookup +""" - extends_documentation_fragment: - - community.general.onepassword - - community.general.onepassword.lookup -''' - -EXAMPLES = """ +EXAMPLES = r""" +--- - name: Retrieve a private key from 1Password ansible.builtin.debug: var: lookup('community.general.onepassword_doc', 'Private key') """ -RETURN = """ - _raw: - description: Requested document - type: list - elements: string +RETURN = r""" +_raw: + description: Requested document. + type: list + elements: string """ from ansible_collections.community.general.plugins.lookup.onepassword import OnePass, OnePassCLIv2 -from ansible.errors import AnsibleLookupError -from ansible.module_utils.common.text.converters import to_bytes from ansible.plugins.lookup import LookupBase class OnePassCLIv2Doc(OnePassCLIv2): def get_raw(self, item_id, vault=None, token=None): args = ["document", "get", item_id] - if vault is not None: - args = [*args, f"--vault={vault}"] - - if self.service_account_token: - if vault is None: - raise AnsibleLookupError("'vault' is required with 'service_account_token'") - - environment_update = {"OP_SERVICE_ACCOUNT_TOKEN": self.service_account_token} - return self._run(args, environment_update=environment_update) - - if token is not None: - args = [*args, to_bytes("--session=") + token] - - return self._run(args) + return self._add_parameters_and_run(args, vault=vault, token=token) class LookupModule(LookupBase): diff --git a/plugins/lookup/onepassword_raw.py b/plugins/lookup/onepassword_raw.py index dc3e590329..2d9829ec9d 100644 --- a/plugins/lookup/onepassword_raw.py +++ b/plugins/lookup/onepassword_raw.py @@ -8,35 +8,36 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - name: onepassword_raw - author: - - Scott Buchanan (@scottsb) - - Andrew Zenk (@azenk) - - Sam Doran (@samdoran) - requirements: - - C(op) 1Password command line utility - short_description: Fetch an entire item from 1Password - description: - - P(community.general.onepassword_raw#lookup) wraps C(op) command line utility to fetch an entire item from 1Password. - options: - _terms: - description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve. 
- required: true - type: list - elements: string - account_id: - version_added: 7.5.0 - domain: - version_added: 6.0.0 - service_account_token: - version_added: 7.1.0 - extends_documentation_fragment: - - community.general.onepassword - - community.general.onepassword.lookup -''' +DOCUMENTATION = r""" +name: onepassword_raw +author: + - Scott Buchanan (@scottsb) + - Andrew Zenk (@azenk) + - Sam Doran (@samdoran) +requirements: + - C(op) 1Password command line utility +short_description: Fetch an entire item from 1Password +description: + - P(community.general.onepassword_raw#lookup) wraps C(op) command line utility to fetch an entire item from 1Password. +options: + _terms: + description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve. + required: true + type: list + elements: string + account_id: + version_added: 7.5.0 + domain: + version_added: 6.0.0 + service_account_token: + version_added: 7.1.0 +extends_documentation_fragment: + - community.general.onepassword + - community.general.onepassword.lookup +""" -EXAMPLES = """ +EXAMPLES = r""" +--- - name: Retrieve all data about Wintermute ansible.builtin.debug: var: lookup('community.general.onepassword_raw', 'Wintermute') @@ -46,11 +47,11 @@ EXAMPLES = """ var: lookup('community.general.onepassword_raw', 'Wintermute', subdomain='Turing', vault_password='DmbslfLvasjdl') """ -RETURN = """ - _raw: - description: Entire item requested. - type: list - elements: dict +RETURN = r""" +_raw: + description: Entire item requested. + type: list + elements: dict """ import json diff --git a/plugins/lookup/onepassword_ssh_key.py b/plugins/lookup/onepassword_ssh_key.py new file mode 100644 index 0000000000..395de59f23 --- /dev/null +++ b/plugins/lookup/onepassword_ssh_key.py @@ -0,0 +1,119 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2025, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: onepassword_ssh_key +author: + - Mohammed Babelly (@mohammedbabelly20) +requirements: + - C(op) 1Password command line utility version 2 or later. +short_description: Fetch SSH keys stored in 1Password +version_added: "10.3.0" +description: + - P(community.general.onepassword_ssh_key#lookup) wraps C(op) command line utility to fetch SSH keys from 1Password. +notes: + - By default, it returns the private key value in PKCS#8 format, unless O(ssh_format=true) is passed. + - The plugin works only for C(SSHKEY) type items. + - This plugin requires C(op) version 2 or later. +options: + _terms: + description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve. + required: true + type: list + elements: string + ssh_format: + description: Output key in SSH format if V(true). Otherwise, outputs in the default format (PKCS#8). + default: false + type: bool + +extends_documentation_fragment: + - community.general.onepassword + - community.general.onepassword.lookup +""" + +EXAMPLES = r""" +--- +- name: Retrieve the private SSH key from 1Password + ansible.builtin.debug: + msg: "{{ lookup('community.general.onepassword_ssh_key', 'SSH Key', ssh_format=true) }}" +""" + +RETURN = r""" +_raw: + description: Private key of SSH keypair.
+ type: list + elements: string +""" +import json + +from ansible_collections.community.general.plugins.lookup.onepassword import ( + OnePass, + OnePassCLIv2, +) +from ansible.errors import AnsibleLookupError +from ansible.plugins.lookup import LookupBase + + +class LookupModule(LookupBase): + def get_ssh_key(self, out, item_id, ssh_format=False): + data = json.loads(out) + + if data.get("category") != "SSH_KEY": + raise AnsibleLookupError(f"Item {item_id} is not an SSH key") + + private_key_field = next( + ( + field + for field in data.get("fields", {}) + if field.get("id") == "private_key" and field.get("type") == "SSHKEY" + ), + None, + ) + if not private_key_field: + raise AnsibleLookupError(f"No private key found for item {item_id}.") + + if ssh_format: + return ( + private_key_field.get("ssh_formats", {}) + .get("openssh", {}) + .get("value", "") + ) + return private_key_field.get("value", "") + + def run(self, terms, variables=None, **kwargs): + self.set_options(var_options=variables, direct=kwargs) + + ssh_format = self.get_option("ssh_format") + vault = self.get_option("vault") + subdomain = self.get_option("subdomain") + domain = self.get_option("domain", "1password.com") + username = self.get_option("username") + secret_key = self.get_option("secret_key") + master_password = self.get_option("master_password") + service_account_token = self.get_option("service_account_token") + account_id = self.get_option("account_id") + connect_host = self.get_option("connect_host") + connect_token = self.get_option("connect_token") + + op = OnePass( + subdomain=subdomain, + domain=domain, + username=username, + secret_key=secret_key, + master_password=master_password, + service_account_token=service_account_token, + account_id=account_id, + connect_host=connect_host, + connect_token=connect_token, + cli_class=OnePassCLIv2, + ) + op.assert_logged_in() + + return [ + self.get_ssh_key(op.get_raw(term, vault), term, ssh_format=ssh_format) + for term in terms + ] diff --git a/plugins/lookup/passwordstore.py b/plugins/lookup/passwordstore.py index 584690c175..8f87e87034 100644 --- a/plugins/lookup/passwordstore.py +++ b/plugins/lookup/passwordstore.py @@ -7,167 +7,168 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - name: passwordstore - author: - - Patrick Deelman (!UNKNOWN) - short_description: manage passwords with passwordstore.org's pass utility +DOCUMENTATION = r""" +name: passwordstore +author: + - Patrick Deelman (!UNKNOWN) +short_description: Manage passwords with passwordstore.org's pass utility +description: + - Enables Ansible to retrieve, create or update passwords from the passwordstore.org pass utility. It can also retrieve, + create or update YAML style keys stored as multilines in the passwordfile. + - To avoid problems when accessing multiple secrets at once, add C(auto-expand-secmem) to C(~/.gnupg/gpg-agent.conf). Where + this is not possible, consider using O(lock=readwrite) instead. +options: + _terms: + description: Query key. + required: true + directory: description: - - Enables Ansible to retrieve, create or update passwords from the passwordstore.org pass utility. - It can also retrieve, create or update YAML style keys stored as multilines in the passwordfile. - - To avoid problems when accessing multiple secrets at once, add C(auto-expand-secmem) to - C(~/.gnupg/gpg-agent.conf). Where this is not possible, consider using O(lock=readwrite) instead. - options: - _terms: - description: query key. 
- required: true - directory: - description: - - The directory of the password store. - - If O(backend=pass), the default is V(~/.password-store) is used. - - If O(backend=gopass), then the default is the C(path) field in C(~/.config/gopass/config.yml), - falling back to V(~/.local/share/gopass/stores/root) if C(path) is not defined in the gopass config. - type: path - vars: - - name: passwordstore - env: - - name: PASSWORD_STORE_DIR - create: - description: Create the password or the subkey if it does not already exist. Takes precedence over O(missing). - type: bool - default: false - overwrite: - description: Overwrite the password or the subkey if it does already exist. - type: bool - default: false - umask: - description: - - Sets the umask for the created V(.gpg) files. The first octed must be greater than 3 (user readable). - - Note pass' default value is V('077'). - type: string - env: - - name: PASSWORD_STORE_UMASK - version_added: 1.3.0 - returnall: - description: Return all the content of the password, not only the first line. - type: bool - default: false - subkey: - description: - - By default return a specific subkey of the password. When set to V(password), always returns the first line. - - With O(overwrite=true), it will create the subkey and return it. - type: str - default: password - userpass: - description: Specify a password to save, instead of a generated one. - type: str - length: - description: The length of the generated password. - type: integer - default: 16 - backup: - description: Used with O(overwrite=true). Backup the previous password or subkey in a subkey. - type: bool - default: false - nosymbols: - description: Use alphanumeric characters. - type: bool - default: false - missing: - description: - - List of preference about what to do if the password file is missing. - - If O(create=true), the value for this option is ignored and assumed to be V(create). - - If set to V(error), the lookup will error out if the passname does not exist. - - If set to V(create), the passname will be created with the provided length O(length) if it does not exist. - - If set to V(empty) or V(warn), will return a V(none) in case the passname does not exist. - When using C(lookup) and not C(query), this will be translated to an empty string. - version_added: 3.1.0 - type: str - default: error - choices: - - error - - warn - - empty - - create - lock: - description: - - How to synchronize operations. - - The default of V(write) only synchronizes write operations. - - V(readwrite) synchronizes all operations (including read). This makes sure that gpg-agent is never called in parallel. - - V(none) does not do any synchronization. - ini: - - section: passwordstore_lookup - key: lock - type: str - default: write - choices: - - readwrite - - write - - none - version_added: 4.5.0 - locktimeout: - description: - - Lock timeout applied when O(lock) is not V(none). - - Time with a unit suffix, V(s), V(m), V(h) for seconds, minutes, and hours, respectively. For example, V(900s) equals V(15m). - - Correlates with C(pinentry-timeout) in C(~/.gnupg/gpg-agent.conf), see C(man gpg-agent) for details. - ini: - - section: passwordstore_lookup - key: locktimeout - type: str - default: 15m - version_added: 4.5.0 - backend: - description: - - Specify which backend to use. - - Defaults to V(pass), passwordstore.org's original pass utility. - - V(gopass) support is incomplete. 
- ini: - - section: passwordstore_lookup - key: backend - vars: - - name: passwordstore_backend - type: str - default: pass - choices: - - pass - - gopass - version_added: 5.2.0 - timestamp: - description: Add the password generation information to the end of the file. - type: bool - default: true - version_added: 8.1.0 - preserve: - description: Include the old (edited) password inside the pass file. - type: bool - default: true - version_added: 8.1.0 - missing_subkey: - description: - - Preference about what to do if the password subkey is missing. - - If set to V(error), the lookup will error out if the subkey does not exist. - - If set to V(empty) or V(warn), will return a V(none) in case the subkey does not exist. - version_added: 8.6.0 - type: str - default: empty - choices: - - error - - warn - - empty - ini: - - section: passwordstore_lookup - key: missing_subkey - notes: - - The lookup supports passing all options as lookup parameters since community.general 6.0.0. -''' -EXAMPLES = """ + - The directory of the password store. + - If O(backend=pass), the default V(~/.password-store) is used. + - If O(backend=gopass), then the default is the C(path) field in C(~/.config/gopass/config.yml), falling back to V(~/.local/share/gopass/stores/root) + if C(path) is not defined in the gopass config. + type: path + vars: + - name: passwordstore + env: + - name: PASSWORD_STORE_DIR + create: + description: Create the password or the subkey if it does not already exist. Takes precedence over O(missing). + type: bool + default: false + overwrite: + description: Overwrite the password or the subkey if it does already exist. + type: bool + default: false + umask: + description: + - Sets the umask for the created V(.gpg) files. The first octet must be greater than 3 (user readable). + - Note pass' default value is V('077'). + type: string + env: + - name: PASSWORD_STORE_UMASK + version_added: 1.3.0 + returnall: + description: Return all the content of the password, not only the first line. + type: bool + default: false + subkey: + description: + - By default return a specific subkey of the password. When set to V(password), always returns the first line. + - With O(overwrite=true), it creates the subkey and returns it. + type: str + default: password + userpass: + description: Specify a password to save, instead of a generated one. + type: str + length: + description: The length of the generated password. + type: integer + default: 16 + backup: + description: Used with O(overwrite=true). Backup the previous password or subkey in a subkey. + type: bool + default: false + nosymbols: + description: Use alphanumeric characters. + type: bool + default: false + missing: + description: + - List of preference about what to do if the password file is missing. + - If O(create=true), the value for this option is ignored and assumed to be V(create). + - If set to V(error), the lookup fails if the passname does not exist. + - If set to V(create), the passname is created with the provided length O(length) if it does not exist. + - If set to V(empty) or V(warn), it returns a V(none) in case the passname does not exist. When using C(lookup) and + not C(query), this is translated to an empty string. + version_added: 3.1.0 + type: str + default: error + choices: + - error + - warn + - empty + - create + lock: + description: + - How to synchronize operations. + - The default of V(write) only synchronizes write operations. + - V(readwrite) synchronizes all operations (including read).
This makes sure that gpg-agent is never called in parallel. + - V(none) does not do any synchronization. + ini: + - section: passwordstore_lookup + key: lock + type: str + default: write + choices: + - readwrite + - write + - none + version_added: 4.5.0 + locktimeout: + description: + - Lock timeout applied when O(lock) is not V(none). + - Time with a unit suffix, V(s), V(m), V(h) for seconds, minutes, and hours, respectively. For example, V(900s) equals + V(15m). + - Correlates with C(pinentry-timeout) in C(~/.gnupg/gpg-agent.conf), see C(man gpg-agent) for details. + ini: + - section: passwordstore_lookup + key: locktimeout + type: str + default: 15m + version_added: 4.5.0 + backend: + description: + - Specify which backend to use. + - Defaults to V(pass), passwordstore.org's original pass utility. + - V(gopass) support is incomplete. + ini: + - section: passwordstore_lookup + key: backend + vars: + - name: passwordstore_backend + type: str + default: pass + choices: + - pass + - gopass + version_added: 5.2.0 + timestamp: + description: Add the password generation information to the end of the file. + type: bool + default: true + version_added: 8.1.0 + preserve: + description: Include the old (edited) password inside the pass file. + type: bool + default: true + version_added: 8.1.0 + missing_subkey: + description: + - Preference about what to do if the password subkey is missing. + - If set to V(error), the lookup fails if the subkey does not exist. + - If set to V(empty) or V(warn), it returns a V(none) in case the subkey does not exist. + version_added: 8.6.0 + type: str + default: empty + choices: + - error + - warn + - empty + ini: + - section: passwordstore_lookup + key: missing_subkey +notes: + - The lookup supports passing all options as lookup parameters since community.general 6.0.0. +""" +EXAMPLES = r""" ansible.cfg: | [passwordstore_lookup] lock=readwrite locktimeout=45s missing_subkey=warn -tasks.yml: | +tasks.yml: |- --- # Debug is used for examples, BAD IDEA to show passwords on screen @@ -233,10 +234,10 @@ tasks.yml: | passfilecontent: "{{ lookup('community.general.passwordstore', 'example/test', returnall=true)}}" """ -RETURN = """ +RETURN = r""" _raw: description: - - a password + - A password.
type: list elements: str """ @@ -572,16 +573,20 @@ class LookupModule(LookupBase): for term in terms: self.parse_params(term) # parse the input into paramvals with self.opt_lock('readwrite'): - if self.check_pass(): # password exists - if self.paramvals['overwrite']: + if self.check_pass(): # password file exists + if self.paramvals['overwrite']: # if "overwrite", always update password with self.opt_lock('write'): result.append(self.update_password()) - elif self.paramvals["subkey"] != "password" and not self.passdict.get(self.paramvals['subkey']): # password exists but not the subkey + elif ( + self.paramvals["subkey"] != "password" + and not self.passdict.get(self.paramvals["subkey"]) + and self.paramvals["missing"] == "create" + ): # target is a subkey, this subkey is not in passdict BUT missing == create with self.opt_lock('write'): result.append(self.update_password()) else: result.append(self.get_passresult()) - else: # password does not exist + else: # password does not exist if self.paramvals['missing'] == 'create': with self.opt_lock('write'): if self.locked == 'write' and self.check_pass(): # lookup password again if under write lock diff --git a/plugins/lookup/random_pet.py b/plugins/lookup/random_pet.py index 77f1c34a51..8f9b3cbd00 100644 --- a/plugins/lookup/random_pet.py +++ b/plugins/lookup/random_pet.py @@ -8,38 +8,38 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = r''' - name: random_pet - author: - - Abhijeet Kasurde (@Akasurde) - short_description: Generates random pet names - version_added: '3.1.0' - requirements: - - petname U(https://github.com/dustinkirkland/python-petname) +DOCUMENTATION = r""" +name: random_pet +author: + - Abhijeet Kasurde (@Akasurde) +short_description: Generates random pet names +version_added: '3.1.0' +requirements: + - petname U(https://github.com/dustinkirkland/python-petname) +description: + - Generates random pet names that can be used as unique identifiers for the resources. +options: + words: description: - - Generates random pet names that can be used as unique identifiers for the resources. - options: - words: - description: - - The number of words in the pet name. - default: 2 - type: int - length: - description: - - The maximal length of every component of the pet name. - - Values below 3 will be set to 3 by petname. - default: 6 - type: int - prefix: - description: A string to prefix with the name. - type: str - separator: - description: The character to separate words in the pet name. - default: "-" - type: str -''' + - The number of words in the pet name. + default: 2 + type: int + length: + description: + - The maximal length of every component of the pet name. + - Values below V(3) are set to V(3) by petname. + default: 6 + type: int + prefix: + description: A string to prefix with the name. + type: str + separator: + description: The character to separate words in the pet name. + default: "-" + type: str +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Generate pet name ansible.builtin.debug: var: lookup('community.general.random_pet') @@ -59,14 +59,14 @@ EXAMPLES = r''' ansible.builtin.debug: var: lookup('community.general.random_pet', length=7) # Example result: 'natural-peacock' -''' +""" -RETURN = r''' - _raw: - description: A one-element list containing a random pet name - type: list - elements: str -''' +RETURN = r""" +_raw: + description: A one-element list containing a random pet name. 
+ type: list + elements: str +""" try: import petname diff --git a/plugins/lookup/random_string.py b/plugins/lookup/random_string.py index 9b811dd8b3..4b227d3dca 100644 --- a/plugins/lookup/random_string.py +++ b/plugins/lookup/random_string.py @@ -9,95 +9,94 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type DOCUMENTATION = r""" - name: random_string - author: - - Abhijeet Kasurde (@Akasurde) - short_description: Generates random string - version_added: '3.2.0' +name: random_string +author: + - Abhijeet Kasurde (@Akasurde) +short_description: Generates random string +version_added: '3.2.0' +description: + - Generates random string based upon the given constraints. + - Uses L(random.SystemRandom,https://docs.python.org/3/library/random.html#random.SystemRandom), so should be strong enough + for cryptographic purposes. +options: + length: + description: The length of the string. + default: 8 + type: int + upper: description: - - Generates random string based upon the given constraints. - - Uses L(random.SystemRandom,https://docs.python.org/3/library/random.html#random.SystemRandom), - so should be strong enough for cryptographic purposes. - options: - length: - description: The length of the string. - default: 8 - type: int - upper: - description: - - Include uppercase letters in the string. - default: true - type: bool - lower: - description: - - Include lowercase letters in the string. - default: true - type: bool - numbers: - description: - - Include numbers in the string. - default: true - type: bool - special: - description: - - Include special characters in the string. - - Special characters are taken from Python standard library C(string). - See L(the documentation of string.punctuation,https://docs.python.org/3/library/string.html#string.punctuation) - for which characters will be used. - - The choice of special characters can be changed to setting O(override_special). - default: true - type: bool - min_numeric: - description: - - Minimum number of numeric characters in the string. - - If set, overrides O(numbers=false). - default: 0 - type: int - min_upper: - description: - - Minimum number of uppercase alphabets in the string. - - If set, overrides O(upper=false). - default: 0 - type: int - min_lower: - description: - - Minimum number of lowercase alphabets in the string. - - If set, overrides O(lower=false). - default: 0 - type: int - min_special: - description: - - Minimum number of special character in the string. - default: 0 - type: int - override_special: - description: - - Override a list of special characters to use in the string. - - If set O(min_special) should be set to a non-default value. - type: str - override_all: - description: - - Override all values of O(numbers), O(upper), O(lower), and O(special) with - the given list of characters. - type: str - ignore_similar_chars: - description: - - Ignore similar characters, such as V(l) and V(1), or V(O) and V(0). - - These characters can be configured in O(similar_chars). - default: false - type: bool - version_added: 7.5.0 - similar_chars: - description: - - Override a list of characters not to be use in the string. - default: "il1LoO0" - type: str - version_added: 7.5.0 - base64: - description: - - Returns base64 encoded string. - type: bool - default: false + - Include uppercase letters in the string. + default: true + type: bool + lower: + description: + - Include lowercase letters in the string. 
+ default: true + type: bool + numbers: + description: + - Include numbers in the string. + default: true + type: bool + special: + description: + - Include special characters in the string. + - Special characters are taken from Python standard library C(string). See L(the documentation of + string.punctuation,https://docs.python.org/3/library/string.html#string.punctuation) + for which characters are used. + - The choice of special characters can be changed by setting O(override_special). + default: true + type: bool + min_numeric: + description: + - Minimum number of numeric characters in the string. + - If set, overrides O(numbers=false). + default: 0 + type: int + min_upper: + description: + - Minimum number of uppercase alphabets in the string. + - If set, overrides O(upper=false). + default: 0 + type: int + min_lower: + description: + - Minimum number of lowercase alphabets in the string. + - If set, overrides O(lower=false). + default: 0 + type: int + min_special: + description: + - Minimum number of special characters in the string. + default: 0 + type: int + override_special: + description: + - Override a list of special characters to use in the string. + - If set, O(min_special) should be set to a non-default value. + type: str + override_all: + description: + - Override all values of O(numbers), O(upper), O(lower), and O(special) with the given list of characters. + type: str + ignore_similar_chars: + description: + - Ignore similar characters, such as V(l) and V(1), or V(O) and V(0). + - These characters can be configured in O(similar_chars). + default: false + type: bool + version_added: 7.5.0 + similar_chars: + description: + - Override a list of characters not to be used in the string. + default: "il1LoO0" + type: str + version_added: 7.5.0 + base64: + description: + - Returns base64 encoded string. + type: bool + default: false """ EXAMPLES = r""" @@ -142,10 +141,10 @@ EXAMPLES = r""" """ RETURN = r""" - _raw: - description: A one-element list containing a random string - type: list - elements: str +_raw: + description: A one-element list containing a random string. + type: list + elements: str """ import base64 diff --git a/plugins/lookup/random_words.py b/plugins/lookup/random_words.py index a4aa1b3178..247871dba0 100644 --- a/plugins/lookup/random_words.py +++ b/plugins/lookup/random_words.py @@ -10,44 +10,43 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type DOCUMENTATION = r""" - name: random_words - author: - - Thomas Sjögren (@konstruktoid) - short_description: Return a number of random words - version_added: "4.0.0" - requirements: - - xkcdpass U(https://github.com/redacted/XKCD-password-generator) +name: random_words +author: + - Thomas Sjögren (@konstruktoid) +short_description: Return a number of random words +version_added: "4.0.0" +requirements: + - xkcdpass U(https://github.com/redacted/XKCD-password-generator) +description: + - Returns a number of random words. The output can for example be used for passwords. + - See U(https://xkcd.com/936/) for background. +options: + numwords: description: - - Returns a number of random words. The output can for example be used for - passwords. - - See U(https://xkcd.com/936/) for background. - options: - numwords: - description: - - The number of words. - default: 6 - type: int - min_length: - description: - - Minimum length of words to make password. - default: 5 - type: int - max_length: - description: - - Maximum length of words to make password.
- default: 9 - type: int - delimiter: - description: - - The delimiter character between words. - default: " " - type: str - case: - description: - - The method for setting the case of each word in the passphrase. - choices: ["alternating", "upper", "lower", "random", "capitalize"] - default: "lower" - type: str + - The number of words. + default: 6 + type: int + min_length: + description: + - Minimum length of words to make password. + default: 5 + type: int + max_length: + description: + - Maximum length of words to make password. + default: 9 + type: int + delimiter: + description: + - The delimiter character between words. + default: " " + type: str + case: + description: + - The method for setting the case of each word in the passphrase. + choices: ["alternating", "upper", "lower", "random", "capitalize"] + default: "lower" + type: str """ EXAMPLES = r""" @@ -74,10 +73,10 @@ EXAMPLES = r""" """ RETURN = r""" - _raw: - description: A single-element list containing random words. - type: list - elements: str +_raw: + description: A single-element list containing random words. + type: list + elements: str """ from ansible.errors import AnsibleLookupError diff --git a/plugins/lookup/redis.py b/plugins/lookup/redis.py index 5c669a7f23..bb5a122da3 100644 --- a/plugins/lookup/redis.py +++ b/plugins/lookup/redis.py @@ -6,50 +6,50 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - name: redis - author: - - Jan-Piet Mens (@jpmens) - - Ansible Core Team - short_description: fetch data from Redis - description: - - This lookup returns a list of results from a Redis DB corresponding to a list of items given to it - requirements: - - redis (python library https://github.com/andymccurdy/redis-py/) - options: - _terms: - description: list of keys to query - type: list - elements: string - host: - description: location of Redis host - type: string - default: '127.0.0.1' - env: - - name: ANSIBLE_REDIS_HOST - ini: - - section: lookup_redis - key: host - port: - description: port on which Redis is listening on - default: 6379 - type: int - env: - - name: ANSIBLE_REDIS_PORT - ini: - - section: lookup_redis - key: port - socket: - description: path to socket on which to query Redis, this option overrides host and port options when set. - type: path - env: - - name: ANSIBLE_REDIS_SOCKET - ini: - - section: lookup_redis - key: socket -''' +DOCUMENTATION = r""" +name: redis +author: + - Jan-Piet Mens (@jpmens) + - Ansible Core Team +short_description: Fetch data from Redis +description: + - This lookup returns a list of results from a Redis DB corresponding to a list of items given to it. +requirements: + - redis (python library https://github.com/andymccurdy/redis-py/) +options: + _terms: + description: List of keys to query. + type: list + elements: string + host: + description: Location of Redis host. + type: string + default: '127.0.0.1' + env: + - name: ANSIBLE_REDIS_HOST + ini: + - section: lookup_redis + key: host + port: + description: Port on which Redis is listening. + default: 6379 + type: int + env: + - name: ANSIBLE_REDIS_PORT + ini: + - section: lookup_redis + key: port + socket: + description: Path to socket on which to query Redis; this option overrides host and port options when set.
+ type: path + env: + - name: ANSIBLE_REDIS_SOCKET + ini: + - section: lookup_redis + key: socket +""" -EXAMPLES = """ +EXAMPLES = r""" - name: query redis for somekey (default or configured settings used) ansible.builtin.debug: msg: "{{ lookup('community.general.redis', 'somekey') }}" @@ -66,12 +66,11 @@ EXAMPLES = """ - name: use list directly with a socket ansible.builtin.debug: msg: "{{ lookup('community.general.redis', 'key1', 'key2', socket='/var/tmp/redis.sock') }}" - """ -RETURN = """ +RETURN = r""" _raw: - description: value(s) stored in Redis + description: Value(s) stored in Redis. type: list elements: str """ diff --git a/plugins/lookup/revbitspss.py b/plugins/lookup/revbitspss.py index 89c19cf23c..6b31963f4a 100644 --- a/plugins/lookup/revbitspss.py +++ b/plugins/lookup/revbitspss.py @@ -12,54 +12,55 @@ author: RevBits (@RevBits) short_description: Get secrets from RevBits PAM server version_added: 4.1.0 description: - - Uses the revbits_ansible Python SDK to get Secrets from RevBits PAM - Server using API key authentication with the REST API. + - Uses the revbits_ansible Python SDK to get Secrets from RevBits PAM Server using API key authentication with the REST + API. requirements: - - revbits_ansible - U(https://pypi.org/project/revbits_ansible/) + - revbits_ansible - U(https://pypi.org/project/revbits_ansible/) options: - _terms: - description: - - This will be an array of keys for secrets which you want to fetch from RevBits PAM. - required: true - type: list - elements: string - base_url: - description: - - This will be the base URL of the server, for example V(https://server-url-here). - required: true - type: string - api_key: - description: - - This will be the API key for authentication. You can get it from the RevBits PAM secret manager module. - required: true - type: string + _terms: + description: + - This is an array of keys for secrets which you want to fetch from RevBits PAM. + required: true + type: list + elements: string + base_url: + description: + - This is the base URL of the server, for example V(https://server-url-here). + required: true + type: string + api_key: + description: + - This is the API key for authentication. You can get it from the RevBits PAM secret manager module. + required: true + type: string """ RETURN = r""" _list: - description: - - The JSON responses which you can access with defined keys. - - If you are fetching secrets named as UUID, PASSWORD it will gives you the dict of all secrets. - type: list - elements: dict + description: + - The JSON responses which you can access with defined keys. + - If you are fetching secrets named UUID or PASSWORD, it returns a dict of all secrets.
+ type: list + elements: dict """ EXAMPLES = r""" +--- - hosts: localhost vars: - secret: >- - {{ - lookup( - 'community.general.revbitspss', - 'UUIDPAM', 'DB_PASS', - base_url='https://server-url-here', - api_key='API_KEY_GOES_HERE' - ) - }} + secret: >- + {{ + lookup( + 'community.general.revbitspss', + 'UUIDPAM', 'DB_PASS', + base_url='https://server-url-here', + api_key='API_KEY_GOES_HERE' + ) + }} tasks: - - ansible.builtin.debug: - msg: > - UUIDPAM is {{ (secret['UUIDPAM']) }} and DB_PASS is {{ (secret['DB_PASS']) }} + - ansible.builtin.debug: + msg: >- + UUIDPAM is {{ (secret['UUIDPAM']) }} and DB_PASS is {{ (secret['DB_PASS']) }} """ from ansible.plugins.lookup import LookupBase diff --git a/plugins/lookup/shelvefile.py b/plugins/lookup/shelvefile.py index 4d965372fb..f4142f67c6 100644 --- a/plugins/lookup/shelvefile.py +++ b/plugins/lookup/shelvefile.py @@ -6,34 +6,35 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' - name: shelvefile - author: Alejandro Guirao (!UNKNOWN) - short_description: read keys from Python shelve file - description: - - Read keys from Python shelve file. - options: - _terms: - description: Sets of key value pairs of parameters. - type: list - elements: str - key: - description: Key to query. - type: str - required: true - file: - description: Path to shelve file. - type: path - required: true -''' +DOCUMENTATION = r""" +name: shelvefile +author: Alejandro Guirao (!UNKNOWN) +short_description: Read keys from Python shelve file +description: + - Read keys from Python shelve file. +options: + _terms: + description: Sets of key value pairs of parameters. + type: list + elements: str + key: + description: Key to query. + type: str + required: true + file: + description: Path to shelve file. + type: path + required: true +""" -EXAMPLES = """ +EXAMPLES = r""" +--- - name: Retrieve a string value corresponding to a key inside a Python shelve file ansible.builtin.debug: msg: "{{ lookup('community.general.shelvefile', 'file=path_to_some_shelve_file.db key=key_to_retrieve') }}" """ -RETURN = """ +RETURN = r""" _list: description: Value(s) of key(s) in shelve file(s). type: list diff --git a/plugins/lookup/tss.py b/plugins/lookup/tss.py index ffae6bb824..3d73fcbe99 100644 --- a/plugins/lookup/tss.py +++ b/plugins/lookup/tss.py @@ -12,200 +12,196 @@ author: Adam Migus (@amigus) short_description: Get secrets from Thycotic Secret Server version_added: 1.0.0 description: - - Uses the Thycotic Secret Server Python SDK to get Secrets from Secret - Server using token authentication with O(username) and O(password) on - the REST API at O(base_url). - - When using self-signed certificates the environment variable - E(REQUESTS_CA_BUNDLE) can be set to a file containing the trusted certificates - (in C(.pem) format). - - For example, C(export REQUESTS_CA_BUNDLE='/etc/ssl/certs/ca-bundle.trust.crt'). + - Uses the Thycotic Secret Server Python SDK to get Secrets from Secret Server using token authentication with O(username) + and O(password) on the REST API at O(base_url). + - When using self-signed certificates the environment variable E(REQUESTS_CA_BUNDLE) can be set to a file containing the + trusted certificates (in C(.pem) format). + - For example, C(export REQUESTS_CA_BUNDLE='/etc/ssl/certs/ca-bundle.trust.crt'). requirements: - - python-tss-sdk - https://pypi.org/project/python-tss-sdk/ + - python-tss-sdk - https://pypi.org/project/python-tss-sdk/ options: - _terms: - description: The integer ID of the secret. 
- required: true - type: list - elements: int - secret_path: - description: Indicate a full path of secret including folder and secret name when the secret ID is set to 0. - required: false - type: str - version_added: 7.2.0 - fetch_secret_ids_from_folder: - description: - - Boolean flag which indicates whether secret ids are in a folder is fetched by folder ID or not. - - V(true) then the terms will be considered as a folder IDs. Otherwise (default), they are considered as secret IDs. - required: false - type: bool - version_added: 7.1.0 - fetch_attachments: - description: - - Boolean flag which indicates whether attached files will get downloaded or not. - - The download will only happen if O(file_download_path) has been provided. - required: false - type: bool - version_added: 7.0.0 - file_download_path: - description: Indicate the file attachment download location. - required: false - type: path - version_added: 7.0.0 - base_url: - description: The base URL of the server, for example V(https://localhost/SecretServer). - type: string - env: - - name: TSS_BASE_URL - ini: - - section: tss_lookup - key: base_url - required: true - username: - description: The username with which to request the OAuth2 Access Grant. - type: string - env: - - name: TSS_USERNAME - ini: - - section: tss_lookup - key: username - password: - description: - - The password associated with the supplied username. - - Required when O(token) is not provided. - type: string - env: - - name: TSS_PASSWORD - ini: - - section: tss_lookup - key: password - domain: - default: "" - description: - - The domain with which to request the OAuth2 Access Grant. - - Optional when O(token) is not provided. - - Requires C(python-tss-sdk) version 1.0.0 or greater. - type: string - env: - - name: TSS_DOMAIN - ini: - - section: tss_lookup - key: domain - required: false - version_added: 3.6.0 - token: - description: - - Existing token for Thycotic authorizer. - - If provided, O(username) and O(password) are not needed. - - Requires C(python-tss-sdk) version 1.0.0 or greater. - type: string - env: - - name: TSS_TOKEN - ini: - - section: tss_lookup - key: token - version_added: 3.7.0 - api_path_uri: - default: /api/v1 - description: The path to append to the base URL to form a valid REST - API request. - type: string - env: - - name: TSS_API_PATH_URI - required: false - token_path_uri: - default: /oauth2/token - description: The path to append to the base URL to form a valid OAuth2 - Access Grant request. - type: string - env: - - name: TSS_TOKEN_PATH_URI - required: false + _terms: + description: The integer ID of the secret. + required: true + type: list + elements: int + secret_path: + description: Indicate a full path of secret including folder and secret name when the secret ID is set to 0. + required: false + type: str + version_added: 7.2.0 + fetch_secret_ids_from_folder: + description: + - Boolean flag which indicates whether secret IDs in a folder are fetched by folder ID or not. + - If V(true), the terms are considered folder IDs. Otherwise (default), they are considered secret IDs. + required: false + type: bool + version_added: 7.1.0 + fetch_attachments: + description: + - Boolean flag which indicates whether attached files are downloaded or not. + - The download only happens if O(file_download_path) has been provided. + required: false + type: bool + version_added: 7.0.0 + file_download_path: + description: Indicate the file attachment download location.
+ required: false + type: path + version_added: 7.0.0 + base_url: + description: The base URL of the server, for example V(https://localhost/SecretServer). + type: string + env: + - name: TSS_BASE_URL + ini: + - section: tss_lookup + key: base_url + required: true + username: + description: The username with which to request the OAuth2 Access Grant. + type: string + env: + - name: TSS_USERNAME + ini: + - section: tss_lookup + key: username + password: + description: + - The password associated with the supplied username. + - Required when O(token) is not provided. + type: string + env: + - name: TSS_PASSWORD + ini: + - section: tss_lookup + key: password + domain: + default: "" + description: + - The domain with which to request the OAuth2 Access Grant. + - Optional when O(token) is not provided. + - Requires C(python-tss-sdk) version 1.0.0 or greater. + type: string + env: + - name: TSS_DOMAIN + ini: + - section: tss_lookup + key: domain + required: false + version_added: 3.6.0 + token: + description: + - Existing token for Thycotic authorizer. + - If provided, O(username) and O(password) are not needed. + - Requires C(python-tss-sdk) version 1.0.0 or greater. + type: string + env: + - name: TSS_TOKEN + ini: + - section: tss_lookup + key: token + version_added: 3.7.0 + api_path_uri: + default: /api/v1 + description: The path to append to the base URL to form a valid REST API request. + type: string + env: + - name: TSS_API_PATH_URI + required: false + token_path_uri: + default: /oauth2/token + description: The path to append to the base URL to form a valid OAuth2 Access Grant request. + type: string + env: + - name: TSS_TOKEN_PATH_URI + required: false """ RETURN = r""" _list: - description: - - The JSON responses to C(GET /secrets/{id}). - - See U(https://updates.thycotic.net/secretserver/restapiguide/TokenAuth/#operation--secrets--id--get). - type: list - elements: dict + description: + - The JSON responses to C(GET /secrets/{id}). + - See U(https://updates.thycotic.net/secretserver/restapiguide/TokenAuth/#operation--secrets--id--get). 
+ type: list + elements: dict """ EXAMPLES = r""" - hosts: localhost vars: - secret: >- - {{ - lookup( - 'community.general.tss', - 102, - base_url='https://secretserver.domain.com/SecretServer/', - username='user.name', - password='password' - ) - }} + secret: >- + {{ + lookup( + 'community.general.tss', + 102, + base_url='https://secretserver.domain.com/SecretServer/', + username='user.name', + password='password' + ) + }} tasks: - - ansible.builtin.debug: - msg: > - the password is {{ - (secret['items'] - | items2dict(key_name='slug', - value_name='itemValue'))['password'] - }} + - ansible.builtin.debug: + msg: > + the password is {{ + (secret['items'] + | items2dict(key_name='slug', + value_name='itemValue'))['password'] + }} - hosts: localhost vars: - secret: >- - {{ - lookup( - 'community.general.tss', - 102, - base_url='https://secretserver.domain.com/SecretServer/', - username='user.name', - password='password', - domain='domain' - ) - }} + secret: >- + {{ + lookup( + 'community.general.tss', + 102, + base_url='https://secretserver.domain.com/SecretServer/', + username='user.name', + password='password', + domain='domain' + ) + }} tasks: - - ansible.builtin.debug: - msg: > - the password is {{ - (secret['items'] - | items2dict(key_name='slug', - value_name='itemValue'))['password'] - }} + - ansible.builtin.debug: + msg: > + the password is {{ + (secret['items'] + | items2dict(key_name='slug', + value_name='itemValue'))['password'] + }} - hosts: localhost vars: - secret_password: >- - {{ - ((lookup( - 'community.general.tss', - 102, - base_url='https://secretserver.domain.com/SecretServer/', - token='thycotic_access_token', - ) | from_json).get('items') | items2dict(key_name='slug', value_name='itemValue'))['password'] - }} + secret_password: >- + {{ + ((lookup( + 'community.general.tss', + 102, + base_url='https://secretserver.domain.com/SecretServer/', + token='thycotic_access_token', + ) | from_json).get('items') | items2dict(key_name='slug', value_name='itemValue'))['password'] + }} tasks: - - ansible.builtin.debug: - msg: the password is {{ secret_password }} + - ansible.builtin.debug: + msg: the password is {{ secret_password }} # Private key stores into certificate file which is attached with secret. # If fetch_attachments=True then private key file will be download on specified path # and file content will display in debug message. 
 - hosts: localhost
   vars:
-      secret: >-
-        {{
-            lookup(
-                'community.general.tss',
-                102,
-                fetch_attachments=True,
-                file_download_path='/home/certs',
-                base_url='https://secretserver.domain.com/SecretServer/',
-                token='thycotic_access_token'
-            )
-        }}
+    secret: >-
+      {{
+        lookup(
+          'community.general.tss',
+          102,
+          fetch_attachments=True,
+          file_download_path='/home/certs',
+          base_url='https://secretserver.domain.com/SecretServer/',
+          token='thycotic_access_token'
+        )
+      }}
   tasks:
     - ansible.builtin.debug:
         msg: >
@@ -218,16 +214,16 @@ EXAMPLES = r"""
 # If fetch_secret_ids_from_folder=true, the IDs of all secrets within the folder with the given ID are fetched
 - hosts: localhost
   vars:
-      secret: >-
-        {{
-            lookup(
-                'community.general.tss',
-                102,
-                fetch_secret_ids_from_folder=true,
-                base_url='https://secretserver.domain.com/SecretServer/',
-                token='thycotic_access_token'
-            )
-        }}
+    secret: >-
+      {{
+        lookup(
+          'community.general.tss',
+          102,
+          fetch_secret_ids_from_folder=true,
+          base_url='https://secretserver.domain.com/SecretServer/',
+          token='thycotic_access_token'
+        )
+      }}
   tasks:
     - ansible.builtin.debug:
         msg: >
@@ -238,25 +234,25 @@ EXAMPLES = r"""
 # If the secret ID is 0 and secret_path is set, the secret is fetched by its path
 - hosts: localhost
   vars:
-      secret: >-
-        {{
-            lookup(
-                'community.general.tss',
-                0,
-                secret_path='\folderName\secretName'
-                base_url='https://secretserver.domain.com/SecretServer/',
-                username='user.name',
-                password='password'
-            )
-        }}
+    secret: >-
+      {{
+        lookup(
+          'community.general.tss',
+          0,
+          secret_path='\folderName\secretName',
+          base_url='https://secretserver.domain.com/SecretServer/',
+          username='user.name',
+          password='password'
+        )
+      }}
   tasks:
-      - ansible.builtin.debug:
-          msg: >
-            the password is {{
-              (secret['items']
-                | items2dict(key_name='slug',
-                             value_name='itemValue'))['password']
-            }}
+    - ansible.builtin.debug:
+        msg: >-
+          the password is {{
+            (secret['items']
+              | items2dict(key_name='slug',
+                           value_name='itemValue'))['password']
+          }}
"""

import abc
diff --git a/plugins/module_utils/cmd_runner_fmt.py b/plugins/module_utils/cmd_runner_fmt.py
index bd6d00a15d..8b415edcf9 100644
--- a/plugins/module_utils/cmd_runner_fmt.py
+++ b/plugins/module_utils/cmd_runner_fmt.py
@@ -78,7 +78,9 @@ def as_list(ignore_none=None, min_len=0, max_len=None):
     return _ArgFormat(func, ignore_none=ignore_none)
 
 
-def as_fixed(args):
+def as_fixed(*args):
+    if len(args) == 1 and is_sequence(args[0]):
+        args = args[0]
     return _ArgFormat(lambda value: _ensure_list(args), ignore_none=False, ignore_missing_value=True)
 
 
diff --git a/plugins/module_utils/django.py b/plugins/module_utils/django.py
index 8314ed945e..4f5293c09f 100644
--- a/plugins/module_utils/django.py
+++ b/plugins/module_utils/django.py
@@ -67,11 +67,9 @@ class _DjangoRunner(PythonRunner):
 
 class DjangoModuleHelper(ModuleHelper):
     module = {}
-    use_old_vardict = False
     django_admin_cmd = None
     arg_formats = {}
     django_admin_arg_order = ()
-    use_old_vardict = False
     _django_args = []
     _check_mode_arg = ""
 
diff --git a/plugins/module_utils/identity/keycloak/keycloak.py b/plugins/module_utils/identity/keycloak/keycloak.py
index a9867b9b08..e053eca305 100644
--- a/plugins/module_utils/identity/keycloak/keycloak.py
+++ b/plugins/module_utils/identity/keycloak/keycloak.py
@@ -142,6 +142,7 @@ def keycloak_argument_spec():
         validate_certs=dict(type='bool', default=True),
         connection_timeout=dict(type='int', default=10),
         token=dict(type='str', no_log=True),
+        refresh_token=dict(type='str', no_log=True),
         http_agent=dict(type='str', default='Ansible'),
     )
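Back to the cmd_runner_fmt.py hunk above: as_fixed() now accepts plain varargs in addition to a single sequence. A self-contained sketch of the normalization, with isinstance standing in for the module's is_sequence() helper:

    def as_fixed_sketch(*args):
        # One positional sequence and plain varargs normalize to the same list.
        if len(args) == 1 and isinstance(args[0], (list, tuple)):
            args = args[0]
        return list(args)

    assert as_fixed_sketch(["--answer", "42"]) == ["--answer", "42"]
    assert as_fixed_sketch("--answer", "42") == ["--answer", "42"]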
@@ -151,58 +152,142 @@ def camel(words):
 
 
 class KeycloakError(Exception):
-    pass
+    def __init__(self, msg, authError=None):
+        self.msg = msg
+        self.authError = authError
+
+    def __str__(self):
+        return str(self.msg)
+
+
+def _token_request(module_params, payload):
+    """ Obtains an access token from the Keycloak token endpoint,
+        using the provided request payload
+    :param module_params: parameters of the module
+    :param payload:
+        type:
+            dict
+        description:
+            Authentication request payload. Must contain at least
+            'grant_type' and 'client_id', optionally 'client_secret',
+            along with parameters based on 'grant_type'; e.g.,
+            'username'/'password' for type 'password',
+            'refresh_token' for type 'refresh_token'.
+    :return: access token
+    """
+    base_url = module_params.get('auth_keycloak_url')
+    if not base_url.lower().startswith(('http', 'https')):
+        raise KeycloakError("auth_url '%s' should either start with 'http' or 'https'." % base_url)
+    auth_realm = module_params.get('auth_realm')
+    auth_url = URL_TOKEN.format(url=base_url, realm=auth_realm)
+    http_agent = module_params.get('http_agent')
+    validate_certs = module_params.get('validate_certs')
+    connection_timeout = module_params.get('connection_timeout')
+
+    try:
+        r = json.loads(to_native(open_url(auth_url, method='POST',
+                                          validate_certs=validate_certs, http_agent=http_agent, timeout=connection_timeout,
+                                          data=urlencode(payload)).read()))
+
+        return r['access_token']
+    except ValueError as e:
+        raise KeycloakError(
+            'API returned invalid JSON when trying to obtain access token from %s: %s'
+            % (auth_url, str(e)))
+    except KeyError:
+        raise KeycloakError(
+            'API did not include access_token field in response from %s' % auth_url)
+    except Exception as e:
+        raise KeycloakError('Could not obtain access token from %s: %s'
+                            % (auth_url, str(e)), authError=e)
+
+
+def _request_token_using_credentials(module_params):
+    """ Obtains an access token,
+        using the provided auth_username/auth_password
+    :param module_params: parameters of the module. Must include 'auth_username' and 'auth_password'.
+    :return: access token
+    """
+    client_id = module_params.get('auth_client_id')
+    auth_username = module_params.get('auth_username')
+    auth_password = module_params.get('auth_password')
+    client_secret = module_params.get('auth_client_secret')
+
+    temp_payload = {
+        'grant_type': 'password',
+        'client_id': client_id,
+        'client_secret': client_secret,
+        'username': auth_username,
+        'password': auth_password,
+    }
+    # Remove empty items, for instance missing client_secret
+    payload = {k: v for k, v in temp_payload.items() if v is not None}
+
+    return _token_request(module_params, payload)
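For reference, the password grant that _request_token_using_credentials() assembles is a plain OAuth2 token request. A minimal standalone sketch (endpoint and credentials are placeholders; the real helper goes through URL_TOKEN and open_url):

    import json
    from urllib.parse import urlencode
    from urllib.request import Request, urlopen

    # Hypothetical endpoint and credentials, for illustration only.
    auth_url = 'https://keycloak.example.com/realms/master/protocol/openid-connect/token'
    payload = urlencode({
        'grant_type': 'password',
        'client_id': 'admin-cli',
        'username': 'admin',
        'password': 'secret',
    }).encode()

    with urlopen(Request(auth_url, data=payload, method='POST')) as resp:
        access_token = json.load(resp)['access_token']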
+
+
+def _request_token_using_refresh_token(module_params):
+    """ Obtains an access token,
+        using the provided refresh_token
+    :param module_params: parameters of the module. Must include 'refresh_token'.
+    :return: access token
+    """
+    client_id = module_params.get('auth_client_id')
+    refresh_token = module_params.get('refresh_token')
+    client_secret = module_params.get('auth_client_secret')
+
+    temp_payload = {
+        'grant_type': 'refresh_token',
+        'client_id': client_id,
+        'client_secret': client_secret,
+        'refresh_token': refresh_token,
+    }
+    # Remove empty items, for instance missing client_secret
+    payload = {k: v for k, v in temp_payload.items() if v is not None}
+
+    return _token_request(module_params, payload)
+
+
+def _request_token_using_client_credentials(module_params):
+    """ Obtains an access token,
+        using the provided auth_client_id and auth_client_secret by grant_type
+        client_credentials. Ensure that the used client uses client authorization
+        with service account roles enabled and required service roles assigned.
+    :param module_params: parameters of the module. Must include 'auth_client_id'
+        and 'auth_client_secret'.
+    :return: access token
+    """
+    client_id = module_params.get('auth_client_id')
+    client_secret = module_params.get('auth_client_secret')
+
+    temp_payload = {
+        'grant_type': 'client_credentials',
+        'client_id': client_id,
+        'client_secret': client_secret,
+    }
+    # Remove empty items, for instance missing client_secret
+    payload = {k: v for k, v in temp_payload.items() if v is not None}
+
+    return _token_request(module_params, payload)
 
 
 def get_token(module_params):
     """ Obtains connection header with token for the authentication,
-        token already given or obtained from credentials
-        :param module_params: parameters of the module
-        :return: connection header
+        token already given or obtained from credentials
+    :param module_params: parameters of the module
+    :return: connection header
     """
     token = module_params.get('token')
-    base_url = module_params.get('auth_keycloak_url')
-    http_agent = module_params.get('http_agent')
-
-    if not base_url.lower().startswith(('http', 'https')):
-        raise KeycloakError("auth_url '%s' should either start with 'http' or 'https'."
% base_url) if token is None: - base_url = module_params.get('auth_keycloak_url') - validate_certs = module_params.get('validate_certs') - auth_realm = module_params.get('auth_realm') - client_id = module_params.get('auth_client_id') + auth_client_id = module_params.get('auth_client_id') + auth_client_secret = module_params.get('auth_client_secret') auth_username = module_params.get('auth_username') - auth_password = module_params.get('auth_password') - client_secret = module_params.get('auth_client_secret') - connection_timeout = module_params.get('connection_timeout') - auth_url = URL_TOKEN.format(url=base_url, realm=auth_realm) - temp_payload = { - 'grant_type': 'password', - 'client_id': client_id, - 'client_secret': client_secret, - 'username': auth_username, - 'password': auth_password, - } - # Remove empty items, for instance missing client_secret - payload = {k: v for k, v in temp_payload.items() if v is not None} - try: - r = json.loads(to_native(open_url(auth_url, method='POST', - validate_certs=validate_certs, http_agent=http_agent, timeout=connection_timeout, - data=urlencode(payload)).read())) - except ValueError as e: - raise KeycloakError( - 'API returned invalid JSON when trying to obtain access token from %s: %s' - % (auth_url, str(e))) - except Exception as e: - raise KeycloakError('Could not obtain access token from %s: %s' - % (auth_url, str(e))) + if auth_client_id is not None and auth_client_secret is not None and auth_username is None: + token = _request_token_using_client_credentials(module_params) + else: + token = _request_token_using_credentials(module_params) - try: - token = r['access_token'] - except KeyError: - raise KeycloakError( - 'Could not obtain access token from %s' % auth_url) return { 'Authorization': 'Bearer ' + token, 'Content-Type': 'application/json' @@ -272,6 +357,7 @@ class KeycloakAPI(object): """ Keycloak API access; Keycloak uses OAuth 2.0 to protect its API, an access token for which is obtained through OpenID connect """ + def __init__(self, module, connection_header): self.module = module self.baseurl = self.module.params.get('auth_keycloak_url') @@ -280,6 +366,87 @@ class KeycloakAPI(object): self.restheaders = connection_header self.http_agent = self.module.params.get('http_agent') + def _request(self, url, method, data=None): + """ Makes a request to Keycloak and returns the raw response. + If a 401 is returned, attempts to re-authenticate + using first the module's refresh_token (if provided) + and then the module's username/password (if provided). + On successful re-authentication, the new token is stored + in the restheaders for future requests. + + :param url: request path + :param method: request method (e.g., 'GET', 'POST', etc.) 
+        :param data: (optional) data for request
+        :return: raw API response
+        """
+        def make_request_catching_401():
+            try:
+                return open_url(url, method=method, data=data,
+                                http_agent=self.http_agent, headers=self.restheaders,
+                                timeout=self.connection_timeout,
+                                validate_certs=self.validate_certs)
+            except HTTPError as e:
+                if e.code != 401:
+                    raise e
+                return e
+
+        r = make_request_catching_401()
+
+        if isinstance(r, Exception):
+            # Try to refresh token and retry, if available
+            refresh_token = self.module.params.get('refresh_token')
+            if refresh_token is not None:
+                try:
+                    token = _request_token_using_refresh_token(self.module.params)
+                    self.restheaders['Authorization'] = 'Bearer ' + token
+
+                    r = make_request_catching_401()
+                except KeycloakError as e:
+                    # Token refresh returns 400 if the token is expired/invalid, so continue on if we get a 400
+                    if e.authError is not None and e.authError.code != 400:
+                        raise e
+
+        if isinstance(r, Exception):
+            # Try to re-auth with username/password, if available
+            auth_username = self.module.params.get('auth_username')
+            auth_password = self.module.params.get('auth_password')
+            if auth_username is not None and auth_password is not None:
+                token = _request_token_using_credentials(self.module.params)
+                self.restheaders['Authorization'] = 'Bearer ' + token
+
+                r = make_request_catching_401()
+
+        if isinstance(r, Exception):
+            # Try to re-auth with client_id and client_secret, if available
+            auth_client_id = self.module.params.get('auth_client_id')
+            auth_client_secret = self.module.params.get('auth_client_secret')
+            if auth_client_id is not None and auth_client_secret is not None:
+                try:
+                    token = _request_token_using_client_credentials(self.module.params)
+                    self.restheaders['Authorization'] = 'Bearer ' + token
+
+                    r = make_request_catching_401()
+                except KeycloakError as e:
+                    # The token endpoint returns 400 on an invalid client-credentials grant, so continue on if we get a 400
+                    if e.authError is not None and e.authError.code != 400:
+                        raise e
+
+        if isinstance(r, Exception):
+            # Either no re-auth options were available, or they all failed
+            raise r
+
+        return r
+
+    def _request_and_deserialize(self, url, method, data=None):
+        """ Wraps the _request method with JSON deserialization of the response.
+
+        :param url: request path
+        :param method: request method (e.g., 'GET', 'POST', etc.)
+        :param data: (optional) data for request
+        :return: deserialized API response
+        """
+        return json.loads(to_native(self._request(url, method, data).read()))
+
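The retry logic in _request() above reduces to a simple cascade: perform the request, and on a 401 walk an ordered list of re-authentication options, retrying after each one. A stripped-down sketch of that shape (the names and the PermissionError stand-in are hypothetical):

    def request_with_reauth(do_request, reauth_steps):
        # do_request() returns a response or raises PermissionError (standing in
        # for a 401 HTTPError); reauth_steps refresh credentials in priority
        # order: refresh_token, then username/password, then client_credentials.
        try:
            return do_request()
        except PermissionError:
            for reauth in reauth_steps:
                try:
                    reauth()
                    return do_request()
                except PermissionError:
                    continue
            raise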
 
     def get_realm_info_by_id(self, realm='master'):
         """ Obtain realm public info by id
 
@@ -289,16 +456,14 @@ class KeycloakAPI(object):
         realm_info_url = URL_REALM_INFO.format(url=self.baseurl, realm=realm)
 
         try:
-            return json.loads(to_native(open_url(realm_info_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
-                                                 timeout=self.connection_timeout,
-                                                 validate_certs=self.validate_certs).read()))
+            return self._request_and_deserialize(realm_info_url, method='GET')
         except HTTPError as e:
             if e.code == 404:
                 return None
             else:
-                self.fail_open_url(e, msg='Could not obtain realm %s: %s' % (realm, str(e)),
-                                   exception=traceback.format_exc())
+                self.fail_request(e, msg='Could not obtain realm %s: %s' % (realm, str(e)),
+                                  exception=traceback.format_exc())
         except ValueError as e:
             self.module.fail_json(msg='API returned incorrect JSON when trying to obtain realm %s: %s' % (realm, str(e)),
                                   exception=traceback.format_exc())
@@ -320,16 +485,14 @@ class KeycloakAPI(object):
         realm_keys_metadata_url = URL_REALM_KEYS_METADATA.format(url=self.baseurl, realm=realm)
 
         try:
-            return json.loads(to_native(open_url(realm_keys_metadata_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
-                                                 timeout=self.connection_timeout,
-                                                 validate_certs=self.validate_certs).read()))
+            return self._request_and_deserialize(realm_keys_metadata_url, method="GET")
         except HTTPError as e:
             if e.code == 404:
                 return None
             else:
-                self.fail_open_url(e, msg='Could not obtain realm %s: %s' % (realm, str(e)),
-                                   exception=traceback.format_exc())
+                self.fail_request(e, msg='Could not obtain realm %s: %s' % (realm, str(e)),
+                                  exception=traceback.format_exc())
         except ValueError as e:
             self.module.fail_json(msg='API returned incorrect JSON when trying to obtain realm %s: %s' % (realm, str(e)),
                                   exception=traceback.format_exc())
@@ -337,6 +500,8 @@ class KeycloakAPI(object):
             self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)),
                                   exception=traceback.format_exc())
 
+    # The Keycloak API expects the realm name (like `master`) not the ID when fetching the realm data.
+ # See the Keycloak API docs: https://www.keycloak.org/docs-api/latest/rest-api/#_realms_admin def get_realm_by_id(self, realm='master'): """ Obtain realm representation by id @@ -346,15 +511,14 @@ class KeycloakAPI(object): realm_url = URL_REALM.format(url=self.baseurl, realm=realm) try: - return json.loads(to_native(open_url(realm_url, method='GET', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(realm_url, method='GET') except HTTPError as e: if e.code == 404: return None else: - self.fail_open_url(e, msg='Could not obtain realm %s: %s' % (realm, str(e)), - exception=traceback.format_exc()) + self.fail_request(e, msg='Could not obtain realm %s: %s' % (realm, str(e)), + exception=traceback.format_exc()) except ValueError as e: self.module.fail_json(msg='API returned incorrect JSON when trying to obtain realm %s: %s' % (realm, str(e)), exception=traceback.format_exc()) @@ -371,11 +535,10 @@ class KeycloakAPI(object): realm_url = URL_REALM.format(url=self.baseurl, realm=realm) try: - return open_url(realm_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(realmrep), validate_certs=self.validate_certs) + return self._request(realm_url, method='PUT', data=json.dumps(realmrep)) except Exception as e: - self.fail_open_url(e, msg='Could not update realm %s: %s' % (realm, str(e)), - exception=traceback.format_exc()) + self.fail_request(e, msg='Could not update realm %s: %s' % (realm, str(e)), + exception=traceback.format_exc()) def create_realm(self, realmrep): """ Create a realm in keycloak @@ -385,11 +548,10 @@ class KeycloakAPI(object): realm_url = URL_REALMS.format(url=self.baseurl) try: - return open_url(realm_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(realmrep), validate_certs=self.validate_certs) + return self._request(realm_url, method='POST', data=json.dumps(realmrep)) except Exception as e: - self.fail_open_url(e, msg='Could not create realm %s: %s' % (realmrep['id'], str(e)), - exception=traceback.format_exc()) + self.fail_request(e, msg='Could not create realm %s: %s' % (realmrep['id'], str(e)), + exception=traceback.format_exc()) def delete_realm(self, realm="master"): """ Delete a realm from Keycloak @@ -400,11 +562,10 @@ class KeycloakAPI(object): realm_url = URL_REALM.format(url=self.baseurl, realm=realm) try: - return open_url(realm_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) + return self._request(realm_url, method='DELETE') except Exception as e: - self.fail_open_url(e, msg='Could not delete realm %s: %s' % (realm, str(e)), - exception=traceback.format_exc()) + self.fail_request(e, msg='Could not delete realm %s: %s' % (realm, str(e)), + exception=traceback.format_exc()) def get_clients(self, realm='master', filter=None): """ Obtains client representations for clients in a realm @@ -418,15 +579,13 @@ class KeycloakAPI(object): clientlist_url += '?clientId=%s' % filter try: - return json.loads(to_native(open_url(clientlist_url, http_agent=self.http_agent, method='GET', headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(clientlist_url, method='GET') except ValueError as e: self.module.fail_json(msg='API 
returned incorrect JSON when trying to obtain list of clients for realm %s: %s'
                                  % (realm, str(e)))
         except Exception as e:
-            self.fail_open_url(e, msg='Could not obtain list of clients for realm %s: %s'
-                               % (realm, str(e)))
+            self.fail_request(e, msg='Could not obtain list of clients for realm %s: %s'
+                              % (realm, str(e)))
 
     def get_client_by_clientid(self, client_id, realm='master'):
         """ Get client representation by clientId
@@ -450,16 +609,14 @@ class KeycloakAPI(object):
         client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id)
 
         try:
-            return json.loads(to_native(open_url(client_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
-                                                 timeout=self.connection_timeout,
-                                                 validate_certs=self.validate_certs).read()))
+            return self._request_and_deserialize(client_url, method='GET')
         except HTTPError as e:
             if e.code == 404:
                 return None
             else:
-                self.fail_open_url(e, msg='Could not obtain client %s for realm %s: %s'
-                                   % (id, realm, str(e)))
+                self.fail_request(e, msg='Could not obtain client %s for realm %s: %s'
+                                  % (id, realm, str(e)))
         except ValueError as e:
             self.module.fail_json(msg='API returned incorrect JSON when trying to obtain client %s for realm %s: %s'
                                   % (id, realm, str(e)))
@@ -490,11 +647,10 @@ class KeycloakAPI(object):
         client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id)
 
         try:
-            return open_url(client_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
-                            data=json.dumps(clientrep), validate_certs=self.validate_certs)
+            return self._request(client_url, method='PUT', data=json.dumps(clientrep))
         except Exception as e:
-            self.fail_open_url(e, msg='Could not update client %s in realm %s: %s'
-                               % (id, realm, str(e)))
+            self.fail_request(e, msg='Could not update client %s in realm %s: %s'
+                              % (id, realm, str(e)))
 
     def create_client(self, clientrep, realm="master"):
         """ Create a client in keycloak
@@ -505,11 +661,10 @@ class KeycloakAPI(object):
         client_url = URL_CLIENTS.format(url=self.baseurl, realm=realm)
 
         try:
-            return open_url(client_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
-                            data=json.dumps(clientrep), validate_certs=self.validate_certs)
+            return self._request(client_url, method='POST', data=json.dumps(clientrep))
         except Exception as e:
-            self.fail_open_url(e, msg='Could not create client %s in realm %s: %s'
-                               % (clientrep['clientId'], realm, str(e)))
+            self.fail_request(e, msg='Could not create client %s in realm %s: %s'
+                              % (clientrep['clientId'], realm, str(e)))
 
     def delete_client(self, id, realm="master"):
         """ Delete a client from Keycloak
@@ -521,11 +676,10 @@ class KeycloakAPI(object):
         client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id)
 
         try:
-            return open_url(client_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
-                            validate_certs=self.validate_certs)
+            return self._request(client_url, method='DELETE')
         except Exception as e:
-            self.fail_open_url(e, msg='Could not delete client %s in realm %s: %s'
-                               % (id, realm, str(e)))
+            self.fail_request(e, msg='Could not delete client %s in realm %s: %s'
+                              % (id, realm, str(e)))
 
     def get_client_roles_by_id(self, cid, realm="master"):
         """ Fetch the roles of a client on the Keycloak server.
@@ -536,12 +690,10 @@ class KeycloakAPI(object):
         """
         client_roles_url = URL_CLIENT_ROLES.format(url=self.baseurl, realm=realm, id=cid)
         try:
-            return json.loads(to_native(open_url(client_roles_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
-                                                 timeout=self.connection_timeout,
-                                                 validate_certs=self.validate_certs).read()))
+            return self._request_and_deserialize(client_roles_url, method="GET")
         except Exception as e:
-            self.fail_open_url(e, msg="Could not fetch rolemappings for client %s in realm %s: %s"
-                               % (cid, realm, str(e)))
+            self.fail_request(e, msg="Could not fetch rolemappings for client %s in realm %s: %s"
+                              % (cid, realm, str(e)))
 
     def get_client_role_id_by_name(self, cid, name, realm="master"):
         """ Get the role ID of a client.
@@ -568,15 +720,13 @@ class KeycloakAPI(object):
         """
         rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid)
         try:
-            rolemappings = json.loads(to_native(open_url(rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
-                                                          timeout=self.connection_timeout,
-                                                          validate_certs=self.validate_certs).read()))
+            rolemappings = self._request_and_deserialize(rolemappings_url, method="GET")
             for role in rolemappings:
                 if rid == role['id']:
                     return role
         except Exception as e:
-            self.fail_open_url(e, msg="Could not fetch rolemappings for client %s in group %s, realm %s: %s"
-                               % (cid, gid, realm, str(e)))
+            self.fail_request(e, msg="Could not fetch rolemappings for client %s in group %s, realm %s: %s"
+                              % (cid, gid, realm, str(e)))
         return None
 
     def get_client_group_available_rolemappings(self, gid, cid, realm="master"):
@@ -589,12 +739,10 @@ class KeycloakAPI(object):
         """
         available_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=gid, client=cid)
         try:
-            return json.loads(to_native(open_url(available_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
-                                                 timeout=self.connection_timeout,
-                                                 validate_certs=self.validate_certs).read()))
+            return self._request_and_deserialize(available_rolemappings_url, method="GET")
         except Exception as e:
-            self.fail_open_url(e, msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s"
-                               % (cid, gid, realm, str(e)))
+            self.fail_request(e, msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s"
+                              % (cid, gid, realm, str(e)))
 
     def get_client_group_composite_rolemappings(self, gid, cid, realm="master"):
         """ Fetch the composite role of a client in a specified group on the Keycloak server.
@@ -606,12 +754,10 @@ class KeycloakAPI(object):
         """
         composite_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=gid, client=cid)
         try:
-            return json.loads(to_native(open_url(composite_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
-                                                 timeout=self.connection_timeout,
-                                                 validate_certs=self.validate_certs).read()))
+            return self._request_and_deserialize(composite_rolemappings_url, method="GET")
         except Exception as e:
-            self.fail_open_url(e, msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s"
-                               % (cid, gid, realm, str(e)))
+            self.fail_request(e, msg="Could not fetch composite rolemappings for client %s in group %s, realm %s: %s"
+                              % (cid, gid, realm, str(e)))
 
     def get_role_by_id(self, rid, realm="master"):
         """ Fetch a role by its id on the Keycloak server.
@@ -622,12 +768,10 @@ class KeycloakAPI(object):
         """
         client_roles_url = URL_ROLES_BY_ID.format(url=self.baseurl, realm=realm, id=rid)
         try:
-            return json.loads(to_native(open_url(client_roles_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
-                                                 timeout=self.connection_timeout,
-                                                 validate_certs=self.validate_certs).read()))
+            return self._request_and_deserialize(client_roles_url, method="GET")
         except Exception as e:
-            self.fail_open_url(e, msg="Could not fetch role for id %s in realm %s: %s"
-                               % (rid, realm, str(e)))
+            self.fail_request(e, msg="Could not fetch role for id %s in realm %s: %s"
+                              % (rid, realm, str(e)))
 
     def get_client_roles_by_id_composite_rolemappings(self, rid, cid, realm="master"):
         """ Fetch the client composites of a role by its ID on the Keycloak server.
@@ -639,12 +783,10 @@ class KeycloakAPI(object):
         """
         client_roles_url = URL_ROLES_BY_ID_COMPOSITES_CLIENTS.format(url=self.baseurl, realm=realm, id=rid, cid=cid)
         try:
-            return json.loads(to_native(open_url(client_roles_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
-                                                 timeout=self.connection_timeout,
-                                                 validate_certs=self.validate_certs).read()))
+            return self._request_and_deserialize(client_roles_url, method="GET")
         except Exception as e:
-            self.fail_open_url(e, msg="Could not fetch role for id %s and cid %s in realm %s: %s"
-                               % (rid, cid, realm, str(e)))
+            self.fail_request(e, msg="Could not fetch role for id %s and cid %s in realm %s: %s"
+                              % (rid, cid, realm, str(e)))
 
     def add_client_roles_by_id_composite_rolemapping(self, rid, roles_rep, realm="master"):
         """ Assign roles to composite role
@@ -656,11 +798,10 @@ class KeycloakAPI(object):
         """
         available_rolemappings_url = URL_ROLES_BY_ID_COMPOSITES.format(url=self.baseurl, realm=realm, id=rid)
         try:
-            open_url(available_rolemappings_url, method="POST", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(roles_rep),
-                     validate_certs=self.validate_certs, timeout=self.connection_timeout)
+            self._request(available_rolemappings_url, method="POST", data=json.dumps(roles_rep))
         except Exception as e:
-            self.fail_open_url(e, msg="Could not assign roles to composite role %s and realm %s: %s"
-                               % (rid, realm, str(e)))
+            self.fail_request(e, msg="Could not assign roles to composite role %s and realm %s: %s"
+                              % (rid, realm, str(e)))
 
     def add_group_realm_rolemapping(self, gid, role_rep, realm="master"):
         """ Add the specified realm role to specified group on the Keycloak server.
@@ -672,11 +813,10 @@ class KeycloakAPI(object):
         """
         url = URL_REALM_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, group=gid)
         try:
-            open_url(url, method="POST", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
-                     validate_certs=self.validate_certs, timeout=self.connection_timeout)
+            self._request(url, method="POST", data=json.dumps(role_rep))
         except Exception as e:
-            self.fail_open_url(e, msg="Could add realm role mappings for group %s, realm %s: %s"
-                               % (gid, realm, str(e)))
+            self.fail_request(e, msg="Could not add realm role mappings for group %s, realm %s: %s"
+                              % (gid, realm, str(e)))
 
     def delete_group_realm_rolemapping(self, gid, role_rep, realm="master"):
         """ Delete the specified realm role from the specified group on the Keycloak server.
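For context on add_client_roles_by_id_composite_rolemapping() above: as far as I can tell from the Keycloak admin REST API, the roles_rep it serializes is a plain JSON array of role representations. A hypothetical payload:

    import json

    # Hypothetical role representations; the IDs are made up for illustration.
    roles_rep = [
        {'id': '11111111-2222-3333-4444-555555555555', 'name': 'offline_access'},
        {'id': '66666666-7777-8888-9999-000000000000', 'name': 'query-users'},
    ]
    data = json.dumps(roles_rep)  # body POSTed to the composites endpoint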
@@ -688,11 +828,10 @@
         """
         url = URL_REALM_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, group=gid)
         try:
-            open_url(url, method="DELETE", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
-                     validate_certs=self.validate_certs, timeout=self.connection_timeout)
+            self._request(url, method="DELETE", data=json.dumps(role_rep))
         except Exception as e:
-            self.fail_open_url(e, msg="Could not delete realm role mappings for group %s, realm %s: %s"
-                               % (gid, realm, str(e)))
+            self.fail_request(e, msg="Could not delete realm role mappings for group %s, realm %s: %s"
+                              % (gid, realm, str(e)))
 
     def add_group_rolemapping(self, gid, cid, role_rep, realm="master"):
         """ Add a client role to a specified group on the Keycloak server.
@@ -705,11 +844,10 @@
         """
         available_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid)
         try:
-            open_url(available_rolemappings_url, method="POST", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
-                     validate_certs=self.validate_certs, timeout=self.connection_timeout)
+            self._request(available_rolemappings_url, method="POST", data=json.dumps(role_rep))
         except Exception as e:
-            self.fail_open_url(e, msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s"
-                               % (cid, gid, realm, str(e)))
+            self.fail_request(e, msg="Could not add rolemappings for client %s in group %s, realm %s: %s"
+                              % (cid, gid, realm, str(e)))
 
     def delete_group_rolemapping(self, gid, cid, role_rep, realm="master"):
         """ Delete the rolemapping of a client in a specified group on the Keycloak server.
@@ -722,11 +860,10 @@
         """
         available_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid)
         try:
-            open_url(available_rolemappings_url, method="DELETE", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
-                     validate_certs=self.validate_certs, timeout=self.connection_timeout)
+            self._request(available_rolemappings_url, method="DELETE", data=json.dumps(role_rep))
         except Exception as e:
-            self.fail_open_url(e, msg="Could not delete available rolemappings for client %s in group %s, realm %s: %s"
-                               % (cid, gid, realm, str(e)))
+            self.fail_request(e, msg="Could not delete rolemappings for client %s in group %s, realm %s: %s"
+                              % (cid, gid, realm, str(e)))
 
     def get_client_user_rolemapping_by_id(self, uid, cid, rid, realm='master'):
         """ Obtain a client role mapping of a user by role ID
@@ -739,15 +876,13 @@
         """
         rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid, client=cid)
         try:
-            rolemappings = json.loads(to_native(open_url(rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
-                                                          timeout=self.connection_timeout,
-                                                          validate_certs=self.validate_certs).read()))
+            rolemappings = self._request_and_deserialize(rolemappings_url, method="GET")
             for role in rolemappings:
                 if rid == role['id']:
                     return role
         except Exception as e:
-            self.fail_open_url(e, msg="Could not fetch rolemappings for client %s and user %s, realm %s: %s"
-                               % (cid, uid, realm, str(e)))
+            self.fail_request(e, msg="Could not fetch rolemappings for client %s and user %s, realm %s: %s"
+                              % (cid, uid, realm, str(e)))
         return None
 
     def get_client_user_available_rolemappings(self, uid, cid, realm="master"):
@@ -760,12 +895,10 @@ class KeycloakAPI(object):
         """
         available_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=uid, client=cid)
         try:
-            return json.loads(to_native(open_url(available_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
-                                                 timeout=self.connection_timeout,
-                                                 validate_certs=self.validate_certs).read()))
+            return self._request_and_deserialize(available_rolemappings_url, method="GET")
         except Exception as e:
-            self.fail_open_url(e, msg="Could not fetch effective rolemappings for client %s and user %s, realm %s: %s"
-                               % (cid, uid, realm, str(e)))
+            self.fail_request(e, msg="Could not fetch available rolemappings for client %s and user %s, realm %s: %s"
+                              % (cid, uid, realm, str(e)))
 
     def get_client_user_composite_rolemappings(self, uid, cid, realm="master"):
         """ Fetch the composite role of a client for a specified user on the Keycloak server.
@@ -777,12 +910,10 @@ class KeycloakAPI(object):
         """
         composite_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=uid, client=cid)
         try:
-            return json.loads(to_native(open_url(composite_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
-                                                 timeout=self.connection_timeout,
-                                                 validate_certs=self.validate_certs).read()))
+            return self._request_and_deserialize(composite_rolemappings_url, method="GET")
         except Exception as e:
-            self.fail_open_url(e, msg="Could not fetch available rolemappings for user %s of realm %s: %s"
-                               % (uid, realm, str(e)))
+            self.fail_request(e, msg="Could not fetch composite rolemappings for user %s of realm %s: %s"
+                              % (uid, realm, str(e)))
 
     def get_realm_user_rolemapping_by_id(self, uid, rid, realm='master'):
         """ Obtain role representation by id
@@ -794,15 +925,13 @@ class KeycloakAPI(object):
         """
         rolemappings_url = URL_REALM_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid)
         try:
-            rolemappings = json.loads(to_native(open_url(rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
-                                                          timeout=self.connection_timeout,
-                                                          validate_certs=self.validate_certs).read()))
+            rolemappings = self._request_and_deserialize(rolemappings_url, method="GET")
             for role in rolemappings:
                 if rid == role['id']:
                     return role
         except Exception as e:
-            self.fail_open_url(e, msg="Could not fetch rolemappings for user %s, realm %s: %s"
-                               % (uid, realm, str(e)))
+            self.fail_request(e, msg="Could not fetch rolemappings for user %s, realm %s: %s"
+                              % (uid, realm, str(e)))
         return None
 
     def get_realm_user_available_rolemappings(self, uid, realm="master"):
@@ -814,12 +943,10 @@ class KeycloakAPI(object):
         """
         available_rolemappings_url = URL_REALM_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=uid)
         try:
-            return json.loads(to_native(open_url(available_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
-                                                 timeout=self.connection_timeout,
-                                                 validate_certs=self.validate_certs).read()))
+            return self._request_and_deserialize(available_rolemappings_url, method="GET")
         except Exception as e:
-            self.fail_open_url(e, msg="Could not fetch available rolemappings for user %s of realm %s: %s"
-                               % (uid, realm, str(e)))
+            self.fail_request(e, msg="Could not fetch available rolemappings for user %s of realm %s: %s"
+                              % (uid, realm, str(e)))
 
     def get_realm_user_composite_rolemappings(self, uid, realm="master"):
         """ Fetch the composite role of a realm for a specified user on the Keycloak server.
@@ -830,12 +957,10 @@ class KeycloakAPI(object): """ composite_rolemappings_url = URL_REALM_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=uid) try: - return json.loads(to_native(open_url(composite_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(composite_rolemappings_url, method="GET") except Exception as e: - self.fail_open_url(e, msg="Could not fetch effective rolemappings for user %s, realm %s: %s" - % (uid, realm, str(e))) + self.fail_request(e, msg="Could not fetch effective rolemappings for user %s, realm %s: %s" + % (uid, realm, str(e))) def get_user_by_username(self, username, realm="master"): """ Fetch a keycloak user within a realm based on its username. @@ -848,9 +973,7 @@ class KeycloakAPI(object): users_url += '?username=%s&exact=true' % username try: userrep = None - users = json.loads(to_native(open_url(users_url, method='GET', http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + users = self._request_and_deserialize(users_url, method='GET') for user in users: if user['username'] == username: userrep = user @@ -861,8 +984,8 @@ class KeycloakAPI(object): self.module.fail_json(msg='API returned incorrect JSON when trying to obtain the user for realm %s and username %s: %s' % (realm, username, str(e))) except Exception as e: - self.fail_open_url(e, msg='Could not obtain the user for realm %s and username %s: %s' - % (realm, username, str(e))) + self.fail_request(e, msg='Could not obtain the user for realm %s and username %s: %s' + % (realm, username, str(e))) def get_service_account_user_by_client_id(self, client_id, realm="master"): """ Fetch a keycloak service account user within a realm based on its client_id. @@ -875,15 +998,13 @@ class KeycloakAPI(object): service_account_user_url = URL_CLIENT_SERVICE_ACCOUNT_USER.format(url=self.baseurl, realm=realm, id=cid) try: - return json.loads(to_native(open_url(service_account_user_url, method='GET', http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(service_account_user_url, method='GET') except ValueError as e: self.module.fail_json(msg='API returned incorrect JSON when trying to obtain the service-account-user for realm %s and client_id %s: %s' % (realm, client_id, str(e))) except Exception as e: - self.fail_open_url(e, msg='Could not obtain the service-account-user for realm %s and client_id %s: %s' - % (realm, client_id, str(e))) + self.fail_request(e, msg='Could not obtain the service-account-user for realm %s and client_id %s: %s' + % (realm, client_id, str(e))) def add_user_rolemapping(self, uid, cid, role_rep, realm="master"): """ Assign a realm or client role to a specified user on the Keycloak server. 
@@ -897,19 +1018,17 @@ class KeycloakAPI(object):
         if cid is None:
             user_realm_rolemappings_url = URL_REALM_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid)
             try:
-                open_url(user_realm_rolemappings_url, method="POST", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
-                         validate_certs=self.validate_certs, timeout=self.connection_timeout)
+                self._request(user_realm_rolemappings_url, method="POST", data=json.dumps(role_rep))
             except Exception as e:
-                self.fail_open_url(e, msg="Could not map roles to userId %s for realm %s and roles %s: %s"
-                                   % (uid, realm, json.dumps(role_rep), str(e)))
+                self.fail_request(e, msg="Could not map roles to userId %s for realm %s and roles %s: %s"
+                                  % (uid, realm, json.dumps(role_rep), str(e)))
         else:
             user_client_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid, client=cid)
             try:
-                open_url(user_client_rolemappings_url, method="POST", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
-                         validate_certs=self.validate_certs, timeout=self.connection_timeout)
+                self._request(user_client_rolemappings_url, method="POST", data=json.dumps(role_rep))
             except Exception as e:
-                self.fail_open_url(e, msg="Could not map roles to userId %s for client %s, realm %s and roles %s: %s"
-                                   % (cid, uid, realm, json.dumps(role_rep), str(e)))
+                self.fail_request(e, msg="Could not map roles to userId %s for client %s, realm %s and roles %s: %s"
+                                  % (uid, cid, realm, json.dumps(role_rep), str(e)))
 
     def delete_user_rolemapping(self, uid, cid, role_rep, realm="master"):
         """ Delete the rolemapping of a client for a specified user on the Keycloak server.
@@ -923,19 +1042,17 @@ class KeycloakAPI(object):
         if cid is None:
             user_realm_rolemappings_url = URL_REALM_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid)
             try:
-                open_url(user_realm_rolemappings_url, method="DELETE", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
-                         validate_certs=self.validate_certs, timeout=self.connection_timeout)
+                self._request(user_realm_rolemappings_url, method="DELETE", data=json.dumps(role_rep))
             except Exception as e:
-                self.fail_open_url(e, msg="Could not remove roles %s from userId %s, realm %s: %s"
-                                   % (json.dumps(role_rep), uid, realm, str(e)))
+                self.fail_request(e, msg="Could not remove roles %s from userId %s, realm %s: %s"
+                                  % (json.dumps(role_rep), uid, realm, str(e)))
         else:
             user_client_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid, client=cid)
             try:
-                open_url(user_client_rolemappings_url, method="DELETE", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
-                         validate_certs=self.validate_certs, timeout=self.connection_timeout)
+                self._request(user_client_rolemappings_url, method="DELETE", data=json.dumps(role_rep))
             except Exception as e:
-                self.fail_open_url(e, msg="Could not remove roles %s for client %s from userId %s, realm %s: %s"
-                                   % (json.dumps(role_rep), cid, uid, realm, str(e)))
+                self.fail_request(e, msg="Could not remove roles %s for client %s from userId %s, realm %s: %s"
+                                  % (json.dumps(role_rep), cid, uid, realm, str(e)))
 
     def get_client_templates(self, realm='master'):
         """ Obtains client template representations for client templates in a realm
@@ -946,14 +1063,13 @@ class KeycloakAPI(object):
         url = URL_CLIENTTEMPLATES.format(url=self.baseurl, realm=realm)
 
         try:
-            return json.loads(to_native(open_url(url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
-                                                 timeout=self.connection_timeout,
-                                                 validate_certs=self.validate_certs).read()))
+            return self._request_and_deserialize(url, method='GET')
         except ValueError as e:
             self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of client templates for realm %s: %s'
                                   % (realm, str(e)))
         except Exception as e:
-            self.fail_open_url(e, msg='Could not obtain list of client templates for realm %s: %s'
-                               % (realm, str(e)))
+            self.fail_request(e, msg='Could not obtain list of client templates for realm %s: %s'
+                              % (realm, str(e)))
 
     def get_client_template_by_id(self, id, realm='master'):
         """ Obtain client template representation by id
@@ -965,14 +1081,13 @@ class KeycloakAPI(object):
         url = URL_CLIENTTEMPLATE.format(url=self.baseurl, id=id, realm=realm)
 
         try:
-            return json.loads(to_native(open_url(url, method='GET', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
-                                                 validate_certs=self.validate_certs).read()))
+            return self._request_and_deserialize(url, method='GET')
         except ValueError as e:
             self.module.fail_json(msg='API returned incorrect JSON when trying to obtain client templates %s for realm %s: %s'
                                   % (id, realm, str(e)))
         except Exception as e:
-            self.fail_open_url(e, msg='Could not obtain client template %s for realm %s: %s'
-                               % (id, realm, str(e)))
+            self.fail_request(e, msg='Could not obtain client template %s for realm %s: %s'
+                              % (id, realm, str(e)))
 
     def get_client_template_by_name(self, name, realm='master'):
         """ Obtain client template representation by name
@@ -1011,11 +1126,10 @@ class KeycloakAPI(object):
         url = URL_CLIENTTEMPLATE.format(url=self.baseurl, realm=realm, id=id)
 
         try:
-            return open_url(url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
-                            data=json.dumps(clienttrep), validate_certs=self.validate_certs)
+            return self._request(url, method='PUT', data=json.dumps(clienttrep))
         except Exception as e:
-            self.fail_open_url(e, msg='Could not update client template %s in realm %s: %s'
-                               % (id, realm, str(e)))
+            self.fail_request(e, msg='Could not update client template %s in realm %s: %s'
+                              % (id, realm, str(e)))
 
     def create_client_template(self, clienttrep, realm="master"):
         """ Create a client template in keycloak
@@ -1026,11 +1140,10 @@ class KeycloakAPI(object):
         url = URL_CLIENTTEMPLATES.format(url=self.baseurl, realm=realm)
 
         try:
-            return open_url(url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
-                            data=json.dumps(clienttrep), validate_certs=self.validate_certs)
+            return self._request(url, method='POST', data=json.dumps(clienttrep))
         except Exception as e:
-            self.fail_open_url(e, msg='Could not create client template %s in realm %s: %s'
-                               % (clienttrep['clientId'], realm, str(e)))
+            self.fail_request(e, msg='Could not create client template %s in realm %s: %s'
+                              % (clienttrep['clientId'], realm, str(e)))
 
     def delete_client_template(self, id, realm="master"):
         """ Delete a client template from Keycloak
@@ -1042,11 +1155,10 @@ class KeycloakAPI(object):
         url = URL_CLIENTTEMPLATE.format(url=self.baseurl, realm=realm, id=id)
 
         try:
-            return open_url(url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
-                            validate_certs=self.validate_certs)
+            return self._request(url, method='DELETE')
         except Exception as e:
-            self.fail_open_url(e, msg='Could not delete client template %s in realm %s: %s'
-                               % (id, realm, str(e)))
+            self.fail_request(e, msg='Could not delete client template %s in realm %s: %s'
+                              % (id, realm, str(e)))
 
     def get_clientscopes(self, realm="master"):
         """ Fetch the name and ID of all clientscopes on the Keycloak server.
 
@@ -1059,12 +1171,10 @@ class KeycloakAPI(object):
         """
         clientscopes_url = URL_CLIENTSCOPES.format(url=self.baseurl, realm=realm)
         try:
-            return json.loads(to_native(open_url(clientscopes_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
-                                                 timeout=self.connection_timeout,
-                                                 validate_certs=self.validate_certs).read()))
+            return self._request_and_deserialize(clientscopes_url, method="GET")
        except Exception as e:
-            self.fail_open_url(e, msg="Could not fetch list of clientscopes in realm %s: %s"
-                               % (realm, str(e)))
+            self.fail_request(e, msg="Could not fetch list of clientscopes in realm %s: %s"
+                              % (realm, str(e)))
 
     def get_clientscope_by_clientscopeid(self, cid, realm="master"):
         """ Fetch a keycloak clientscope from the provided realm using the clientscope's unique ID.
 
@@ -1077,16 +1187,14 @@ class KeycloakAPI(object):
         """
         clientscope_url = URL_CLIENTSCOPE.format(url=self.baseurl, realm=realm, id=cid)
         try:
-            return json.loads(to_native(open_url(clientscope_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
-                                                 timeout=self.connection_timeout,
-                                                 validate_certs=self.validate_certs).read()))
+            return self._request_and_deserialize(clientscope_url, method="GET")
         except HTTPError as e:
             if e.code == 404:
                 return None
             else:
-                self.fail_open_url(e, msg="Could not fetch clientscope %s in realm %s: %s"
-                                   % (cid, realm, str(e)))
+                self.fail_request(e, msg="Could not fetch clientscope %s in realm %s: %s"
+                                  % (cid, realm, str(e)))
         except Exception as e:
             self.module.fail_json(msg="Could not fetch clientscope %s in realm %s: %s"
                                   % (cid, realm, str(e)))
@@ -1123,11 +1231,10 @@ class KeycloakAPI(object):
         """
         clientscopes_url = URL_CLIENTSCOPES.format(url=self.baseurl, realm=realm)
         try:
-            return open_url(clientscopes_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
-                            data=json.dumps(clientscoperep), validate_certs=self.validate_certs)
+            return self._request(clientscopes_url, method='POST', data=json.dumps(clientscoperep))
         except Exception as e:
-            self.fail_open_url(e, msg="Could not create clientscope %s in realm %s: %s"
-                               % (clientscoperep['name'], realm, str(e)))
+            self.fail_request(e, msg="Could not create clientscope %s in realm %s: %s"
+                              % (clientscoperep['name'], realm, str(e)))
 
     def update_clientscope(self, clientscoperep, realm="master"):
         """ Update an existing clientscope.
 
@@ -1138,12 +1245,11 @@ class KeycloakAPI(object):
         clientscope_url = URL_CLIENTSCOPE.format(url=self.baseurl, realm=realm, id=clientscoperep['id'])
 
         try:
-            return open_url(clientscope_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
-                            data=json.dumps(clientscoperep), validate_certs=self.validate_certs)
+            return self._request(clientscope_url, method='PUT', data=json.dumps(clientscoperep))
         except Exception as e:
-            self.fail_open_url(e, msg='Could not update clientscope %s in realm %s: %s'
-                               % (clientscoperep['name'], realm, str(e)))
+            self.fail_request(e, msg='Could not update clientscope %s in realm %s: %s'
+                              % (clientscoperep['name'], realm, str(e)))
 
     def delete_clientscope(self, name=None, cid=None, realm="master"):
         """ Delete a clientscope. One of name or cid must be provided.
 
@@ -1176,11 +1282,10 @@ class KeycloakAPI(object):
         # should have a good cid by here.
         clientscope_url = URL_CLIENTSCOPE.format(realm=realm, id=cid, url=self.baseurl)
         try:
-            return open_url(clientscope_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
-                            validate_certs=self.validate_certs)
+            return self._request(clientscope_url, method='DELETE')
         except Exception as e:
-            self.fail_open_url(e, msg="Unable to delete clientscope %s: %s" % (cid, str(e)))
+            self.fail_request(e, msg="Unable to delete clientscope %s: %s" % (cid, str(e)))
 
     def get_clientscope_protocolmappers(self, cid, realm="master"):
         """ Fetch the name and ID of all protocolmappers of a clientscope on the Keycloak server.
 
@@ -1194,12 +1299,10 @@ class KeycloakAPI(object):
         """
         protocolmappers_url = URL_CLIENTSCOPE_PROTOCOLMAPPERS.format(id=cid, url=self.baseurl, realm=realm)
         try:
-            return json.loads(to_native(open_url(protocolmappers_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
-                                                 timeout=self.connection_timeout,
-                                                 validate_certs=self.validate_certs).read()))
+            return self._request_and_deserialize(protocolmappers_url, method="GET")
         except Exception as e:
-            self.fail_open_url(e, msg="Could not fetch list of protocolmappers in realm %s: %s"
-                               % (realm, str(e)))
+            self.fail_request(e, msg="Could not fetch list of protocolmappers in realm %s: %s"
+                              % (realm, str(e)))
 
     def get_clientscope_protocolmapper_by_protocolmapperid(self, pid, cid, realm="master"):
         """ Fetch a keycloak protocolmapper of a clientscope from the provided realm using the protocolmapper's unique ID.
 
@@ -1214,16 +1317,14 @@ class KeycloakAPI(object):
         """
         protocolmapper_url = URL_CLIENTSCOPE_PROTOCOLMAPPER.format(url=self.baseurl, realm=realm, id=cid, mapper_id=pid)
         try:
-            return json.loads(to_native(open_url(protocolmapper_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
-                                                 timeout=self.connection_timeout,
-                                                 validate_certs=self.validate_certs).read()))
+            return self._request_and_deserialize(protocolmapper_url, method="GET")
         except HTTPError as e:
             if e.code == 404:
                 return None
             else:
-                self.fail_open_url(e, msg="Could not fetch protocolmapper %s in realm %s: %s"
-                                   % (pid, realm, str(e)))
+                self.fail_request(e, msg="Could not fetch protocolmapper %s in realm %s: %s"
+                                  % (pid, realm, str(e)))
         except Exception as e:
             self.module.fail_json(msg="Could not fetch protocolmapper %s in realm %s: %s"
                                   % (cid, realm, str(e)))
@@ -1262,11 +1363,10 @@ class KeycloakAPI(object):
         """
         protocolmappers_url = URL_CLIENTSCOPE_PROTOCOLMAPPERS.format(url=self.baseurl, id=cid, realm=realm)
         try:
-            return open_url(protocolmappers_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
-                            data=json.dumps(mapper_rep), validate_certs=self.validate_certs)
+            return self._request(protocolmappers_url, method='POST', data=json.dumps(mapper_rep))
         except Exception as e:
-            self.fail_open_url(e, msg="Could not create protocolmapper %s in realm %s: %s"
-                               % (mapper_rep['name'], realm, str(e)))
+            self.fail_request(e, msg="Could not create protocolmapper %s in realm %s: %s"
+                              % (mapper_rep['name'], realm, str(e)))
 
     def update_clientscope_protocolmappers(self, cid, mapper_rep, realm="master"):
         """ Update an existing protocolmapper of a clientscope.
@@ -1278,12 +1378,11 @@ class KeycloakAPI(object):
         protocolmapper_url = URL_CLIENTSCOPE_PROTOCOLMAPPER.format(url=self.baseurl, realm=realm, id=cid, mapper_id=mapper_rep['id'])
 
         try:
-            return open_url(protocolmapper_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
-                            data=json.dumps(mapper_rep), validate_certs=self.validate_certs)
+            return self._request(protocolmapper_url, method='PUT', data=json.dumps(mapper_rep))
         except Exception as e:
-            self.fail_open_url(e, msg='Could not update protocolmappers for clientscope %s in realm %s: %s'
-                               % (mapper_rep, realm, str(e)))
+            self.fail_request(e, msg='Could not update protocolmappers for clientscope %s in realm %s: %s'
+                              % (mapper_rep, realm, str(e)))
 
     def get_default_clientscopes(self, realm, client_id=None):
         """Fetch the name and ID of all clientscopes on the Keycloak server.
@@ -1326,18 +1425,16 @@ class KeycloakAPI(object):
         if client_id is None:
             clientscopes_url = url_template.format(url=self.baseurl, realm=realm)
             try:
-                return json.loads(to_native(open_url(clientscopes_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
-                                                     timeout=self.connection_timeout, validate_certs=self.validate_certs).read()))
+                return self._request_and_deserialize(clientscopes_url, method="GET")
             except Exception as e:
-                self.fail_open_url(e, msg="Could not fetch list of %s clientscopes in realm %s: %s" % (scope_type, realm, str(e)))
+                self.fail_request(e, msg="Could not fetch list of %s clientscopes in realm %s: %s" % (scope_type, realm, str(e)))
         else:
             cid = self.get_client_id(client_id=client_id, realm=realm)
             clientscopes_url = url_template.format(url=self.baseurl, realm=realm, cid=cid)
             try:
-                return json.loads(to_native(open_url(clientscopes_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
-                                                     timeout=self.connection_timeout, validate_certs=self.validate_certs).read()))
+                return self._request_and_deserialize(clientscopes_url, method="GET")
             except Exception as e:
-                self.fail_open_url(e, msg="Could not fetch list of %s clientscopes in client %s: %s" % (scope_type, client_id, clientscopes_url))
+                self.fail_request(e, msg="Could not fetch list of %s clientscopes in client %s: %s" % (scope_type, client_id, str(e)))
 
     def _decide_url_type_clientscope(self, client_id=None, scope_type="default"):
         """Decides which url to use.
@@ -1403,12 +1500,11 @@ class KeycloakAPI(object): clientscope_type_url = self._decide_url_type_clientscope(client_id, scope_type).format(realm=realm, id=id, cid=cid, url=self.baseurl) try: method = 'PUT' if action == "add" else 'DELETE' - return open_url(clientscope_type_url, method=method, http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) + return self._request(clientscope_type_url, method=method) except Exception as e: place = 'realm' if client_id is None else 'client ' + client_id - self.fail_open_url(e, msg="Unable to %s %s clientscope %s @ %s : %s" % (action, scope_type, id, place, str(e))) + self.fail_request(e, msg="Unable to %s %s clientscope %s @ %s : %s" % (action, scope_type, id, place, str(e))) def create_clientsecret(self, id, realm="master"): """ Generate a new client secret by id @@ -1420,16 +1516,14 @@ class KeycloakAPI(object): clientsecret_url = URL_CLIENTSECRET.format(url=self.baseurl, realm=realm, id=id) try: - return json.loads(to_native(open_url(clientsecret_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(clientsecret_url, method='POST') except HTTPError as e: if e.code == 404: return None else: - self.fail_open_url(e, msg='Could not obtain clientsecret of client %s for realm %s: %s' - % (id, realm, str(e))) + self.fail_request(e, msg='Could not obtain clientsecret of client %s for realm %s: %s' + % (id, realm, str(e))) except Exception as e: self.module.fail_json(msg='Could not obtain clientsecret of client %s for realm %s: %s' % (id, realm, str(e))) @@ -1444,16 +1538,14 @@ class KeycloakAPI(object): clientsecret_url = URL_CLIENTSECRET.format(url=self.baseurl, realm=realm, id=id) try: - return json.loads(to_native(open_url(clientsecret_url, method='GET', http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(clientsecret_url, method='GET') except HTTPError as e: if e.code == 404: return None else: - self.fail_open_url(e, msg='Could not obtain clientsecret of client %s for realm %s: %s' - % (id, realm, str(e))) + self.fail_request(e, msg='Could not obtain clientsecret of client %s for realm %s: %s' + % (id, realm, str(e))) except Exception as e: self.module.fail_json(msg='Could not obtain clientsecret of client %s for realm %s: %s' % (id, realm, str(e))) @@ -1468,12 +1560,10 @@ class KeycloakAPI(object): """ groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm) try: - return json.loads(to_native(open_url(groups_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(groups_url, method="GET") except Exception as e: - self.fail_open_url(e, msg="Could not fetch list of groups in realm %s: %s" - % (realm, str(e))) + self.fail_request(e, msg="Could not fetch list of groups in realm %s: %s" + % (realm, str(e))) def get_group_by_groupid(self, gid, realm="master"): """ Fetch a keycloak group from the provided realm using the group's unique ID. 
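A side note on the clientscope-type hunk above: assigning or unassigning a default/optional clientscope hits the same endpoint with the HTTP verb flipped, which is all the method = 'PUT' if action == "add" else 'DELETE' line does. A condensed sketch:

    def clientscope_type_method(action):
        # 'add' assigns the scope (PUT); anything else unassigns it (DELETE).
        return 'PUT' if action == 'add' else 'DELETE'

    assert clientscope_type_method('add') == 'PUT'
    assert clientscope_type_method('remove') == 'DELETE'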
@@ -1486,15 +1576,13 @@ class KeycloakAPI(object): """ groups_url = URL_GROUP.format(url=self.baseurl, realm=realm, groupid=gid) try: - return json.loads(to_native(open_url(groups_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(groups_url, method="GET") except HTTPError as e: if e.code == 404: return None else: - self.fail_open_url(e, msg="Could not fetch group %s in realm %s: %s" - % (gid, realm, str(e))) + self.fail_request(e, msg="Could not fetch group %s in realm %s: %s" + % (gid, realm, str(e))) except Exception as e: self.module.fail_json(msg="Could not fetch group %s in realm %s: %s" % (gid, realm, str(e))) @@ -1507,10 +1595,8 @@ class KeycloakAPI(object): if parent['subGroupCount'] == 0: group_children = [] else: - group_children_url = URL_GROUP_CHILDREN.format(url=self.baseurl, realm=realm, groupid=parent['id']) - group_children = json.loads(to_native(open_url(group_children_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + group_children_url = URL_GROUP_CHILDREN.format(url=self.baseurl, realm=realm, groupid=parent['id']) + "?max=" + str(parent['subGroupCount']) + group_children = self._request_and_deserialize(group_children_url, method="GET") subgroups = group_children else: subgroups = parent['subGroups'] @@ -1528,7 +1614,6 @@ class KeycloakAPI(object): :param realm: Realm in which the group resides; default 'master' :param parents: Optional list of parents when group to look for is a subgroup """ - groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm) try: if parents: parent = self.get_subgroup_direct_parent(parents, realm) @@ -1654,11 +1739,10 @@ class KeycloakAPI(object): """ groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm) try: - return open_url(groups_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(grouprep), validate_certs=self.validate_certs) + return self._request(groups_url, method='POST', data=json.dumps(grouprep)) except Exception as e: - self.fail_open_url(e, msg="Could not create group %s in realm %s: %s" - % (grouprep['name'], realm, str(e))) + self.fail_request(e, msg="Could not create group %s in realm %s: %s" + % (grouprep['name'], realm, str(e))) def create_subgroup(self, parents, grouprep, realm="master"): """ Create a Keycloak subgroup. @@ -1682,11 +1766,10 @@ class KeycloakAPI(object): parent_id = parent_id["id"] url = URL_GROUP_CHILDREN.format(url=self.baseurl, realm=realm, groupid=parent_id) - return open_url(url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(grouprep), validate_certs=self.validate_certs) + return self._request(url, method='POST', data=json.dumps(grouprep)) except Exception as e: - self.fail_open_url(e, msg="Could not create subgroup %s for parent group %s in realm %s: %s" - % (grouprep['name'], parent_id, realm, str(e))) + self.fail_request(e, msg="Could not create subgroup %s for parent group %s in realm %s: %s" + % (grouprep['name'], parent_id, realm, str(e))) def update_group(self, grouprep, realm="master"): """ Update an existing group. 
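The `?max=` suffix added above is a behavior fix worth calling out: newer Keycloak releases page the group children listing, so without it only the first page of subgroups would come back. Since the parent's representation already carries subGroupCount, requesting exactly that many children returns the complete list. Schematically, with illustrative values:

    # Illustrative values; the real URL is built from URL_GROUP_CHILDREN.
    base = 'https://keycloak.example.com/admin/realms/myrealm'
    parent = {'id': 'c0ffee00-0000-0000-0000-000000000000', 'subGroupCount': 25}
    group_children_url = '%s/groups/%s/children?max=%d' % (base, parent['id'], parent['subGroupCount'])
    # A GET on group_children_url now yields all 25 children instead of the default page.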
@@ -1697,11 +1780,10 @@ class KeycloakAPI(object): group_url = URL_GROUP.format(url=self.baseurl, realm=realm, groupid=grouprep['id']) try: - return open_url(group_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(grouprep), validate_certs=self.validate_certs) + return self._request(group_url, method='PUT', data=json.dumps(grouprep)) except Exception as e: - self.fail_open_url(e, msg='Could not update group %s in realm %s: %s' - % (grouprep['name'], realm, str(e))) + self.fail_request(e, msg='Could not update group %s in realm %s: %s' + % (grouprep['name'], realm, str(e))) def delete_group(self, name=None, groupid=None, realm="master"): """ Delete a group. One of name or groupid must be provided. @@ -1734,10 +1816,9 @@ class KeycloakAPI(object): # should have a good groupid by here. group_url = URL_GROUP.format(realm=realm, groupid=groupid, url=self.baseurl) try: - return open_url(group_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) + return self._request(group_url, method='DELETE') except Exception as e: - self.fail_open_url(e, msg="Unable to delete group %s: %s" % (groupid, str(e))) + self.fail_request(e, msg="Unable to delete group %s: %s" % (groupid, str(e))) def get_realm_roles(self, realm='master'): """ Obtains role representations for roles in a realm @@ -1747,15 +1828,13 @@ class KeycloakAPI(object): """ rolelist_url = URL_REALM_ROLES.format(url=self.baseurl, realm=realm) try: - return json.loads(to_native(open_url(rolelist_url, method='GET', http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(rolelist_url, method='GET') except ValueError as e: self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of roles for realm %s: %s' % (realm, str(e))) except Exception as e: - self.fail_open_url(e, msg='Could not obtain list of roles for realm %s: %s' - % (realm, str(e))) + self.fail_request(e, msg='Could not obtain list of roles for realm %s: %s' + % (realm, str(e))) def get_realm_role(self, name, realm='master'): """ Fetch a keycloak role from the provided realm using the role's name. 
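For context on the group hunks above: update_group PUTs a full group representation keyed by grouprep['id'], while delete_group accepts either a groupid or a name (a name is resolved with an extra lookup first). A hypothetical sequence, again assuming an authenticated instance `kc`:

    grouprep = kc.get_group_by_name('developers', realm='myrealm')
    if grouprep:
        grouprep['attributes'] = {'team': ['platform']}  # attribute values are string lists
        kc.update_group(grouprep, realm='myrealm')       # PUT keyed by grouprep['id']

    # With only a name, delete_group looks up the id before issuing the DELETE.
    kc.delete_group(name='obsolete-group', realm='myrealm')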
@@ -1766,14 +1845,13 @@ class KeycloakAPI(object): """ role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name, safe='')) try: - return json.loads(to_native(open_url(role_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(role_url, method="GET") except HTTPError as e: if e.code == 404: return None else: - self.fail_open_url(e, msg='Could not fetch role %s in realm %s: %s' - % (name, realm, str(e))) + self.fail_request(e, msg='Could not fetch role %s in realm %s: %s' + % (name, realm, str(e))) except Exception as e: self.module.fail_json(msg='Could not fetch role %s in realm %s: %s' % (name, realm, str(e))) @@ -1789,11 +1867,10 @@ class KeycloakAPI(object): if "composites" in rolerep: keycloak_compatible_composites = self.convert_role_composites(rolerep["composites"]) rolerep["composites"] = keycloak_compatible_composites - return open_url(roles_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(rolerep), validate_certs=self.validate_certs) + return self._request(roles_url, method='POST', data=json.dumps(rolerep)) except Exception as e: - self.fail_open_url(e, msg='Could not create role %s in realm %s: %s' - % (rolerep['name'], realm, str(e))) + self.fail_request(e, msg='Could not create role %s in realm %s: %s' + % (rolerep['name'], realm, str(e))) def update_realm_role(self, rolerep, realm='master'): """ Update an existing realm role. @@ -1807,14 +1884,13 @@ class KeycloakAPI(object): if "composites" in rolerep: composites = copy.deepcopy(rolerep["composites"]) del rolerep["composites"] - role_response = open_url(role_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(rolerep), validate_certs=self.validate_certs) + role_response = self._request(role_url, method='PUT', data=json.dumps(rolerep)) if composites is not None: self.update_role_composites(rolerep=rolerep, composites=composites, realm=realm) return role_response except Exception as e: - self.fail_open_url(e, msg='Could not update role %s in realm %s: %s' - % (rolerep['name'], realm, str(e))) + self.fail_request(e, msg='Could not update role %s in realm %s: %s' + % (rolerep['name'], realm, str(e))) def get_role_composites(self, rolerep, clientid=None, realm='master'): composite_url = '' @@ -1826,16 +1902,10 @@ class KeycloakAPI(object): else: composite_url = URL_REALM_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, name=quote(rolerep["name"], safe='')) # Get existing composites - return json.loads(to_native(open_url( - composite_url, - method='GET', - http_agent=self.http_agent, - headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(composite_url, method='GET') except Exception as e: - self.fail_open_url(e, msg='Could not get role %s composites in realm %s: %s' - % (rolerep['name'], realm, str(e))) + self.fail_request(e, msg='Could not get role %s composites in realm %s: %s' + % (rolerep['name'], realm, str(e))) def create_role_composites(self, rolerep, composites, clientid=None, realm='master'): composite_url = '' @@ -1848,11 +1918,10 @@ class KeycloakAPI(object): composite_url = URL_REALM_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, name=quote(rolerep["name"], safe='')) # Get existing composites # create new 
composites
-            return open_url(composite_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
-                            data=json.dumps(composites), validate_certs=self.validate_certs)
+            return self._request(composite_url, method='POST', data=json.dumps(composites))
         except Exception as e:
-            self.fail_open_url(e, msg='Could not create role %s composites in realm %s: %s'
-                               % (rolerep['name'], realm, str(e)))
+            self.fail_request(e, msg='Could not create role %s composites in realm %s: %s'
+                              % (rolerep['name'], realm, str(e)))

     def delete_role_composites(self, rolerep, composites, clientid=None, realm='master'):
         composite_url = ''
@@ -1865,11 +1934,10 @@ class KeycloakAPI(object):
             composite_url = URL_REALM_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, name=quote(rolerep["name"], safe=''))
         # Get existing composites
         # create new composites
-            return open_url(composite_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
-                            data=json.dumps(composites), validate_certs=self.validate_certs)
+            return self._request(composite_url, method='DELETE', data=json.dumps(composites))
         except Exception as e:
-            self.fail_open_url(e, msg='Could not create role %s composites in realm %s: %s'
-                               % (rolerep['name'], realm, str(e)))
+            self.fail_request(e, msg='Could not delete role %s composites in realm %s: %s'
+                              % (rolerep['name'], realm, str(e)))

     def update_role_composites(self, rolerep, composites, clientid=None, realm='master'):
         # Get existing composites
@@ -1893,7 +1961,7 @@ class KeycloakAPI(object):
                             and composite["name"] == existing_composite["name"]):
                         composite_found = True
                         break
-                if (not composite_found and ('state' not in composite or composite['state'] == 'present')):
+                if not composite_found and ('state' not in composite or composite['state'] == 'present'):
                     if "client_id" in composite and composite['client_id'] is not None:
                         client_roles = self.get_client_roles(clientid=composite['client_id'], realm=realm)
                         for client_role in client_roles:
@@ -1929,11 +1997,10 @@ class KeycloakAPI(object):
         """
         role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name, safe=''))
         try:
-            return open_url(role_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
-                            validate_certs=self.validate_certs)
+            return self._request(role_url, method='DELETE')
         except Exception as e:
-            self.fail_open_url(e, msg='Unable to delete role %s in realm %s: %s'
-                               % (name, realm, str(e)))
+            self.fail_request(e, msg='Unable to delete role %s in realm %s: %s'
+                              % (name, realm, str(e)))

     def get_client_roles(self, clientid, realm='master'):
         """ Obtains role representations for client roles in a specific client
@@ -1948,15 +2015,13 @@ class KeycloakAPI(object):
                                   % (clientid, realm))
         rolelist_url = URL_CLIENT_ROLES.format(url=self.baseurl, realm=realm, id=cid)
         try:
-            return json.loads(to_native(open_url(rolelist_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
-                                                  timeout=self.connection_timeout,
-                                                  validate_certs=self.validate_certs).read()))
+            return self._request_and_deserialize(rolelist_url, method='GET')
         except ValueError as e:
             self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of roles for client %s in realm %s: %s'
                                   % (clientid, realm, str(e)))
         except Exception as e:
-            self.fail_open_url(e, msg='Could not obtain list of roles for client %s in realm %s: %s'
-                               % (clientid, realm, str(e)))
+            self.fail_request(e, msg='Could not obtain list
of roles for client %s in realm %s: %s' + % (clientid, realm, str(e))) def get_client_role(self, name, clientid, realm='master'): """ Fetch a keycloak client role from the provided realm using the role's name. @@ -1973,14 +2038,13 @@ class KeycloakAPI(object): % (clientid, realm)) role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name, safe='')) try: - return json.loads(to_native(open_url(role_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(role_url, method="GET") except HTTPError as e: if e.code == 404: return None else: - self.fail_open_url(e, msg='Could not fetch role %s in client %s of realm %s: %s' - % (name, clientid, realm, str(e))) + self.fail_request(e, msg='Could not fetch role %s in client %s of realm %s: %s' + % (name, clientid, realm, str(e))) except Exception as e: self.module.fail_json(msg='Could not fetch role %s for client %s in realm %s: %s' % (name, clientid, realm, str(e))) @@ -2002,11 +2066,10 @@ class KeycloakAPI(object): if "composites" in rolerep: keycloak_compatible_composites = self.convert_role_composites(rolerep["composites"]) rolerep["composites"] = keycloak_compatible_composites - return open_url(roles_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(rolerep), validate_certs=self.validate_certs) + return self._request(roles_url, method='POST', data=json.dumps(rolerep)) except Exception as e: - self.fail_open_url(e, msg='Could not create role %s for client %s in realm %s: %s' - % (rolerep['name'], clientid, realm, str(e))) + self.fail_request(e, msg='Could not create role %s for client %s in realm %s: %s' + % (rolerep['name'], clientid, realm, str(e))) def convert_role_composites(self, composites): keycloak_compatible_composites = { @@ -2041,14 +2104,13 @@ class KeycloakAPI(object): if "composites" in rolerep: composites = copy.deepcopy(rolerep["composites"]) del rolerep['composites'] - update_role_response = open_url(role_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(rolerep), validate_certs=self.validate_certs) + update_role_response = self._request(role_url, method='PUT', data=json.dumps(rolerep)) if composites is not None: self.update_role_composites(rolerep=rolerep, clientid=clientid, composites=composites, realm=realm) return update_role_response except Exception as e: - self.fail_open_url(e, msg='Could not update role %s for client %s in realm %s: %s' - % (rolerep['name'], clientid, realm, str(e))) + self.fail_request(e, msg='Could not update role %s for client %s in realm %s: %s' + % (rolerep['name'], clientid, realm, str(e))) def delete_client_role(self, name, clientid, realm="master"): """ Delete a role. One of name or roleid must be provided. 
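The composite machinery above (convert_role_composites plus update_role_composites) accepts module-style entries in which an optional state of present or absent decides whether each composite is added or removed, and client_id marks client roles as opposed to realm roles. A hypothetical role representation in the shape these methods expect, with placeholder names:

    rolerep = {
        'name': 'app-admin',
        'description': 'Aggregate role for application administrators',
        'composites': [
            {'name': 'offline_access'},                                      # realm role, present by default
            {'name': 'manage-users', 'client_id': 'realm-management'},       # client role
            {'name': 'view-users', 'client_id': 'realm-management', 'state': 'absent'},
        ],
    }
    kc.update_realm_role(rolerep, realm='myrealm')  # PUTs the role, then reconciles its composites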
@@ -2063,11 +2125,10 @@ class KeycloakAPI(object):
                                   % (clientid, realm))
         role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name, safe=''))
         try:
-            return open_url(role_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
-                            validate_certs=self.validate_certs)
+            return self._request(role_url, method='DELETE')
         except Exception as e:
-            self.fail_open_url(e, msg='Unable to delete role %s for client %s in realm %s: %s'
-                               % (name, clientid, realm, str(e)))
+            self.fail_request(e, msg='Unable to delete role %s for client %s in realm %s: %s'
+                              % (name, clientid, realm, str(e)))

     def get_authentication_flow_by_alias(self, alias, realm='master'):
         """
@@ -2079,16 +2140,14 @@ class KeycloakAPI(object):
         try:
             authentication_flow = {}
             # Check if the authentication flow exists on the Keycloak server
-            authentications = json.load(open_url(URL_AUTHENTICATION_FLOWS.format(url=self.baseurl, realm=realm), method='GET',
-                                                 http_agent=self.http_agent, headers=self.restheaders,
-                                                 timeout=self.connection_timeout, validate_certs=self.validate_certs))
+            authentications = json.load(self._request(URL_AUTHENTICATION_FLOWS.format(url=self.baseurl, realm=realm), method='GET'))
             for authentication in authentications:
                 if authentication["alias"] == alias:
                     authentication_flow = authentication
                     break
             return authentication_flow
         except Exception as e:
-            self.fail_open_url(e, msg="Unable get authentication flow %s: %s" % (alias, str(e)))
+            self.fail_request(e, msg="Unable to get authentication flow %s: %s" % (alias, str(e)))

     def delete_authentication_flow_by_id(self, id, realm='master'):
         """
@@ -2100,11 +2159,10 @@ class KeycloakAPI(object):
         flow_url = URL_AUTHENTICATION_FLOW.format(url=self.baseurl, realm=realm, id=id)

         try:
-            return open_url(flow_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
-                            validate_certs=self.validate_certs)
+            return self._request(flow_url, method='DELETE')
         except Exception as e:
-            self.fail_open_url(e, msg='Could not delete authentication flow %s in realm %s: %s'
-                               % (id, realm, str(e)))
+            self.fail_request(e, msg='Could not delete authentication flow %s in realm %s: %s'
+                              % (id, realm, str(e)))

     def copy_auth_flow(self, config, realm='master'):
         """
@@ -2117,31 +2175,25 @@ class KeycloakAPI(object):
             new_name = dict(
                 newName=config["alias"]
             )
-            open_url(
+            self._request(
                 URL_AUTHENTICATION_FLOW_COPY.format(
                     url=self.baseurl,
                     realm=realm,
                     copyfrom=quote(config["copyFrom"], safe='')),
                 method='POST',
-                http_agent=self.http_agent, headers=self.restheaders,
-                data=json.dumps(new_name),
-                timeout=self.connection_timeout,
-                validate_certs=self.validate_certs)
+                data=json.dumps(new_name))
             flow_list = json.load(
-                open_url(
+                self._request(
                     URL_AUTHENTICATION_FLOWS.format(url=self.baseurl, realm=realm),
-                    method='GET',
-                    http_agent=self.http_agent, headers=self.restheaders,
-                    timeout=self.connection_timeout,
-                    validate_certs=self.validate_certs))
+                    method='GET'))
             for flow in flow_list:
                 if flow["alias"] == config["alias"]:
                     return flow
             return None
         except Exception as e:
-            self.fail_open_url(e, msg='Could not copy authentication flow %s in realm %s: %s'
-                               % (config["alias"], realm, str(e)))
+            self.fail_request(e, msg='Could not copy authentication flow %s in realm %s: %s'
+                              % (config["alias"], realm, str(e)))

     def create_empty_auth_flow(self, config, realm='master'):
         """
@@ -2157,31 +2209,25 @@ class KeycloakAPI(object):
             new_flow = dict(
                 alias=config["alias"],
                 description=config["description"],
                 topLevel=True
             )
-
open_url( + self._request( URL_AUTHENTICATION_FLOWS.format( url=self.baseurl, realm=realm), method='POST', - http_agent=self.http_agent, headers=self.restheaders, - data=json.dumps(new_flow), - timeout=self.connection_timeout, - validate_certs=self.validate_certs) + data=json.dumps(new_flow)) flow_list = json.load( - open_url( + self._request( URL_AUTHENTICATION_FLOWS.format( url=self.baseurl, realm=realm), - method='GET', - http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs)) + method='GET')) for flow in flow_list: if flow["alias"] == config["alias"]: return flow return None except Exception as e: - self.fail_open_url(e, msg='Could not create empty authentication flow %s in realm %s: %s' - % (config["alias"], realm, str(e))) + self.fail_request(e, msg='Could not create empty authentication flow %s in realm %s: %s' + % (config["alias"], realm, str(e))) def update_authentication_executions(self, flowAlias, updatedExec, realm='master'): """ Update authentication executions @@ -2191,19 +2237,16 @@ class KeycloakAPI(object): :return: HTTPResponse object on success """ try: - open_url( + self._request( URL_AUTHENTICATION_FLOW_EXECUTIONS.format( url=self.baseurl, realm=realm, flowalias=quote(flowAlias, safe='')), method='PUT', - http_agent=self.http_agent, headers=self.restheaders, - data=json.dumps(updatedExec), - timeout=self.connection_timeout, - validate_certs=self.validate_certs) + data=json.dumps(updatedExec)) except HTTPError as e: - self.fail_open_url(e, msg="Unable to update execution '%s': %s: %s %s" - % (flowAlias, repr(e), ";".join([e.url, e.msg, str(e.code), str(e.hdrs)]), str(updatedExec))) + self.fail_request(e, msg="Unable to update execution '%s': %s: %s %s" + % (flowAlias, repr(e), ";".join([e.url, e.msg, str(e.code), str(e.hdrs)]), str(updatedExec))) except Exception as e: self.module.fail_json(msg="Unable to update executions %s: %s" % (updatedExec, str(e))) @@ -2215,18 +2258,32 @@ class KeycloakAPI(object): :return: HTTPResponse object on success """ try: - open_url( + self._request( URL_AUTHENTICATION_EXECUTION_CONFIG.format( url=self.baseurl, realm=realm, id=executionId), method='POST', - http_agent=self.http_agent, headers=self.restheaders, - data=json.dumps(authenticationConfig), - timeout=self.connection_timeout, - validate_certs=self.validate_certs) + data=json.dumps(authenticationConfig)) except Exception as e: - self.fail_open_url(e, msg="Unable to add authenticationConfig %s: %s" % (executionId, str(e))) + self.fail_request(e, msg="Unable to add authenticationConfig %s: %s" % (executionId, str(e))) + + def delete_authentication_config(self, configId, realm='master'): + """ Delete authenticator config + + :param configId: id of authentication config + :param realm: realm of authentication config to be deleted + """ + try: + # Send a DELETE request to remove the specified authentication config from the Keycloak server. 
+ self._request( + URL_AUTHENTICATION_CONFIG.format( + url=self.baseurl, + realm=realm, + id=configId), + method='DELETE') + except Exception as e: + self.fail_request(e, msg="Unable to delete authentication config %s: %s" % (configId, str(e))) def create_subflow(self, subflowName, flowAlias, realm='master', flowType='basic-flow'): """ Create new sublow on the flow @@ -2240,18 +2297,15 @@ class KeycloakAPI(object): newSubFlow["alias"] = subflowName newSubFlow["provider"] = "registration-page-form" newSubFlow["type"] = flowType - open_url( + self._request( URL_AUTHENTICATION_FLOW_EXECUTIONS_FLOW.format( url=self.baseurl, realm=realm, flowalias=quote(flowAlias, safe='')), method='POST', - http_agent=self.http_agent, headers=self.restheaders, - data=json.dumps(newSubFlow), - timeout=self.connection_timeout, - validate_certs=self.validate_certs) + data=json.dumps(newSubFlow)) except Exception as e: - self.fail_open_url(e, msg="Unable to create new subflow %s: %s" % (subflowName, str(e))) + self.fail_request(e, msg="Unable to create new subflow %s: %s" % (subflowName, str(e))) def create_execution(self, execution, flowAlias, realm='master'): """ Create new execution on the flow @@ -2264,19 +2318,16 @@ class KeycloakAPI(object): newExec = {} newExec["provider"] = execution["providerId"] newExec["requirement"] = execution["requirement"] - open_url( + self._request( URL_AUTHENTICATION_FLOW_EXECUTIONS_EXECUTION.format( url=self.baseurl, realm=realm, flowalias=quote(flowAlias, safe='')), method='POST', - http_agent=self.http_agent, headers=self.restheaders, - data=json.dumps(newExec), - timeout=self.connection_timeout, - validate_certs=self.validate_certs) + data=json.dumps(newExec)) except HTTPError as e: - self.fail_open_url(e, msg="Unable to create new execution '%s' %s: %s: %s %s" - % (flowAlias, execution["providerId"], repr(e), ";".join([e.url, e.msg, str(e.code), str(e.hdrs)]), str(newExec))) + self.fail_request(e, msg="Unable to create new execution '%s' %s: %s: %s %s" + % (flowAlias, execution["providerId"], repr(e), ";".join([e.url, e.msg, str(e.code), str(e.hdrs)]), str(newExec))) except Exception as e: self.module.fail_json(msg="Unable to create new execution '%s' %s: %s" % (flowAlias, execution["providerId"], repr(e))) @@ -2291,28 +2342,22 @@ class KeycloakAPI(object): try: if diff > 0: for i in range(diff): - open_url( + self._request( URL_AUTHENTICATION_EXECUTION_RAISE_PRIORITY.format( url=self.baseurl, realm=realm, id=executionId), - method='POST', - http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs) + method='POST') elif diff < 0: for i in range(-diff): - open_url( + self._request( URL_AUTHENTICATION_EXECUTION_LOWER_PRIORITY.format( url=self.baseurl, realm=realm, id=executionId), - method='POST', - http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs) + method='POST') except Exception as e: - self.fail_open_url(e, msg="Unable to change execution priority %s: %s" % (executionId, str(e))) + self.fail_request(e, msg="Unable to change execution priority %s: %s" % (executionId, str(e))) def get_executions_representation(self, config, realm='master'): """ @@ -2324,33 +2369,27 @@ class KeycloakAPI(object): try: # Get executions created executions = json.load( - open_url( + self._request( URL_AUTHENTICATION_FLOW_EXECUTIONS.format( url=self.baseurl, realm=realm, flowalias=quote(config["alias"], safe='')), - method='GET', - 
http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs)) + method='GET')) for execution in executions: if "authenticationConfig" in execution: execConfigId = execution["authenticationConfig"] execConfig = json.load( - open_url( + self._request( URL_AUTHENTICATION_CONFIG.format( url=self.baseurl, realm=realm, id=execConfigId), - method='GET', - http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs)) + method='GET')) execution["authenticationConfig"] = execConfig return executions except Exception as e: - self.fail_open_url(e, msg='Could not get executions for authentication flow %s in realm %s: %s' - % (config["alias"], realm, str(e))) + self.fail_request(e, msg='Could not get executions for authentication flow %s in realm %s: %s' + % (config["alias"], realm, str(e))) def get_required_actions(self, realm='master'): """ @@ -2361,15 +2400,12 @@ class KeycloakAPI(object): try: required_actions = json.load( - open_url( + self._request( URL_AUTHENTICATION_REQUIRED_ACTIONS.format( url=self.baseurl, realm=realm ), - method='GET', - http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs + method='GET' ) ) @@ -2391,19 +2427,16 @@ class KeycloakAPI(object): } try: - return open_url( + return self._request( URL_AUTHENTICATION_REGISTER_REQUIRED_ACTION.format( url=self.baseurl, realm=realm ), method='POST', - http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(data), - timeout=self.connection_timeout, - validate_certs=self.validate_certs ) except Exception as e: - self.fail_open_url( + self.fail_request( e, msg='Unable to register required action %s in realm %s: %s' % (rep["name"], realm, str(e)) @@ -2419,20 +2452,17 @@ class KeycloakAPI(object): """ try: - return open_url( + return self._request( URL_AUTHENTICATION_REQUIRED_ACTIONS_ALIAS.format( url=self.baseurl, alias=quote(alias, safe=''), realm=realm ), method='PUT', - http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(rep), - timeout=self.connection_timeout, - validate_certs=self.validate_certs ) except Exception as e: - self.fail_open_url( + self.fail_request( e, msg='Unable to update required action %s in realm %s: %s' % (alias, realm, str(e)) @@ -2447,19 +2477,16 @@ class KeycloakAPI(object): """ try: - return open_url( + return self._request( URL_AUTHENTICATION_REQUIRED_ACTIONS_ALIAS.format( url=self.baseurl, alias=quote(alias, safe=''), realm=realm ), method='DELETE', - http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs ) except Exception as e: - self.fail_open_url( + self.fail_request( e, msg='Unable to delete required action %s in realm %s: %s' % (alias, realm, str(e)) @@ -2472,14 +2499,13 @@ class KeycloakAPI(object): """ idps_url = URL_IDENTITY_PROVIDERS.format(url=self.baseurl, realm=realm) try: - return json.loads(to_native(open_url(idps_url, method='GET', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(idps_url, method='GET') except ValueError as e: self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of identity providers for realm %s: %s' % (realm, str(e))) except Exception as e: - self.fail_open_url(e, msg='Could not obtain list of identity providers for realm 
%s: %s' - % (realm, str(e))) + self.fail_request(e, msg='Could not obtain list of identity providers for realm %s: %s' + % (realm, str(e))) def get_identity_provider(self, alias, realm='master'): """ Fetch identity provider representation from a realm using the idp's alias. @@ -2489,14 +2515,13 @@ class KeycloakAPI(object): """ idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=alias) try: - return json.loads(to_native(open_url(idp_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(idp_url, method="GET") except HTTPError as e: if e.code == 404: return None else: - self.fail_open_url(e, msg='Could not fetch identity provider %s in realm %s: %s' - % (alias, realm, str(e))) + self.fail_request(e, msg='Could not fetch identity provider %s in realm %s: %s' + % (alias, realm, str(e))) except Exception as e: self.module.fail_json(msg='Could not fetch identity provider %s in realm %s: %s' % (alias, realm, str(e))) @@ -2509,11 +2534,10 @@ class KeycloakAPI(object): """ idps_url = URL_IDENTITY_PROVIDERS.format(url=self.baseurl, realm=realm) try: - return open_url(idps_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(idprep), validate_certs=self.validate_certs) + return self._request(idps_url, method='POST', data=json.dumps(idprep)) except Exception as e: - self.fail_open_url(e, msg='Could not create identity provider %s in realm %s: %s' - % (idprep['alias'], realm, str(e))) + self.fail_request(e, msg='Could not create identity provider %s in realm %s: %s' + % (idprep['alias'], realm, str(e))) def update_identity_provider(self, idprep, realm='master'): """ Update an existing identity provider. @@ -2523,11 +2547,10 @@ class KeycloakAPI(object): """ idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=idprep['alias']) try: - return open_url(idp_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(idprep), validate_certs=self.validate_certs) + return self._request(idp_url, method='PUT', data=json.dumps(idprep)) except Exception as e: - self.fail_open_url(e, msg='Could not update identity provider %s in realm %s: %s' - % (idprep['alias'], realm, str(e))) + self.fail_request(e, msg='Could not update identity provider %s in realm %s: %s' + % (idprep['alias'], realm, str(e))) def delete_identity_provider(self, alias, realm='master'): """ Delete an identity provider. 
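The identity provider methods above follow the usual get-then-create-or-update pattern: get_identity_provider returns None on a 404, create POSTs to the collection URL, and update PUTs to the alias URL. A hypothetical OIDC provider, with placeholder endpoints and secret:

    idprep = {
        'alias': 'corp-oidc',
        'providerId': 'oidc',
        'enabled': True,
        'config': {
            'clientId': 'ansible',
            'clientSecret': 'changeme',
            'authorizationUrl': 'https://idp.example.com/oauth2/authorize',
            'tokenUrl': 'https://idp.example.com/oauth2/token',
        },
    }
    if kc.get_identity_provider('corp-oidc', realm='myrealm') is None:
        kc.create_identity_provider(idprep, realm='myrealm')
    else:
        kc.update_identity_provider(idprep, realm='myrealm')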
@@ -2536,11 +2559,10 @@ class KeycloakAPI(object): """ idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=alias) try: - return open_url(idp_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) + return self._request(idp_url, method='DELETE') except Exception as e: - self.fail_open_url(e, msg='Unable to delete identity provider %s in realm %s: %s' - % (alias, realm, str(e))) + self.fail_request(e, msg='Unable to delete identity provider %s in realm %s: %s' + % (alias, realm, str(e))) def get_identity_provider_mappers(self, alias, realm='master'): """ Fetch representations for identity provider mappers @@ -2550,15 +2572,13 @@ class KeycloakAPI(object): """ mappers_url = URL_IDENTITY_PROVIDER_MAPPERS.format(url=self.baseurl, realm=realm, alias=alias) try: - return json.loads(to_native(open_url(mappers_url, method='GET', http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(mappers_url, method='GET') except ValueError as e: self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of identity provider mappers for idp %s in realm %s: %s' % (alias, realm, str(e))) except Exception as e: - self.fail_open_url(e, msg='Could not obtain list of identity provider mappers for idp %s in realm %s: %s' - % (alias, realm, str(e))) + self.fail_request(e, msg='Could not obtain list of identity provider mappers for idp %s in realm %s: %s' + % (alias, realm, str(e))) def get_identity_provider_mapper(self, mid, alias, realm='master'): """ Fetch identity provider representation from a realm using the idp's alias. @@ -2569,15 +2589,13 @@ class KeycloakAPI(object): """ mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mid) try: - return json.loads(to_native(open_url(mapper_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(mapper_url, method="GET") except HTTPError as e: if e.code == 404: return None else: - self.fail_open_url(e, msg='Could not fetch mapper %s for identity provider %s in realm %s: %s' - % (mid, alias, realm, str(e))) + self.fail_request(e, msg='Could not fetch mapper %s for identity provider %s in realm %s: %s' + % (mid, alias, realm, str(e))) except Exception as e: self.module.fail_json(msg='Could not fetch mapper %s for identity provider %s in realm %s: %s' % (mid, alias, realm, str(e))) @@ -2591,11 +2609,10 @@ class KeycloakAPI(object): """ mappers_url = URL_IDENTITY_PROVIDER_MAPPERS.format(url=self.baseurl, realm=realm, alias=alias) try: - return open_url(mappers_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(mapper), validate_certs=self.validate_certs) + return self._request(mappers_url, method='POST', data=json.dumps(mapper)) except Exception as e: - self.fail_open_url(e, msg='Could not create identity provider mapper %s for idp %s in realm %s: %s' - % (mapper['name'], alias, realm, str(e))) + self.fail_request(e, msg='Could not create identity provider mapper %s for idp %s in realm %s: %s' + % (mapper['name'], alias, realm, str(e))) def update_identity_provider_mapper(self, mapper, alias, realm='master'): """ Update an existing identity provider. 
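Mappers are keyed by the provider alias plus a server-assigned mapper id, so updates are typically done on a representation fetched first. A hypothetical claim-to-attribute mapper; the mapper type named here is one of Keycloak's built-ins, but verify it against your server version:

    mapper = {
        'name': 'import-email',
        'identityProviderAlias': 'corp-oidc',
        'identityProviderMapper': 'oidc-user-attribute-idp-mapper',
        'config': {'claim': 'email', 'user.attribute': 'email'},
    }
    kc.create_identity_provider_mapper(mapper, alias='corp-oidc', realm='myrealm')

    # Updating needs the server-assigned id inside the payload:
    for m in kc.get_identity_provider_mappers('corp-oidc', realm='myrealm'):
        if m['name'] == 'import-email':
            m['config']['claim'] = 'upn'
            kc.update_identity_provider_mapper(m, alias='corp-oidc', realm='myrealm')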
@@ -2606,11 +2623,10 @@ class KeycloakAPI(object): """ mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mapper['id']) try: - return open_url(mapper_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(mapper), validate_certs=self.validate_certs) + return self._request(mapper_url, method='PUT', data=json.dumps(mapper)) except Exception as e: - self.fail_open_url(e, msg='Could not update mapper %s for identity provider %s in realm %s: %s' - % (mapper['id'], alias, realm, str(e))) + self.fail_request(e, msg='Could not update mapper %s for identity provider %s in realm %s: %s' + % (mapper['id'], alias, realm, str(e))) def delete_identity_provider_mapper(self, mid, alias, realm='master'): """ Delete an identity provider. @@ -2620,11 +2636,10 @@ class KeycloakAPI(object): """ mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mid) try: - return open_url(mapper_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) + return self._request(mapper_url, method='DELETE') except Exception as e: - self.fail_open_url(e, msg='Unable to delete mapper %s for identity provider %s in realm %s: %s' - % (mid, alias, realm, str(e))) + self.fail_request(e, msg='Unable to delete mapper %s for identity provider %s in realm %s: %s' + % (mid, alias, realm, str(e))) def get_components(self, filter=None, realm='master'): """ Fetch representations for components in a realm @@ -2637,14 +2652,13 @@ class KeycloakAPI(object): comps_url += '?%s' % filter try: - return json.loads(to_native(open_url(comps_url, method='GET', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(comps_url, method='GET') except ValueError as e: self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of components for realm %s: %s' % (realm, str(e))) except Exception as e: - self.fail_open_url(e, msg='Could not obtain list of components for realm %s: %s' - % (realm, str(e))) + self.fail_request(e, msg='Could not obtain list of components for realm %s: %s' + % (realm, str(e))) def get_component(self, cid, realm='master'): """ Fetch component representation from a realm using its cid. 
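Note that get_components appends filter verbatim after a question mark, so the caller is responsible for URL-encoding it. A hypothetical lookup of a realm's user-storage components (the provider type is a standard Keycloak component type):

    from ansible.module_utils.six.moves.urllib.parse import urlencode

    filter = urlencode({'type': 'org.keycloak.storage.UserStorageProvider'})
    for comp in kc.get_components(filter=filter, realm='myrealm'):
        print(comp['id'], comp['providerId'])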
@@ -2654,14 +2668,13 @@ class KeycloakAPI(object): """ comp_url = URL_COMPONENT.format(url=self.baseurl, realm=realm, id=cid) try: - return json.loads(to_native(open_url(comp_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(comp_url, method="GET") except HTTPError as e: if e.code == 404: return None else: - self.fail_open_url(e, msg='Could not fetch component %s in realm %s: %s' - % (cid, realm, str(e))) + self.fail_request(e, msg='Could not fetch component %s in realm %s: %s' + % (cid, realm, str(e))) except Exception as e: self.module.fail_json(msg='Could not fetch component %s in realm %s: %s' % (cid, realm, str(e))) @@ -2674,17 +2687,15 @@ class KeycloakAPI(object): """ comps_url = URL_COMPONENTS.format(url=self.baseurl, realm=realm) try: - resp = open_url(comps_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(comprep), validate_certs=self.validate_certs) + resp = self._request(comps_url, method='POST', data=json.dumps(comprep)) comp_url = resp.getheader('Location') if comp_url is None: self.module.fail_json(msg='Could not create component in realm %s: %s' % (realm, 'unexpected response')) - return json.loads(to_native(open_url(comp_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(comp_url, method="GET") except Exception as e: - self.fail_open_url(e, msg='Could not create component in realm %s: %s' - % (realm, str(e))) + self.fail_request(e, msg='Could not create component in realm %s: %s' + % (realm, str(e))) def update_component(self, comprep, realm='master'): """ Update an existing component. @@ -2697,11 +2708,10 @@ class KeycloakAPI(object): self.module.fail_json(msg='Cannot update component without id') comp_url = URL_COMPONENT.format(url=self.baseurl, realm=realm, id=cid) try: - return open_url(comp_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(comprep), validate_certs=self.validate_certs) + return self._request(comp_url, method='PUT', data=json.dumps(comprep)) except Exception as e: - self.fail_open_url(e, msg='Could not update component %s in realm %s: %s' - % (cid, realm, str(e))) + self.fail_request(e, msg='Could not update component %s in realm %s: %s' + % (cid, realm, str(e))) def delete_component(self, cid, realm='master'): """ Delete an component. 
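create_component above relies on the 201 response carrying a Location header that points at the new component, which the code immediately re-GETs so the caller receives the full representation including the generated id. A hypothetical LDAP user-storage component, with placeholder connection details:

    comprep = {
        'name': 'my-ldap',
        'providerId': 'ldap',
        'providerType': 'org.keycloak.storage.UserStorageProvider',
        'config': {'connectionUrl': ['ldap://ldap.example.com']},  # config values are string lists
    }
    created = kc.create_component(comprep, realm='myrealm')
    component_id = created['id']  # known only after following the Location header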
@@ -2710,20 +2720,17 @@ class KeycloakAPI(object):
         """
         comp_url = URL_COMPONENT.format(url=self.baseurl, realm=realm, id=cid)
         try:
-            return open_url(comp_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
-                            validate_certs=self.validate_certs)
+            return self._request(comp_url, method='DELETE')
         except Exception as e:
-            self.fail_open_url(e, msg='Unable to delete component %s in realm %s: %s'
-                               % (cid, realm, str(e)))
+            self.fail_request(e, msg='Unable to delete component %s in realm %s: %s'
+                              % (cid, realm, str(e)))

     def get_authz_authorization_scope_by_name(self, name, client_id, realm):
         url = URL_AUTHZ_AUTHORIZATION_SCOPES.format(url=self.baseurl, client_id=client_id, realm=realm)
         search_url = "%s/search?name=%s" % (url, quote(name, safe=''))

         try:
-            return json.loads(to_native(open_url(search_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
-                                                 timeout=self.connection_timeout,
-                                                 validate_certs=self.validate_certs).read()))
+            return self._request_and_deserialize(search_url, method='GET')
         except Exception:
             return False

@@ -2732,30 +2739,27 @@ class KeycloakAPI(object):
         url = URL_AUTHZ_AUTHORIZATION_SCOPES.format(url=self.baseurl, client_id=client_id, realm=realm)

         try:
-            return open_url(url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
-                            data=json.dumps(payload), validate_certs=self.validate_certs)
+            return self._request(url, method='POST', data=json.dumps(payload))
         except Exception as e:
-            self.fail_open_url(e, msg='Could not create authorization scope %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))
+            self.fail_request(e, msg='Could not create authorization scope %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))

     def update_authz_authorization_scope(self, payload, id, client_id, realm):
         """Update an authorization scope for a Keycloak client"""
         url = URL_AUTHZ_AUTHORIZATION_SCOPE.format(url=self.baseurl, id=id, client_id=client_id, realm=realm)

         try:
-            return open_url(url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
-                            data=json.dumps(payload), validate_certs=self.validate_certs)
+            return self._request(url, method='PUT', data=json.dumps(payload))
         except Exception as e:
-            self.fail_open_url(e, msg='Could not create update scope %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))
+            self.fail_request(e, msg='Could not update scope %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))

     def remove_authz_authorization_scope(self, id, client_id, realm):
         """Remove an authorization scope from a Keycloak client"""
         url = URL_AUTHZ_AUTHORIZATION_SCOPE.format(url=self.baseurl, id=id, client_id=client_id, realm=realm)

         try:
-            return open_url(url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
-                            validate_certs=self.validate_certs)
+            return self._request(url, method='DELETE')
         except Exception as e:
-            self.fail_open_url(e, msg='Could not delete scope %s for client %s in realm %s: %s' % (id, client_id, realm, str(e)))
+            self.fail_request(e, msg='Could not delete scope %s for client %s in realm %s: %s' % (id, client_id, realm, str(e)))

     def get_user_by_id(self, user_id, realm='master'):
         """
@@ -2770,16 +2774,13 @@ class KeycloakAPI(object):
                 realm=realm,
                 id=user_id)
             userrep = json.load(
-                open_url(
+                self._request(
                     user_url,
-                    method='GET',
-
http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs)) + method='GET')) return userrep except Exception as e: - self.fail_open_url(e, msg='Could not get user %s in realm %s: %s' - % (user_id, realm, str(e))) + self.fail_request(e, msg='Could not get user %s in realm %s: %s' + % (user_id, realm, str(e))) def create_user(self, userrep, realm='master'): """ @@ -2795,19 +2796,16 @@ class KeycloakAPI(object): users_url = URL_USERS.format( url=self.baseurl, realm=realm) - open_url(users_url, - method='POST', - http_agent=self.http_agent, headers=self.restheaders, - data=json.dumps(userrep), - timeout=self.connection_timeout, - validate_certs=self.validate_certs) + self._request(users_url, + method='POST', + data=json.dumps(userrep)) created_user = self.get_user_by_username( username=userrep['username'], realm=realm) return created_user except Exception as e: - self.fail_open_url(e, msg='Could not create user %s in realm %s: %s' - % (userrep['username'], realm, str(e))) + self.fail_request(e, msg='Could not create user %s in realm %s: %s' + % (userrep['username'], realm, str(e))) def convert_user_attributes_to_keycloak_dict(self, attributes): keycloak_user_attributes_dict = {} @@ -2840,20 +2838,17 @@ class KeycloakAPI(object): url=self.baseurl, realm=realm, id=userrep["id"]) - open_url( + self._request( user_url, method='PUT', - http_agent=self.http_agent, headers=self.restheaders, - data=json.dumps(userrep), - timeout=self.connection_timeout, - validate_certs=self.validate_certs) + data=json.dumps(userrep)) updated_user = self.get_user_by_id( user_id=userrep['id'], realm=realm) return updated_user except Exception as e: - self.fail_open_url(e, msg='Could not update user %s in realm %s: %s' - % (userrep['username'], realm, str(e))) + self.fail_request(e, msg='Could not update user %s in realm %s: %s' + % (userrep['username'], realm, str(e))) def delete_user(self, user_id, realm='master'): """ @@ -2867,44 +2862,42 @@ class KeycloakAPI(object): url=self.baseurl, realm=realm, id=user_id) - return open_url( + return self._request( user_url, - method='DELETE', - http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs) + method='DELETE') except Exception as e: - self.fail_open_url(e, msg='Could not delete user %s in realm %s: %s' - % (user_id, realm, str(e))) + self.fail_request(e, msg='Could not delete user %s in realm %s: %s' + % (user_id, realm, str(e))) def get_user_groups(self, user_id, realm='master'): """ - Get groups for a user. + Get the group names for a user. :param user_id: User ID :param realm: Realm - :return: Representation of the client groups. + :return: The client group names as a list of strings. + """ + user_groups = self.get_user_group_details(user_id, realm) + return [user_group['name'] for user_group in user_groups if 'name' in user_group] + + def get_user_group_details(self, user_id, realm='master'): + """ + Get the group details for a user. + :param user_id: User ID + :param realm: Realm + :return: The client group details as a list of dictionaries. 
""" try: - groups = [] - user_groups_url = URL_USER_GROUPS.format( - url=self.baseurl, - realm=realm, - id=user_id) - user_groups = json.load( - open_url( - user_groups_url, - method='GET', - http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs)) - for user_group in user_groups: - groups.append(user_group["name"]) - return groups + user_groups_url = URL_USER_GROUPS.format(url=self.baseurl, realm=realm, id=user_id) + return self._request_and_deserialize(user_groups_url, method='GET') except Exception as e: - self.fail_open_url(e, msg='Could not get groups for user %s in realm %s: %s' - % (user_id, realm, str(e))) + self.fail_request(e, msg='Could not get groups for user %s in realm %s: %s' + % (user_id, realm, str(e))) def add_user_in_group(self, user_id, group_id, realm='master'): + """DEPRECATED: Call add_user_to_group(...) instead. This method is scheduled for removal in community.general 13.0.0.""" + return self.add_user_to_group(user_id, group_id, realm) + + def add_user_to_group(self, user_id, group_id, realm='master'): """ Add a user to a group. :param user_id: User ID @@ -2918,15 +2911,12 @@ class KeycloakAPI(object): realm=realm, id=user_id, group_id=group_id) - return open_url( + return self._request( user_group_url, - method='PUT', - http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs) + method='PUT') except Exception as e: - self.fail_open_url(e, msg='Could not add user %s in group %s in realm %s: %s' - % (user_id, group_id, realm, str(e))) + self.fail_request(e, msg='Could not add user %s to group %s in realm %s: %s' + % (user_id, group_id, realm, str(e))) def remove_user_from_group(self, user_id, group_id, realm='master'): """ @@ -2942,15 +2932,12 @@ class KeycloakAPI(object): realm=realm, id=user_id, group_id=group_id) - return open_url( + return self._request( user_group_url, - method='DELETE', - http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs) + method='DELETE') except Exception as e: - self.fail_open_url(e, msg='Could not remove user %s from group %s in realm %s: %s' - % (user_id, group_id, realm, str(e))) + self.fail_request(e, msg='Could not remove user %s from group %s in realm %s: %s' + % (user_id, group_id, realm, str(e))) def update_user_groups_membership(self, userrep, groups, realm='master'): """ @@ -2959,49 +2946,72 @@ class KeycloakAPI(object): :param realm: Realm :return: True if group membership has been changed. False Otherwise. 
""" - changed = False try: - user_existing_groups = self.get_user_groups( - user_id=userrep['id'], - realm=realm) - groups_to_add_and_remove = self.extract_groups_to_add_to_and_remove_from_user(groups) - # If group membership need to be changed - if not is_struct_included(groups_to_add_and_remove['add'], user_existing_groups): - # Get available groups in the realm - realm_groups = self.get_groups(realm=realm) - for realm_group in realm_groups: - if "name" in realm_group and realm_group["name"] in groups_to_add_and_remove['add']: - self.add_user_in_group( - user_id=userrep["id"], - group_id=realm_group["id"], - realm=realm) - changed = True - elif "name" in realm_group and realm_group['name'] in groups_to_add_and_remove['remove']: - self.remove_user_from_group( - user_id=userrep['id'], - group_id=realm_group['id'], - realm=realm) - changed = True - return changed + groups_to_add, groups_to_remove = self.extract_groups_to_add_to_and_remove_from_user(groups) + if not groups_to_add and not groups_to_remove: + return False + + user_groups = self.get_user_group_details(user_id=userrep['id'], realm=realm) + user_group_names = [user_group['name'] for user_group in user_groups if 'name' in user_group] + user_group_paths = [user_group['path'] for user_group in user_groups if 'path' in user_group] + + groups_to_add = [group_to_add for group_to_add in groups_to_add + if group_to_add not in user_group_names and group_to_add not in user_group_paths] + groups_to_remove = [group_to_remove for group_to_remove in groups_to_remove + if group_to_remove in user_group_names or group_to_remove in user_group_paths] + if not groups_to_add and not groups_to_remove: + return False + + for group_to_add in groups_to_add: + realm_group = self.find_group_by_path(group_to_add, realm=realm) + if realm_group: + self.add_user_to_group(user_id=userrep['id'], group_id=realm_group['id'], realm=realm) + + for group_to_remove in groups_to_remove: + realm_group = self.find_group_by_path(group_to_remove, realm=realm) + if realm_group: + self.remove_user_from_group(user_id=userrep['id'], group_id=realm_group['id'], realm=realm) + + return True except Exception as e: self.module.fail_json(msg='Could not update group membership for user %s in realm %s: %s' - % (userrep['id]'], realm, str(e))) + % (userrep['username'], realm, e)) def extract_groups_to_add_to_and_remove_from_user(self, groups): - groups_extract = {} groups_to_add = [] groups_to_remove = [] - if isinstance(groups, list) and len(groups) > 0: + if isinstance(groups, list): for group in groups: group_name = group['name'] if isinstance(group, dict) and 'name' in group else group - if isinstance(group, dict) and ('state' not in group or group['state'] == 'present'): - groups_to_add.append(group_name) - else: - groups_to_remove.append(group_name) - groups_extract['add'] = groups_to_add - groups_extract['remove'] = groups_to_remove + if isinstance(group, dict): + if 'state' not in group or group['state'] == 'present': + groups_to_add.append(group_name) + else: + groups_to_remove.append(group_name) + return groups_to_add, groups_to_remove - return groups_extract + def find_group_by_path(self, target, realm='master'): + """ + Finds a realm group by path, e.g. '/my/group'. + The path is formed by prepending a '/' character to `target` unless it's already present. + This adds support for finding top level groups by name and subgroups by path. 
+ """ + groups = self.get_groups(realm=realm) + path = target if target.startswith('/') else '/' + target + for segment in path.split('/'): + if not segment: + continue + abort = True + for group in groups: + if group['path'] == path: + return self.get_group_by_groupid(group['id'], realm=realm) + if group['name'] == segment: + groups = self.get_subgroups(group, realm=realm) + abort = False + break + if abort: + break + return None def convert_user_group_list_of_str_to_list_of_dict(self, groups): list_of_groups = [] @@ -3018,10 +3028,9 @@ class KeycloakAPI(object): url = URL_AUTHZ_CUSTOM_POLICY.format(url=self.baseurl, policy_type=policy_type, client_id=client_id, realm=realm) try: - return open_url(url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(payload), validate_certs=self.validate_certs) + return self._request(url, method='POST', data=json.dumps(payload)) except Exception as e: - self.fail_open_url(e, msg='Could not create permission %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e))) + self.fail_request(e, msg='Could not create permission %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e))) def remove_authz_custom_policy(self, policy_id, client_id, realm): """Remove a custom policy from a Keycloak client""" @@ -3029,10 +3038,9 @@ class KeycloakAPI(object): delete_url = "%s/%s" % (url, policy_id) try: - return open_url(delete_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) + return self._request(delete_url, method='DELETE') except Exception as e: - self.fail_open_url(e, msg='Could not delete custom policy %s for client %s in realm %s: %s' % (id, client_id, realm, str(e))) + self.fail_request(e, msg='Could not delete custom policy %s for client %s in realm %s: %s' % (id, client_id, realm, str(e))) def get_authz_permission_by_name(self, name, client_id, realm): """Get authorization permission by name""" @@ -3040,9 +3048,7 @@ class KeycloakAPI(object): search_url = "%s/search?name=%s" % (url, name.replace(' ', '%20')) try: - return json.loads(to_native(open_url(search_url, method='GET', http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(search_url, method='GET') except Exception: return False @@ -3051,30 +3057,27 @@ class KeycloakAPI(object): url = URL_AUTHZ_PERMISSIONS.format(url=self.baseurl, permission_type=permission_type, client_id=client_id, realm=realm) try: - return open_url(url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(payload), validate_certs=self.validate_certs) + return self._request(url, method='POST', data=json.dumps(payload)) except Exception as e: - self.fail_open_url(e, msg='Could not create permission %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e))) + self.fail_request(e, msg='Could not create permission %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e))) def remove_authz_permission(self, id, client_id, realm): """Create an authorization permission for a Keycloak client""" url = URL_AUTHZ_POLICY.format(url=self.baseurl, id=id, client_id=client_id, realm=realm) try: - return open_url(url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, 
timeout=self.connection_timeout, - validate_certs=self.validate_certs) + return self._request(url, method='DELETE') except Exception as e: - self.fail_open_url(e, msg='Could not delete permission %s for client %s in realm %s: %s' % (id, client_id, realm, str(e))) + self.fail_request(e, msg='Could not delete permission %s for client %s in realm %s: %s' % (id, client_id, realm, str(e))) def update_authz_permission(self, payload, permission_type, id, client_id, realm): """Update a permission for a Keycloak client""" url = URL_AUTHZ_PERMISSION.format(url=self.baseurl, permission_type=permission_type, id=id, client_id=client_id, realm=realm) try: - return open_url(url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(payload), validate_certs=self.validate_certs) + return self._request(url, method='PUT', data=json.dumps(payload)) except Exception as e: - self.fail_open_url(e, msg='Could not create update permission %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e))) + self.fail_request(e, msg='Could not create update permission %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e))) def get_authz_resource_by_name(self, name, client_id, realm): """Get authorization resource by name""" @@ -3082,9 +3085,7 @@ class KeycloakAPI(object): search_url = "%s/search?name=%s" % (url, name.replace(' ', '%20')) try: - return json.loads(to_native(open_url(search_url, method='GET', http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(search_url, method='GET') except Exception: return False @@ -3094,9 +3095,7 @@ class KeycloakAPI(object): search_url = "%s/search?name=%s&permission=false" % (url, name.replace(' ', '%20')) try: - return json.loads(to_native(open_url(search_url, method='GET', http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(search_url, method='GET') except Exception: return False @@ -3109,11 +3108,9 @@ class KeycloakAPI(object): """ client_role_scope_url = URL_CLIENT_ROLE_SCOPE_CLIENTS.format(url=self.baseurl, realm=realm, id=clientid, scopeid=clientscopeid) try: - return json.loads(to_native(open_url(client_role_scope_url, method='GET', http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(client_role_scope_url, method='GET') except Exception as e: - self.fail_open_url(e, msg='Could not fetch roles scope for client %s in realm %s: %s' % (clientid, realm, str(e))) + self.fail_request(e, msg='Could not fetch roles scope for client %s in realm %s: %s' % (clientid, realm, str(e))) def update_client_role_scope_from_client(self, payload, clientid, clientscopeid, realm="master"): """ Update and fetch the roles associated with the client's scope on the Keycloak server. 
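The hunks in this region all make the same move: the repeated open_url() boilerplate (http_agent, headers, timeout, validate_certs) collapses into two shared helpers. Their definitions are not part of this excerpt, so the following is only a sketch of what the call sites imply, built on ansible.module_utils.urls.open_url:

    # Approximate shape of the shared helpers used above; the real methods
    # live elsewhere in keycloak.py and may differ in detail.
    import json

    from ansible.module_utils.urls import open_url
    from ansible.module_utils.common.text.converters import to_native

    class KeycloakAPISketch(object):  # illustrative stand-in for KeycloakAPI
        def _request(self, url, method, data=None):
            # One place for the connection options every call used to repeat.
            return open_url(url, method=method, data=data,
                            http_agent=self.http_agent, headers=self.restheaders,
                            timeout=self.connection_timeout,
                            validate_certs=self.validate_certs)

        def _request_and_deserialize(self, url, method, data=None):
            # GET-style helpers additionally decode the JSON response body.
            return json.loads(to_native(self._request(url, method=method, data=data).read()))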
@@ -3125,11 +3122,10 @@ class KeycloakAPI(object): """ client_role_scope_url = URL_CLIENT_ROLE_SCOPE_CLIENTS.format(url=self.baseurl, realm=realm, id=clientid, scopeid=clientscopeid) try: - open_url(client_role_scope_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(payload), validate_certs=self.validate_certs) + self._request(client_role_scope_url, method='POST', data=json.dumps(payload)) except Exception as e: - self.fail_open_url(e, msg='Could not update roles scope for client %s in realm %s: %s' % (clientid, realm, str(e))) + self.fail_request(e, msg='Could not update roles scope for client %s in realm %s: %s' % (clientid, realm, str(e))) return self.get_client_role_scope_from_client(clientid, clientscopeid, realm) @@ -3143,11 +3139,10 @@ class KeycloakAPI(object): """ client_role_scope_url = URL_CLIENT_ROLE_SCOPE_CLIENTS.format(url=self.baseurl, realm=realm, id=clientid, scopeid=clientscopeid) try: - open_url(client_role_scope_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(payload), validate_certs=self.validate_certs) + self._request(client_role_scope_url, method='DELETE', data=json.dumps(payload)) except Exception as e: - self.fail_open_url(e, msg='Could not delete roles scope for client %s in realm %s: %s' % (clientid, realm, str(e))) + self.fail_request(e, msg='Could not delete roles scope for client %s in realm %s: %s' % (clientid, realm, str(e))) return self.get_client_role_scope_from_client(clientid, clientscopeid, realm) @@ -3159,11 +3154,9 @@ class KeycloakAPI(object): """ client_role_scope_url = URL_CLIENT_ROLE_SCOPE_REALM.format(url=self.baseurl, realm=realm, id=clientid) try: - return json.loads(to_native(open_url(client_role_scope_url, method='GET', http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(client_role_scope_url, method='GET') except Exception as e: - self.fail_open_url(e, msg='Could not fetch roles scope for client %s in realm %s: %s' % (clientid, realm, str(e))) + self.fail_request(e, msg='Could not fetch roles scope for client %s in realm %s: %s' % (clientid, realm, str(e))) def update_client_role_scope_from_realm(self, payload, clientid, realm="master"): """ Update and fetch the realm roles from the client's scope on the Keycloak server. 
@@ -3174,11 +3167,10 @@ class KeycloakAPI(object): """ client_role_scope_url = URL_CLIENT_ROLE_SCOPE_REALM.format(url=self.baseurl, realm=realm, id=clientid) try: - open_url(client_role_scope_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(payload), validate_certs=self.validate_certs) + self._request(client_role_scope_url, method='POST', data=json.dumps(payload)) except Exception as e: - self.fail_open_url(e, msg='Could not update roles scope for client %s in realm %s: %s' % (clientid, realm, str(e))) + self.fail_request(e, msg='Could not update roles scope for client %s in realm %s: %s' % (clientid, realm, str(e))) return self.get_client_role_scope_from_realm(clientid, realm) @@ -3191,18 +3183,42 @@ class KeycloakAPI(object): """ client_role_scope_url = URL_CLIENT_ROLE_SCOPE_REALM.format(url=self.baseurl, realm=realm, id=clientid) try: - open_url(client_role_scope_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(payload), validate_certs=self.validate_certs) + self._request(client_role_scope_url, method='DELETE', data=json.dumps(payload)) except Exception as e: - self.fail_open_url(e, msg='Could not delete roles scope for client %s in realm %s: %s' % (clientid, realm, str(e))) + self.fail_request(e, msg='Could not delete roles scope for client %s in realm %s: %s' % (clientid, realm, str(e))) return self.get_client_role_scope_from_realm(clientid, realm) - def fail_open_url(self, e, msg, **kwargs): + def fail_request(self, e, msg, **kwargs): + """ Triggers a module failure. This should be called + when an exception occurs during/after a request. + Attempts to parse the exception e as an HTTP error + and append it to msg. + + :param e: exception which triggered the failure + :param msg: error message to display to the user + :param kwargs: additional arguments to pass to module.fail_json + :return: None + """ try: if isinstance(e, HTTPError): msg = "%s: %s" % (msg, to_native(e.read())) - except Exception as ingore: + except Exception: pass self.module.fail_json(msg, **kwargs) + + def fail_open_url(self, e, msg, **kwargs): + """ DEPRECATED: Use fail_request instead. + + Triggers a module failure. This should be called + when an exception occurs during/after a request. + Attempts to parse the exception e as an HTTP error + and append it to msg. 
+ + :param e: exception which triggered the failure + :param msg: error message to display to the user + :param kwargs: additional arguments to pass to module.fail_json + :return: None + """ + return self.fail_request(e, msg, **kwargs) diff --git a/plugins/module_utils/known_hosts.py b/plugins/module_utils/known_hosts.py index 25dd3e174e..9a17355b4e 100644 --- a/plugins/module_utils/known_hosts.py +++ b/plugins/module_utils/known_hosts.py @@ -103,13 +103,11 @@ def not_in_host_file(self, host): continue try: - host_fh = open(hf) + with open(hf) as host_fh: + data = host_fh.read() except IOError: hfiles_not_found += 1 continue - else: - data = host_fh.read() - host_fh.close() for line in data.split("\n"): if line is None or " " not in line: diff --git a/plugins/module_utils/mh/base.py b/plugins/module_utils/mh/base.py index b10762eaba..cf054f59fd 100644 --- a/plugins/module_utils/mh/base.py +++ b/plugins/module_utils/mh/base.py @@ -15,6 +15,7 @@ from ansible_collections.community.general.plugins.module_utils.mh.deco import m class ModuleHelperBase(object): module = None ModuleHelperException = _MHE + # in 12.0.0 add 'debug' to the tuple _delegated_to_module = ( 'check_mode', 'get_bin_path', 'warn', 'deprecate', ) @@ -28,6 +29,18 @@ class ModuleHelperBase(object): if not isinstance(self.module, AnsibleModule): self.module = AnsibleModule(**self.module) + # in 12.0.0 remove this if statement entirely + if hasattr(self, 'debug'): + msg = ( + "This class ({cls}) has an attribute 'debug' defined and that is deprecated. " + "Method 'debug' will be an integral part of ModuleHelper in community.general " + "12.0.0, delegated to the underlying AnsibleModule object. " + "Please rename the existing attribute to prevent this message from showing.".format(cls=self.__class__.__name__) + ) + self.deprecate(msg, version="12.0.0", collection_name="community.general") + else: + self._delegated_to_module = self._delegated_to_module + ('debug',) + @property def diff_mode(self): return self.module._diff diff --git a/plugins/module_utils/mh/mixins/deps.py b/plugins/module_utils/mh/mixins/deps.py deleted file mode 100644 index dd879ff4b2..0000000000 --- a/plugins/module_utils/mh/mixins/deps.py +++ /dev/null @@ -1,38 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2020, Alexei Znamensky -# Copyright (c) 2020, Ansible Project -# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) -# SPDX-License-Identifier: BSD-2-Clause - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -class DependencyCtxMgr(object): - """ - DEPRECATION WARNING - - This class is deprecated and will be removed in community.general 11.0.0 - Modules should use plugins/module_utils/deps.py instead. 
- """ - def __init__(self, name, msg=None): - self.name = name - self.msg = msg - self.has_it = False - self.exc_type = None - self.exc_val = None - self.exc_tb = None - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.has_it = exc_type is None - self.exc_type = exc_type - self.exc_val = exc_val - self.exc_tb = exc_tb - return not self.has_it - - @property - def text(self): - return self.msg or str(self.exc_val) diff --git a/plugins/module_utils/mh/mixins/vars.py b/plugins/module_utils/mh/mixins/vars.py deleted file mode 100644 index 7db9904f93..0000000000 --- a/plugins/module_utils/mh/mixins/vars.py +++ /dev/null @@ -1,153 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2020, Alexei Znamensky -# Copyright (c) 2020, Ansible Project -# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) -# SPDX-License-Identifier: BSD-2-Clause - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -import copy - - -class VarMeta(object): - """ - DEPRECATION WARNING - - This class is deprecated and will be removed in community.general 11.0.0 - Modules should use the VarDict from plugins/module_utils/vardict.py instead. - """ - - NOTHING = object() - - def __init__(self, diff=False, output=True, change=None, fact=False): - self.init = False - self.initial_value = None - self.value = None - - self.diff = diff - self.change = diff if change is None else change - self.output = output - self.fact = fact - - def set(self, diff=None, output=None, change=None, fact=None, initial_value=NOTHING): - if diff is not None: - self.diff = diff - if output is not None: - self.output = output - if change is not None: - self.change = change - if fact is not None: - self.fact = fact - if initial_value is not self.NOTHING: - self.initial_value = copy.deepcopy(initial_value) - - def set_value(self, value): - if not self.init: - self.initial_value = copy.deepcopy(value) - self.init = True - self.value = value - return self - - @property - def has_changed(self): - return self.change and (self.initial_value != self.value) - - @property - def diff_result(self): - return None if not (self.diff and self.has_changed) else { - 'before': self.initial_value, - 'after': self.value, - } - - def __str__(self): - return "".format( - self.value, self.initial_value, self.diff, self.output, self.change - ) - - -class VarDict(object): - """ - DEPRECATION WARNING - - This class is deprecated and will be removed in community.general 11.0.0 - Modules should use the VarDict from plugins/module_utils/vardict.py instead. 
- """ - def __init__(self): - self._data = dict() - self._meta = dict() - - def __getitem__(self, item): - return self._data[item] - - def __setitem__(self, key, value): - self.set(key, value) - - def __getattr__(self, item): - try: - return self._data[item] - except KeyError: - return getattr(self._data, item) - - def __setattr__(self, key, value): - if key in ('_data', '_meta'): - super(VarDict, self).__setattr__(key, value) - else: - self.set(key, value) - - def meta(self, name): - return self._meta[name] - - def set_meta(self, name, **kwargs): - self.meta(name).set(**kwargs) - - def set(self, name, value, **kwargs): - if name in ('_data', '_meta'): - raise ValueError("Names _data and _meta are reserved for use by ModuleHelper") - self._data[name] = value - if name in self._meta: - meta = self.meta(name) - else: - meta = VarMeta(**kwargs) - meta.set_value(value) - self._meta[name] = meta - - def output(self): - return {k: v for k, v in self._data.items() if self.meta(k).output} - - def diff(self): - diff_results = [(k, self.meta(k).diff_result) for k in self._data] - diff_results = [dr for dr in diff_results if dr[1] is not None] - if diff_results: - before = dict((dr[0], dr[1]['before']) for dr in diff_results) - after = dict((dr[0], dr[1]['after']) for dr in diff_results) - return {'before': before, 'after': after} - return None - - def facts(self): - facts_result = {k: v for k, v in self._data.items() if self._meta[k].fact} - return facts_result if facts_result else None - - def change_vars(self): - return [v for v in self._data if self.meta(v).change] - - def has_changed(self, v): - return self._meta[v].has_changed - - -class VarsMixin(object): - """ - DEPRECATION WARNING - - This class is deprecated and will be removed in community.general 11.0.0 - Modules should use the VarDict from plugins/module_utils/vardict.py instead. - """ - def __init__(self, module=None): - self.vars = VarDict() - super(VarsMixin, self).__init__(module) - - def update_vars(self, meta=None, **kwargs): - if meta is None: - meta = {} - for k, v in kwargs.items(): - self.vars.set(k, v, **meta) diff --git a/plugins/module_utils/mh/module_helper.py b/plugins/module_utils/mh/module_helper.py index ca95199d9b..f0e2ad6e96 100644 --- a/plugins/module_utils/mh/module_helper.py +++ b/plugins/module_utils/mh/module_helper.py @@ -10,13 +10,9 @@ __metaclass__ = type from ansible.module_utils.common.dict_transformations import dict_merge -from ansible_collections.community.general.plugins.module_utils.vardict import VarDict as _NewVarDict # remove "as NewVarDict" in 11.0.0 -# (TODO: remove AnsibleModule!) pylint: disable-next=unused-import -from ansible_collections.community.general.plugins.module_utils.mh.base import AnsibleModule # noqa: F401 DEPRECATED, remove in 11.0.0 +from ansible_collections.community.general.plugins.module_utils.vardict import VarDict from ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin -# (TODO: remove mh.mixins.vars!) 
pylint: disable-next=unused-import -from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarsMixin, VarDict as _OldVarDict # noqa: F401 remove in 11.0.0 from ansible_collections.community.general.plugins.module_utils.mh.mixins.deprecate_attrs import DeprecateAttrsMixin @@ -26,24 +22,11 @@ class ModuleHelper(DeprecateAttrsMixin, ModuleHelperBase): diff_params = () change_params = () facts_params = () - use_old_vardict = True # remove in 11.0.0 - mute_vardict_deprecation = False def __init__(self, module=None): - if self.use_old_vardict: # remove first half of the if in 11.0.0 - self.vars = _OldVarDict() - super(ModuleHelper, self).__init__(module) - if not self.mute_vardict_deprecation: - self.module.deprecate( - "This class is using the old VarDict from ModuleHelper, which is deprecated. " - "Set the class variable use_old_vardict to False and make the necessary adjustments." - "The old VarDict class will be removed in community.general 11.0.0", - version="11.0.0", collection_name="community.general" - ) - else: - self.vars = _NewVarDict() - super(ModuleHelper, self).__init__(module) + super(ModuleHelper, self).__init__(module) + self.vars = VarDict() for name, value in self.module.params.items(): self.vars.set( name, value, @@ -66,9 +49,6 @@ class ModuleHelper(DeprecateAttrsMixin, ModuleHelperBase): self.update_vars(meta={"fact": True}, **kwargs) def _vars_changed(self): - if self.use_old_vardict: - return any(self.vars.has_changed(v) for v in self.vars.change_vars()) - return self.vars.has_changed def has_changed(self): diff --git a/plugins/module_utils/module_helper.py b/plugins/module_utils/module_helper.py index 366699329a..f70ae3515d 100644 --- a/plugins/module_utils/module_helper.py +++ b/plugins/module_utils/module_helper.py @@ -11,12 +11,8 @@ __metaclass__ = type from ansible_collections.community.general.plugins.module_utils.mh.module_helper import ( ModuleHelper, StateModuleHelper, - AnsibleModule # remove in 11.0.0 ) -from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin # noqa: F401 remove in 11.0.0 -from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyCtxMgr # noqa: F401 remove in 11.0.0 from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException # noqa: F401 from ansible_collections.community.general.plugins.module_utils.mh.deco import ( cause_changes, module_fails_on_exception, check_mode_skip, check_mode_skip_returns, ) -from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarMeta, VarDict, VarsMixin # noqa: F401 remove in 11.0.0 diff --git a/plugins/module_utils/pacemaker.py b/plugins/module_utils/pacemaker.py new file mode 100644 index 0000000000..f0f54cce9d --- /dev/null +++ b/plugins/module_utils/pacemaker.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2025, Dexter Le +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +_state_map = { + "present": "create", + "absent": "remove", + "status": "status", + "enabled": "enable", + "disabled": "disable", + "online": "start", + "offline": "stop", + "maintenance": "set", + "config": "config", + "cleanup": 
"cleanup", +} + + +def fmt_resource_type(value): + return [value[k] for k in ['resource_standard', 'resource_provider', 'resource_name'] if value.get(k) is not None] + + +def fmt_resource_operation(value): + cmd = [] + for op in value: + cmd.append("op") + cmd.append(op.get('operation_action')) + for operation_option in op.get('operation_option'): + cmd.append(operation_option) + + return cmd + + +def fmt_resource_argument(value): + return ['--group' if value['argument_action'] == 'group' else value['argument_action']] + value['argument_option'] + + +def get_pacemaker_maintenance_mode(runner): + with runner("cli_action config") as ctx: + rc, out, err = ctx.run(cli_action="property") + maintenance_mode_output = list(filter(lambda string: "maintenance-mode=true" in string.lower(), out.splitlines())) + return bool(maintenance_mode_output) + + +def pacemaker_runner(module, **kwargs): + runner_command = ['pcs'] + runner = CmdRunner( + module, + command=runner_command, + arg_formats=dict( + cli_action=cmd_runner_fmt.as_list(), + state=cmd_runner_fmt.as_map(_state_map), + name=cmd_runner_fmt.as_list(), + resource_type=cmd_runner_fmt.as_func(fmt_resource_type), + resource_option=cmd_runner_fmt.as_list(), + resource_operation=cmd_runner_fmt.as_func(fmt_resource_operation), + resource_meta=cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_val)("meta"), + resource_argument=cmd_runner_fmt.as_func(fmt_resource_argument), + apply_all=cmd_runner_fmt.as_bool("--all"), + wait=cmd_runner_fmt.as_opt_eq_val("--wait"), + config=cmd_runner_fmt.as_fixed("config"), + force=cmd_runner_fmt.as_bool("--force"), + ), + **kwargs + ) + return runner diff --git a/plugins/module_utils/pipx.py b/plugins/module_utils/pipx.py index de43f80b40..bb37712c21 100644 --- a/plugins/module_utils/pipx.py +++ b/plugins/module_utils/pipx.py @@ -71,36 +71,51 @@ def pipx_runner(module, command, **kwargs): return runner -def make_process_list(mod_helper, **kwargs): - def process_list(rc, out, err): - if not out: - return [] +def _make_entry(venv_name, venv, include_injected, include_deps): + entry = { + 'name': venv_name, + 'version': venv['metadata']['main_package']['package_version'], + 'pinned': venv['metadata']['main_package'].get('pinned'), + } + if include_injected: + entry['injected'] = {k: v['package_version'] for k, v in venv['metadata']['injected_packages'].items()} + if include_deps: + entry['dependencies'] = list(venv['metadata']['main_package']['app_paths_of_dependencies']) + return entry - results = [] + +def make_process_dict(include_injected, include_deps=False): + def process_dict(rc, out, err): + if not out: + return {} + + results = {} raw_data = json.loads(out) + for venv_name, venv in raw_data['venvs'].items(): + results[venv_name] = _make_entry(venv_name, venv, include_injected, include_deps) + + return results, raw_data + + return process_dict + + +def make_process_list(mod_helper, **kwargs): + # + # ATTENTION! 
+ # + # The function `make_process_list()` is deprecated and will be removed in community.general 13.0.0 + # + process_dict = make_process_dict(mod_helper, **kwargs) + + def process_list(rc, out, err): + res_dict, raw_data = process_dict(rc, out, err) + if kwargs.get("include_raw"): mod_helper.vars.raw_output = raw_data - if kwargs["name"]: - if kwargs["name"] in raw_data['venvs']: - data = {kwargs["name"]: raw_data['venvs'][kwargs["name"]]} - else: - data = {} - else: - data = raw_data['venvs'] - - for venv_name, venv in data.items(): - entry = { - 'name': venv_name, - 'version': venv['metadata']['main_package']['package_version'], - 'pinned': venv['metadata']['main_package'].get('pinned'), - } - if kwargs.get("include_injected"): - entry['injected'] = {k: v['package_version'] for k, v in venv['metadata']['injected_packages'].items()} - if kwargs.get("include_deps"): - entry['dependencies'] = list(venv['metadata']['main_package']['app_paths_of_dependencies']) - results.append(entry) - - return results - + return [ + entry + for name, entry in res_dict.items() + if name == kwargs.get("name") + ] return process_list diff --git a/plugins/module_utils/pkg_req.py b/plugins/module_utils/pkg_req.py new file mode 100644 index 0000000000..8e82ffd360 --- /dev/null +++ b/plugins/module_utils/pkg_req.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2025, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.six import raise_from + +from ansible_collections.community.general.plugins.module_utils import deps + + +with deps.declare("packaging"): + from packaging.requirements import Requirement + from packaging.version import parse as parse_version, InvalidVersion + + +class PackageRequirement: + def __init__(self, module, name): + self.module = module + self.parsed_name, self.requirement = self._parse_spec(name) + + def _parse_spec(self, name): + """ + Parse a package name that may include version specifiers using PEP 508. + Returns a tuple of (name, requirement) where requirement is of type packaging.requirements.Requirement and it may be None. 
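The heavy lifting in _parse_spec is delegated to the packaging library; a Requirement object exposes the parsed name, extras, and version specifiers directly. A minimal demonstration of what the parser gets back:

    from packaging.requirements import Requirement

    req = Requirement("package[foo,bar]>=1.0,!=1.5")
    print(req.name)                        # package
    print(sorted(req.extras))              # ['bar', 'foo']
    print(str(req.specifier))              # '!=1.5,>=1.0' (ordering may vary)
    print(req.specifier.contains("1.2"))   # True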
+ + Example inputs: + "package" + "package>=1.0" + "package>=1.0,<2.0" + "package[extra]>=1.0" + "package[foo,bar]>=1.0,!=1.5" + + :param name: Package name with optional version specifiers and extras + :return: Tuple of (name, requirement) + :raises ValueError: If the package specification is invalid + """ + if not name: + return name, None + + # Quick check for simple package names + if not any(c in name for c in '>= -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -import traceback -from time import sleep - -PROXMOXER_IMP_ERR = None -try: - from proxmoxer import ProxmoxAPI - from proxmoxer import __version__ as proxmoxer_version - HAS_PROXMOXER = True -except ImportError: - HAS_PROXMOXER = False - PROXMOXER_IMP_ERR = traceback.format_exc() - - -from ansible.module_utils.basic import env_fallback, missing_required_lib -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - - -def proxmox_auth_argument_spec(): - return dict( - api_host=dict(type='str', - required=True, - fallback=(env_fallback, ['PROXMOX_HOST']) - ), - api_port=dict(type='int', - fallback=(env_fallback, ['PROXMOX_PORT']) - ), - api_user=dict(type='str', - required=True, - fallback=(env_fallback, ['PROXMOX_USER']) - ), - api_password=dict(type='str', - no_log=True, - fallback=(env_fallback, ['PROXMOX_PASSWORD']) - ), - api_token_id=dict(type='str', - no_log=False - ), - api_token_secret=dict(type='str', - no_log=True - ), - validate_certs=dict(type='bool', - default=False - ), - ) - - -def proxmox_to_ansible_bool(value): - '''Convert Proxmox representation of a boolean to be ansible-friendly''' - return True if value == 1 else False - - -def ansible_to_proxmox_bool(value): - '''Convert Ansible representation of a boolean to be proxmox-friendly''' - if value is None: - return None - - if not isinstance(value, bool): - raise ValueError("%s must be of type bool not %s" % (value, type(value))) - - return 1 if value else 0 - - -class ProxmoxAnsible(object): - """Base class for Proxmox modules""" - TASK_TIMED_OUT = 'timeout expired' - - def __init__(self, module): - if not HAS_PROXMOXER: - module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR) - - self.module = module - self.proxmoxer_version = proxmoxer_version - self.proxmox_api = self._connect() - # Test token validity - try: - self.proxmox_api.version.get() - except Exception as e: - module.fail_json(msg='%s' % e, exception=traceback.format_exc()) - - def _connect(self): - api_host = self.module.params['api_host'] - api_port = self.module.params['api_port'] - api_user = self.module.params['api_user'] - api_password = self.module.params['api_password'] - api_token_id = self.module.params['api_token_id'] - api_token_secret = self.module.params['api_token_secret'] - validate_certs = self.module.params['validate_certs'] - - auth_args = {'user': api_user} - - if api_port: - auth_args['port'] = api_port - - if api_password: - auth_args['password'] = api_password - else: - if self.proxmoxer_version < LooseVersion('1.1.0'): - self.module.fail_json('Using "token_name" and "token_value" require proxmoxer>=1.1.0') - auth_args['token_name'] = api_token_id - auth_args['token_value'] = api_token_secret - - try: - return ProxmoxAPI(api_host, verify_ssl=validate_certs, **auth_args) - except Exception as e: - 
self.module.fail_json(msg='%s' % e, exception=traceback.format_exc()) - - def version(self): - try: - apiversion = self.proxmox_api.version.get() - return LooseVersion(apiversion['version']) - except Exception as e: - self.module.fail_json(msg='Unable to retrieve Proxmox VE version: %s' % e) - - def get_node(self, node): - try: - nodes = [n for n in self.proxmox_api.nodes.get() if n['node'] == node] - except Exception as e: - self.module.fail_json(msg='Unable to retrieve Proxmox VE node: %s' % e) - return nodes[0] if nodes else None - - def get_nextvmid(self): - try: - return self.proxmox_api.cluster.nextid.get() - except Exception as e: - self.module.fail_json(msg='Unable to retrieve next free vmid: %s' % e) - - def get_vmid(self, name, ignore_missing=False, choose_first_if_multiple=False): - try: - vms = [vm['vmid'] for vm in self.proxmox_api.cluster.resources.get(type='vm') if vm.get('name') == name] - except Exception as e: - self.module.fail_json(msg='Unable to retrieve list of VMs filtered by name %s: %s' % (name, e)) - - if not vms: - if ignore_missing: - return None - - self.module.fail_json(msg='No VM with name %s found' % name) - elif len(vms) > 1: - self.module.fail_json(msg='Multiple VMs with name %s found, provide vmid instead' % name) - - return vms[0] - - def get_vm(self, vmid, ignore_missing=False): - try: - vms = [vm for vm in self.proxmox_api.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid)] - except Exception as e: - self.module.fail_json(msg='Unable to retrieve list of VMs filtered by vmid %s: %s' % (vmid, e)) - - if vms: - return vms[0] - else: - if ignore_missing: - return None - - self.module.fail_json(msg='VM with vmid %s does not exist in cluster' % vmid) - - def api_task_ok(self, node, taskid): - try: - status = self.proxmox_api.nodes(node).tasks(taskid).status.get() - return status['status'] == 'stopped' and status['exitstatus'] == 'OK' - except Exception as e: - self.module.fail_json(msg='Unable to retrieve API task ID from node %s: %s' % (node, e)) - - def api_task_complete(self, node_name, task_id, timeout): - """Wait until the task stops or times out. - - :param node_name: Proxmox node name where the task is running. - :param task_id: ID of the running task. - :param timeout: Timeout in seconds to wait for the task to complete. - :return: Task completion status (True/False) and ``exitstatus`` message when status=False. 
- """ - status = {} - while timeout: - try: - status = self.proxmox_api.nodes(node_name).tasks(task_id).status.get() - except Exception as e: - self.module.fail_json(msg='Unable to retrieve API task ID from node %s: %s' % (node_name, e)) - - if status['status'] == 'stopped': - if status['exitstatus'] == 'OK': - return True, None - else: - return False, status['exitstatus'] - else: - timeout -= 1 - if timeout <= 0: - return False, ProxmoxAnsible.TASK_TIMED_OUT - sleep(1) - - def get_pool(self, poolid): - """Retrieve pool information - - :param poolid: str - name of the pool - :return: dict - pool information - """ - try: - return self.proxmox_api.pools(poolid).get() - except Exception as e: - self.module.fail_json(msg="Unable to retrieve pool %s information: %s" % (poolid, e)) - - def get_storages(self, type): - """Retrieve storages information - - :param type: str, optional - type of storages - :return: list of dicts - array of storages - """ - try: - return self.proxmox_api.storage.get(type=type) - except Exception as e: - self.module.fail_json(msg="Unable to retrieve storages information with type %s: %s" % (type, e)) - - def get_storage_content(self, node, storage, content=None, vmid=None): - try: - return ( - self.proxmox_api.nodes(node) - .storage(storage) - .content() - .get(content=content, vmid=vmid) - ) - except Exception as e: - self.module.fail_json( - msg="Unable to list content on %s, %s for %s and %s: %s" - % (node, storage, content, vmid, e) - ) diff --git a/plugins/module_utils/puppet.py b/plugins/module_utils/puppet.py index e06683b3ee..251d5618d3 100644 --- a/plugins/module_utils/puppet.py +++ b/plugins/module_utils/puppet.py @@ -95,10 +95,7 @@ def puppet_runner(module): skip_tags=cmd_runner_fmt.as_func(lambda v: ["--skip_tags", ",".join(v)]), certname=cmd_runner_fmt.as_opt_eq_val("--certname"), noop=cmd_runner_fmt.as_func(noop_func), - use_srv_records=cmd_runner_fmt.as_map({ - True: "--usr_srv_records", - False: "--no-usr_srv_records", - }), + use_srv_records=cmd_runner_fmt.as_bool("--usr_srv_records", "--no-usr_srv_records", ignore_none=True), logdest=cmd_runner_fmt.as_map(_logdest_map, default=[]), modulepath=cmd_runner_fmt.as_opt_eq_val("--modulepath"), _execute=cmd_runner_fmt.as_func(execute_func), diff --git a/plugins/module_utils/python_runner.py b/plugins/module_utils/python_runner.py index b65867c61e..a8e9e651be 100644 --- a/plugins/module_utils/python_runner.py +++ b/plugins/module_utils/python_runner.py @@ -19,7 +19,7 @@ class PythonRunner(CmdRunner): self.venv = venv self.has_venv = venv is not None - if (os.path.isabs(python) or '/' in python): + if os.path.isabs(python) or '/' in python: self.python = python elif self.has_venv: if path_prefix is None: diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index f9649ce6d7..bc93f0e498 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -10,9 +10,7 @@ import json import os import random import string -import gzip import time -from io import BytesIO from ansible.module_utils.urls import open_url from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.common.text.converters import to_text @@ -21,8 +19,6 @@ from ansible.module_utils.six import text_type from ansible.module_utils.six.moves import http_client from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError from ansible.module_utils.six.moves.urllib.parse import urlparse -from ansible.module_utils.ansible_release import 
__version__ as ansible_version -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion GET_HEADERS = {'accept': 'application/json', 'OData-Version': '4.0'} POST_HEADERS = {'content-type': 'application/json', 'accept': 'application/json', @@ -38,6 +34,21 @@ FAIL_MSG = 'Issuing a data modification command without specifying the '\ 'than one %(resource)s is no longer allowed. Use the `resource_id` '\ 'option to specify the target %(resource)s ID.' +# Use together with the community.general.redfish docs fragment +REDFISH_COMMON_ARGUMENT_SPEC = { + "validate_certs": { + "type": "bool", + "default": False, + }, + "ca_path": { + "type": "path", + }, + "ciphers": { + "type": "list", + "elements": "str", + }, +} + class RedfishUtils(object): @@ -53,8 +64,10 @@ class RedfishUtils(object): self.resource_id = resource_id self.data_modification = data_modification self.strip_etag_quotes = strip_etag_quotes - self.ciphers = ciphers + self.ciphers = ciphers if ciphers is not None else module.params.get("ciphers") self._vendor = None + self.validate_certs = module.params.get("validate_certs", False) + self.ca_path = module.params.get("ca_path") def _auth_params(self, headers): """ @@ -132,6 +145,17 @@ class RedfishUtils(object): resp['msg'] = 'Properties in %s are already set' % uri return resp + def _request(self, uri, **kwargs): + kwargs.setdefault("validate_certs", self.validate_certs) + kwargs.setdefault("follow_redirects", "all") + kwargs.setdefault("use_proxy", True) + kwargs.setdefault("timeout", self.timeout) + kwargs.setdefault("ciphers", self.ciphers) + kwargs.setdefault("ca_path", self.ca_path) + resp = open_url(uri, **kwargs) + headers = {k.lower(): v for (k, v) in resp.info().items()} + return resp, headers + # The following functions are to send GET/POST/PATCH/DELETE requests def get_request(self, uri, override_headers=None, allow_no_resp=False, timeout=None): req_headers = dict(GET_HEADERS) @@ -145,19 +169,17 @@ class RedfishUtils(object): # in case the caller will be using sessions later. 
if uri == (self.root_uri + self.service_root): basic_auth = False - resp = open_url(uri, method="GET", headers=req_headers, - url_username=username, url_password=password, - force_basic_auth=basic_auth, validate_certs=False, - follow_redirects='all', - use_proxy=True, timeout=timeout, ciphers=self.ciphers) - headers = {k.lower(): v for (k, v) in resp.info().items()} + resp, headers = self._request( + uri, + method="GET", + headers=req_headers, + url_username=username, + url_password=password, + force_basic_auth=basic_auth, + timeout=timeout, + ) try: - if headers.get('content-encoding') == 'gzip' and LooseVersion(ansible_version) < LooseVersion('2.14'): - # Older versions of Ansible do not automatically decompress the data - # Starting in 2.14, open_url will decompress the response data by default - data = json.loads(to_native(gzip.open(BytesIO(resp.read()), 'rt', encoding='utf-8').read())) - else: - data = json.loads(to_native(resp.read())) + data = json.loads(to_native(resp.read())) except Exception as e: # No response data; this is okay in certain cases data = None @@ -194,18 +216,20 @@ class RedfishUtils(object): req_headers['content-type'] = multipart_encoder[1] else: data = json.dumps(pyld) - resp = open_url(uri, data=data, - headers=req_headers, method="POST", - url_username=username, url_password=password, - force_basic_auth=basic_auth, validate_certs=False, - follow_redirects='all', - use_proxy=True, timeout=self.timeout, ciphers=self.ciphers) + resp, headers = self._request( + uri, + data=data, + headers=req_headers, + method="POST", + url_username=username, + url_password=password, + force_basic_auth=basic_auth, + ) try: data = json.loads(to_native(resp.read())) except Exception as e: # No response data; this is okay in many cases data = None - headers = {k.lower(): v for (k, v) in resp.info().items()} except HTTPError as e: msg, data = self._get_extended_message(e) return {'ret': False, @@ -248,12 +272,15 @@ class RedfishUtils(object): username, password, basic_auth = self._auth_params(req_headers) try: - resp = open_url(uri, data=json.dumps(pyld), - headers=req_headers, method="PATCH", - url_username=username, url_password=password, - force_basic_auth=basic_auth, validate_certs=False, - follow_redirects='all', - use_proxy=True, timeout=self.timeout, ciphers=self.ciphers) + resp, dummy = self._request( + uri, + data=json.dumps(pyld), + headers=req_headers, + method="PATCH", + url_username=username, + url_password=password, + force_basic_auth=basic_auth, + ) except HTTPError as e: msg, data = self._get_extended_message(e) return {'ret': False, 'changed': False, @@ -283,12 +310,15 @@ class RedfishUtils(object): req_headers['If-Match'] = etag username, password, basic_auth = self._auth_params(req_headers) try: - resp = open_url(uri, data=json.dumps(pyld), - headers=req_headers, method="PUT", - url_username=username, url_password=password, - force_basic_auth=basic_auth, validate_certs=False, - follow_redirects='all', - use_proxy=True, timeout=self.timeout, ciphers=self.ciphers) + resp, dummy = self._request( + uri, + data=json.dumps(pyld), + headers=req_headers, + method="PUT", + url_username=username, + url_password=password, + force_basic_auth=basic_auth, + ) except HTTPError as e: msg, data = self._get_extended_message(e) return {'ret': False, @@ -309,12 +339,15 @@ class RedfishUtils(object): username, password, basic_auth = self._auth_params(req_headers) try: data = json.dumps(pyld) if pyld else None - resp = open_url(uri, data=data, - headers=req_headers, method="DELETE", - 
url_username=username, url_password=password, - force_basic_auth=basic_auth, validate_certs=False, - follow_redirects='all', - use_proxy=True, timeout=self.timeout, ciphers=self.ciphers) + resp, dummy = self._request( + uri, + data=data, + headers=req_headers, + method="DELETE", + url_username=username, + url_password=password, + force_basic_auth=basic_auth, + ) except HTTPError as e: msg, data = self._get_extended_message(e) return {'ret': False, @@ -409,9 +442,6 @@ class RedfishUtils(object): pass return msg, data - def _init_session(self): - self.module.deprecate("Method _init_session is deprecated and will be removed.", version="11.0.0", collection_name="community.general") - def _get_vendor(self): # If we got the vendor info once, don't get it again if self._vendor is not None: @@ -1119,7 +1149,8 @@ class RedfishUtils(object): key = "Actions" reset_type_values = ['On', 'ForceOff', 'GracefulShutdown', 'GracefulRestart', 'ForceRestart', 'Nmi', - 'ForceOn', 'PushPowerButton', 'PowerCycle'] + 'ForceOn', 'PushPowerButton', 'PowerCycle', + 'FullPowerCycle'] # command should be PowerOn, PowerForceOff, etc. if not command.startswith('Power'): @@ -3951,3 +3982,38 @@ class RedfishUtils(object): "rsp_uri": rsp_uri } return res + + def get_accountservice_properties(self): + # Find the AccountService resource + response = self.get_request(self.root_uri + self.service_root) + if response['ret'] is False: + return response + data = response['data'] + accountservice_uri = data.get("AccountService", {}).get("@odata.id") + if accountservice_uri is None: + return {'ret': False, 'msg': "AccountService resource not found"} + + response = self.get_request(self.root_uri + accountservice_uri) + if response['ret'] is False: + return response + return { + 'ret': True, + 'entries': response['data'] + } + + def get_power_restore_policy(self, systems_uri): + # Retrieve System resource + response = self.get_request(self.root_uri + systems_uri) + if response['ret'] is False: + return response + return { + 'ret': True, + 'entries': response['data']['PowerRestorePolicy'] + } + + def get_multi_power_restore_policy(self): + return self.aggregate_systems(self.get_power_restore_policy) + + def set_power_restore_policy(self, policy): + body = {'PowerRestorePolicy': policy} + return self.patch_request(self.root_uri + self.systems_uri, body, check_pyld=True) diff --git a/plugins/module_utils/snap.py b/plugins/module_utils/snap.py index 253269b9a9..e55a3a13a5 100644 --- a/plugins/module_utils/snap.py +++ b/plugins/module_utils/snap.py @@ -41,8 +41,15 @@ def snap_runner(module, **kwargs): options=cmd_runner_fmt.as_list(), info=cmd_runner_fmt.as_fixed("info"), dangerous=cmd_runner_fmt.as_bool("--dangerous"), + version=cmd_runner_fmt.as_fixed("version"), ), check_rc=False, **kwargs ) return runner + + +def get_version(runner): + with runner("version") as ctx: + rc, out, err = ctx.run() + return dict(x.split() for x in out.splitlines() if len(x.split()) == 2) diff --git a/plugins/module_utils/systemd.py b/plugins/module_utils/systemd.py new file mode 100644 index 0000000000..5d74118d12 --- /dev/null +++ b/plugins/module_utils/systemd.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2025, Marco Noce +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +from 
ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +def systemd_runner(module, command, **kwargs): + arg_formats = dict( + version=cmd_runner_fmt.as_fixed("--version"), + list_units=cmd_runner_fmt.as_fixed(["list-units", "--no-pager"]), + types=cmd_runner_fmt.as_func(lambda v: [] if not v else ["--type", ",".join(v)]), + all=cmd_runner_fmt.as_fixed("--all"), + plain=cmd_runner_fmt.as_fixed("--plain"), + no_legend=cmd_runner_fmt.as_fixed("--no-legend"), + show=cmd_runner_fmt.as_fixed("show"), + props=cmd_runner_fmt.as_func(lambda v: [] if not v else ["-p", ",".join(v)]), + dashdash=cmd_runner_fmt.as_fixed("--"), + unit=cmd_runner_fmt.as_list(), + ) + + runner = CmdRunner( + module, + command=command, + arg_formats=arg_formats, + check_rc=True, + **kwargs + ) + return runner diff --git a/plugins/module_utils/xdg_mime.py b/plugins/module_utils/xdg_mime.py new file mode 100644 index 0000000000..f84b9ef7ea --- /dev/null +++ b/plugins/module_utils/xdg_mime.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2025, Marcos Alano +# Based on gio_mime module. Copyright (c) 2022, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +def xdg_mime_runner(module, **kwargs): + return CmdRunner( + module, + command=['xdg-mime'], + arg_formats=dict( + default=cmd_runner_fmt.as_fixed('default'), + query=cmd_runner_fmt.as_fixed('query'), + mime_types=cmd_runner_fmt.as_list(), + handler=cmd_runner_fmt.as_list(), + version=cmd_runner_fmt.as_fixed('--version'), + ), + **kwargs + ) + + +def xdg_mime_get(runner, mime_type): + def process(rc, out, err): + if not out.strip(): + return None + out = out.splitlines()[0] + return out.split()[-1] + + with runner("query default mime_types", output_process=process) as ctx: + return ctx.run(mime_types=mime_type) diff --git a/plugins/modules/aerospike_migrations.py b/plugins/modules/aerospike_migrations.py index 69da5c879a..d9440fdb4e 100644 --- a/plugins/modules/aerospike_migrations.py +++ b/plugins/modules/aerospike_migrations.py @@ -15,7 +15,8 @@ short_description: Check or wait for migrations between nodes description: - This can be used to check for migrations in a cluster. This makes it easy to do a rolling upgrade/update on Aerospike nodes. - - If waiting for migrations is not desired, simply just poll until port 3000 if available or C(asinfo -v status) returns ok. + - If waiting for migrations is not desired, simply just poll until port 3000 if available or C(asinfo -v status) returns + ok. author: "Albert Autin (@Alb0t)" extends_documentation_fragment: - community.general.attributes @@ -28,7 +29,6 @@ options: host: description: - Which host do we use as seed for info connection. - required: false type: str default: localhost port: @@ -69,7 +69,7 @@ options: type: bool min_cluster_size: description: - - Check will return bad until cluster size is met or until tries is exhausted. + - Check fails until cluster size is met or until tries is exhausted. 
required: false type: int default: 1 @@ -93,10 +93,10 @@ options: default: migrate_rx_partitions_remaining target_cluster_size: description: - - When all aerospike builds in the cluster are greater than version 4.3, then the C(cluster-stable) info command will - be used. Inside this command, you can optionally specify what the target cluster size is - but it is not necessary. + - When all aerospike builds in the cluster are greater than version 4.3, then the C(cluster-stable) info command is + used. Inside this command, you can optionally specify what the target cluster size is - but it is not necessary. You can still rely on O(min_cluster_size) if you do not want to use this option. - - If this option is specified on a cluster that has at least one host <4.3 then it will be ignored until the min version + - If this option is specified on a cluster that has at least one host <4.3 then it is ignored until the min version reaches 4.3. required: false type: int @@ -179,19 +179,19 @@ else: def run_module(): """run ansible module""" module_args = dict( - host=dict(type='str', required=False, default='localhost'), - port=dict(type='int', required=False, default=3000), - connect_timeout=dict(type='int', required=False, default=1000), - consecutive_good_checks=dict(type='int', required=False, default=3), - sleep_between_checks=dict(type='int', required=False, default=60), - tries_limit=dict(type='int', required=False, default=300), + host=dict(type='str', default='localhost'), + port=dict(type='int', default=3000), + connect_timeout=dict(type='int', default=1000), + consecutive_good_checks=dict(type='int', default=3), + sleep_between_checks=dict(type='int', default=60), + tries_limit=dict(type='int', default=300), local_only=dict(type='bool', required=True), - min_cluster_size=dict(type='int', required=False, default=1), - target_cluster_size=dict(type='int', required=False, default=None), - fail_on_cluster_change=dict(type='bool', required=False, default=True), - migrate_tx_key=dict(type='str', required=False, no_log=False, + min_cluster_size=dict(type='int', default=1), + target_cluster_size=dict(type='int'), + fail_on_cluster_change=dict(type='bool', default=True), + migrate_tx_key=dict(type='str', no_log=False, default="migrate_tx_partitions_remaining"), - migrate_rx_key=dict(type='str', required=False, no_log=False, + migrate_rx_key=dict(type='str', no_log=False, default="migrate_rx_partitions_remaining") ) diff --git a/plugins/modules/airbrake_deployment.py b/plugins/modules/airbrake_deployment.py index 3a93a16ee0..0fe04f21d6 100644 --- a/plugins/modules/airbrake_deployment.py +++ b/plugins/modules/airbrake_deployment.py @@ -71,7 +71,7 @@ options: type: str validate_certs: description: - - If V(false), SSL certificates for the target url will not be validated. This should only be used on personally controlled + - If V(false), SSL certificates for the target URL is not validated. This should only be used on personally controlled sites using self-signed certificates. 
required: false default: true @@ -114,11 +114,11 @@ def main(): project_id=dict(required=True, no_log=True, type='str'), project_key=dict(required=True, no_log=True, type='str'), environment=dict(required=True, type='str'), - user=dict(required=False, type='str'), - repo=dict(required=False, type='str'), - revision=dict(required=False, type='str'), - version=dict(required=False, type='str'), - url=dict(required=False, default='https://api.airbrake.io/api/v4/projects/', type='str'), + user=dict(type='str'), + repo=dict(type='str'), + revision=dict(type='str'), + version=dict(type='str'), + url=dict(default='https://api.airbrake.io/api/v4/projects/', type='str'), validate_certs=dict(default=True, type='bool'), ), supports_check_mode=True, diff --git a/plugins/modules/aix_filesystem.py b/plugins/modules/aix_filesystem.py index 8934d583ff..92a734e8ac 100644 --- a/plugins/modules/aix_filesystem.py +++ b/plugins/modules/aix_filesystem.py @@ -47,7 +47,7 @@ options: description: - Logical volume (LV) device name or remote export device to create a NFS file system. - It is used to create a file system on an already existing logical volume or the exported NFS file system. - - If not mentioned a new logical volume name will be created following AIX standards (LVM). + - If not mentioned a new logical volume name is created following AIX standards (LVM). type: str fs_type: description: @@ -81,14 +81,14 @@ options: size: description: - Specifies the file system size. - - For already V(present) it will be resized. - - 512-byte blocks, Megabytes or Gigabytes. If the value has M specified it will be in Megabytes. If the value has G - specified it will be in Gigabytes. - - If no M or G the value will be 512-byte blocks. - - If "+" is specified in begin of value, the value will be added. - - If "-" is specified in begin of value, the value will be removed. - - If "+" or "-" is not specified, the total value will be the specified. - - Size will respects the LVM AIX standards. + - For already present it resizes the filesystem. + - 512-byte blocks, megabytes or gigabytes. If the value has M specified it is in megabytes. If the value has G specified + it is in gigabytes. + - If no M or G the value is 512-byte blocks. + - If V(+) is specified in begin of value, the value is added. + - If V(-) is specified in begin of value, the value is removed. + - If neither V(+) nor V(-) is specified, then the total value is the specified. + - Size respects the LVM AIX standards. type: str state: description: @@ -165,16 +165,6 @@ EXAMPLES = r""" state: absent """ -RETURN = r""" -changed: - description: Return changed for aix_filesystems actions as true or false. - returned: always - type: bool -msg: - description: Return message regarding the action. - returned: always - type: str -""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils._mount import ismount diff --git a/plugins/modules/aix_inittab.py b/plugins/modules/aix_inittab.py index 0c32f91e7f..ece4e95547 100644 --- a/plugins/modules/aix_inittab.py +++ b/plugins/modules/aix_inittab.py @@ -112,16 +112,6 @@ name: returned: always type: str sample: startmyservice -msg: - description: Action done with the C(inittab) entry. - returned: changed - type: str - sample: changed inittab entry startmyservice -changed: - description: Whether the C(inittab) changed or not. 
- returned: always - type: bool - sample: true """ # Import necessary libraries diff --git a/plugins/modules/aix_lvg.py b/plugins/modules/aix_lvg.py index 29c0b7d3f9..c41e21124e 100644 --- a/plugins/modules/aix_lvg.py +++ b/plugins/modules/aix_lvg.py @@ -36,7 +36,7 @@ options: description: - List of comma-separated devices to use as physical devices in this volume group. - Required when creating or extending (V(present) state) the volume group. - - If not informed reducing (V(absent) state) the volume group will be removed. + - If not informed reducing (V(absent) state) the volume group is removed. type: list elements: str state: @@ -57,7 +57,7 @@ options: choices: [big, normal, scalable] default: normal notes: - - AIX will permit remove VG only if all LV/Filesystems are not busy. + - AIX allows removing VG only if all LV/Filesystems are not busy. - Module does not modify PP size for already present volume group. """ diff --git a/plugins/modules/ali_instance.py b/plugins/modules/ali_instance.py index 608e1060fc..050794d55c 100644 --- a/plugins/modules/ali_instance.py +++ b/plugins/modules/ali_instance.py @@ -45,8 +45,7 @@ options: type: str availability_zone: description: - - Aliyun availability zone ID in which to launch the instance. If it is not specified, it will be allocated by system - automatically. + - Aliyun availability zone ID in which to launch the instance. If it is not specified, it is allocated by system automatically. aliases: ['alicloud_zone', 'zone_id'] type: str image_id: @@ -109,7 +108,7 @@ options: version_added: '0.2.0' password: description: - - The password to login instance. After rebooting instances, modified password will take effect. + - The password to login instance. After rebooting instances, modified password is effective. type: str system_disk_category: description: @@ -140,7 +139,7 @@ options: description: - O(count) determines how many instances based on a specific tag criteria should be present. This can be expressed in multiple ways and is shown in the EXAMPLES section. The specified count_tag must already exist or be passed in as - the O(tags) option. If it is not specified, it will be replaced by O(instance_name). + the O(tags) option. If it is not specified, it is replaced by O(instance_name). type: str allocate_public_ip: description: @@ -172,8 +171,7 @@ options: type: int instance_ids: description: - - A list of instance ids. It is required when need to operate existing instances. If it is specified, O(count) will - lose efficacy. + - A list of instance IDs. It is required when need to operate existing instances. If it is specified, O(count) is ignored. type: list elements: str force: @@ -203,7 +201,7 @@ options: user_data: description: - User-defined data to customize the startup behaviors of an ECS instance and to pass data into an ECS instance. It - only will take effect when launching the new ECS instances. + only takes effect when launching the new ECS instances. required: false type: str ram_role_name: @@ -226,7 +224,7 @@ options: version_added: '0.2.0' period_unit: description: - - The duration unit that you will buy the resource. It is valid when O(instance_charge_type=PrePaid). + - The duration unit that you are buying the resource. It is valid when O(instance_charge_type=PrePaid). choices: ['Month', 'Week'] default: 'Month' type: str @@ -424,7 +422,7 @@ instances: type: str sample: 42.10.2.2 expired_time: - description: The time the instance will expire. + description: The time the instance expires. 
returned: always type: str sample: "2099-12-31T15:59Z" @@ -554,7 +552,7 @@ instances: type: str sample: 43.0.0.1 resource_group_id: - description: The id of the resource group to which the instance belongs. + description: The ID of the resource group to which the instance belongs. returned: always type: str sample: my-ecs-group @@ -615,7 +613,7 @@ ids: description: List of ECS instance IDs. returned: always type: list - sample: [i-12345er, i-3245fs] + sample: ["i-12345er", "i-3245fs"] """ import re diff --git a/plugins/modules/ali_instance_info.py b/plugins/modules/ali_instance_info.py index b86ce2031d..7be5b8cda6 100644 --- a/plugins/modules/ali_instance_info.py +++ b/plugins/modules/ali_instance_info.py @@ -167,7 +167,7 @@ instances: type: str sample: 42.10.2.2 expired_time: - description: The time the instance will expire. + description: The time the instance expires. returned: always type: str sample: "2099-12-31T15:59Z" @@ -297,7 +297,7 @@ instances: type: str sample: 43.0.0.1 resource_group_id: - description: The id of the resource group to which the instance belongs. + description: The ID of the resource group to which the instance belongs. returned: always type: str sample: my-ecs-group @@ -341,7 +341,7 @@ ids: description: List of ECS instance IDs. returned: always type: list - sample: [i-12345er, i-3245fs] + sample: ["i-12345er", "i-3245fs"] """ from ansible.module_utils.basic import AnsibleModule, missing_required_lib diff --git a/plugins/modules/android_sdk.py b/plugins/modules/android_sdk.py index c36c22eb0c..a9bffa50ea 100644 --- a/plugins/modules/android_sdk.py +++ b/plugins/modules/android_sdk.py @@ -28,8 +28,8 @@ version_added: 10.2.0 options: accept_licenses: description: - - If this is set to V(true), the module will try to accept license prompts generated by C(sdkmanager) during package - installation. Otherwise, every license prompt will be rejected. + - If this is set to V(true), the module attempts to accept license prompts generated by C(sdkmanager) during package + installation. Otherwise, every license prompt is rejected. type: bool default: false name: @@ -64,16 +64,16 @@ requirements: notes: - For some of the packages installed by C(sdkmanager) is it necessary to accept licenses. Usually it is done through command line prompt in a form of a Y/N question when a licensed package is requested to be installed. If there are several packages - requested for installation and at least two of them belong to different licenses, the C(sdkmanager) tool will prompt for - these licenses in a loop. In order to install packages, the module must be able to answer these license prompts. Currently, + requested for installation and at least two of them belong to different licenses, the C(sdkmanager) tool prompts for these + licenses in a loop. In order to install packages, the module must be able to answer these license prompts. Currently, it is only possible to answer one license prompt at a time, meaning that instead of installing multiple packages as a - single invocation of the C(sdkmanager --install) command, it will be done by executing the command independently for each - package. This makes sure that at most only one license prompt will need to be answered. At the time of writing this module, - a C(sdkmanager)'s package may belong to at most one license type that needs to be accepted. However, if this changes - in the future, the module may hang as there might be more prompts generated by the C(sdkmanager) tool which the module - will not be able to answer. 
If this becomes the case, file an issue and in the meantime, consider accepting all the licenses - in advance, as it is described in the C(sdkmanager) L(documentation,https://developer.android.com/tools/sdkmanager#accept-licenses), - for instance, using the M(ansible.builtin.command) module. + single invocation of the C(sdkmanager --install) command, it is done by executing the command independently for each package. + This makes sure that at most only one license prompt needs to be answered. At the time of writing this module, a C(sdkmanager)'s + package may belong to at most one license type that needs to be accepted. However, if this changes in the future, the + module may hang as there might be more prompts generated by the C(sdkmanager) tool which the module is unable to answer. + If this becomes the case, file an issue and in the meantime, consider accepting all the licenses in advance, as it is + described in the C(sdkmanager) L(documentation,https://developer.android.com/tools/sdkmanager#accept-licenses), for instance, + using the M(ansible.builtin.command) module. seealso: - name: sdkmanager tool documentation description: Detailed information of how to install and use sdkmanager command line tool. @@ -126,13 +126,13 @@ installed: description: A list of packages that have been installed. returned: when packages have changed type: list - sample: ['build-tools;34.0.0', 'platform-tools'] + sample: ["build-tools;34.0.0", "platform-tools"] removed: description: A list of packages that have been removed. returned: when packages have changed type: list - sample: ['build-tools;34.0.0', 'platform-tools'] + sample: ["build-tools;34.0.0", "platform-tools"] """ from ansible_collections.community.general.plugins.module_utils.mh.module_helper import StateModuleHelper @@ -150,7 +150,6 @@ class AndroidSdk(StateModuleHelper): ), supports_check_mode=True ) - use_old_vardict = False def __init_module__(self): self.sdkmanager = AndroidSdkManager(self.module) diff --git a/plugins/modules/ansible_galaxy_install.py b/plugins/modules/ansible_galaxy_install.py index ad055dfa14..4712ca9a3c 100644 --- a/plugins/modules/ansible_galaxy_install.py +++ b/plugins/modules/ansible_galaxy_install.py @@ -18,8 +18,8 @@ description: - This module allows the installation of Ansible collections or roles using C(ansible-galaxy). notes: - Support for B(Ansible 2.9/2.10) was removed in community.general 8.0.0. - - The module will try and run using the C(C.UTF-8) locale. If that fails, it will try C(en_US.UTF-8). If that one also fails, - the module will fail. + - The module tries to run using the C(C.UTF-8) locale. If that fails, it tries C(en_US.UTF-8). If that one also fails, the + module fails. seealso: - name: C(ansible-galaxy) command manual page description: Manual page for the command. @@ -37,10 +37,10 @@ attributes: options: state: description: - - If O(state=present) then the collection or role will be installed. Note that the collections and roles are not updated + - If O(state=present) then the collection or role is installed. Note that the collections and roles are not updated with this option. - - Currently the O(state=latest) is ignored unless O(type=collection), and it will ensure the collection is installed - and updated to the latest available version. + - Currently the O(state=latest) is ignored unless O(type=collection), and it ensures the collection is installed and + updated to the latest available version. - Please note that O(force=true) can be used to perform upgrade regardless of O(type). 
type: str
 choices: [present, latest]
@@ -71,7 +71,7 @@ options:
 dest:
 description:
 - The path to the directory containing your collections or roles, according to the value of O(type).
- - Please notice that C(ansible-galaxy) will not install collections with O(type=both), when O(requirements_file) contains
+ - Please note that C(ansible-galaxy) does not install collections with O(type=both) when O(requirements_file) contains
 both roles and collections and O(dest) is specified.
 type: path
 no_deps:
@@ -83,7 +83,7 @@ options:
 force:
 description:
 - Force overwriting existing roles and/or collections.
- - It can be used for upgrading, but the module output will always report C(changed=true).
+ - It can be used for upgrading, but the module output always reports C(changed=true).
 - Using O(force=true) is mandatory when downgrading.
 type: bool
 default: false
@@ -220,7 +220,6 @@ class AnsibleGalaxyInstall(ModuleHelper):
 required_if=[('type', 'both', ['requirements_file'])],
 supports_check_mode=False,
 )
- use_old_vardict = False

 command = 'ansible-galaxy'
 command_args_formats = dict(
diff --git a/plugins/modules/apache2_mod_proxy.py b/plugins/modules/apache2_mod_proxy.py
index f70294bad1..3816845257 100644
--- a/plugins/modules/apache2_mod_proxy.py
+++ b/plugins/modules/apache2_mod_proxy.py
@@ -19,7 +19,7 @@ description:
extends_documentation_fragment:
 - community.general.attributes
requirements:
- - Python package C(BeautifulSoup).
+ - Python package C(BeautifulSoup) on Python 2, C(beautifulsoup4) on Python 3.
attributes:
 check_mode:
 support: full
@@ -40,14 +40,18 @@ options:
 type: str
 description:
 - (IPv4|IPv6|FQDN) of the balancer member to get or to set attributes to. Port number is autodetected and should not
- be specified here. If undefined, apache2_mod_proxy module will return a members list of dictionaries of all the current
- balancer pool members' attributes.
+ be specified here.
+ - If undefined, the M(community.general.apache2_mod_proxy) module returns a members list of dictionaries of all the
+ current balancer pool members' attributes.
 state:
- type: str
+ type: list
+ elements: str
+ choices: [present, absent, enabled, disabled, drained, hot_standby, ignore_errors]
 description:
- - Desired state of the member host. (absent|disabled),drained,hot_standby,ignore_errors can be simultaneously invoked
- by separating them with a comma (for example V(state=drained,ignore_errors)).
- - 'Accepted state values: [V(present), V(absent), V(enabled), V(disabled), V(drained), V(hot_standby), V(ignore_errors)].'
+ - Desired state of the member host.
+ - States can be simultaneously invoked by separating them with a comma (for example V(state=drained,ignore_errors)),
+ but it is recommended to specify them as a proper YAML list.
+ - States V(present) and V(absent) must be used without any other state.
 tls:
 description:
 - Use https to access balancer management page.
@@ -109,22 +113,23 @@ EXAMPLES = r"""

RETURN = r"""
member:
- description: specific balancer member information dictionary, returned when apache2_mod_proxy module is invoked with C(member_host) parameter.
+ description: Specific balancer member information dictionary, returned when the module is invoked with O(member_host) parameter.
type: dict returned: success sample: - {"attributes": - {"Busy": "0", - "Elected": "42", - "Factor": "1", - "From": "136K", - "Load": "0", - "Route": null, - "RouteRedir": null, - "Set": "0", - "Status": "Init Ok ", - "To": " 47K", - "Worker URL": null + { + "attributes": { + "Busy": "0", + "Elected": "42", + "Factor": "1", + "From": "136K", + "Load": "0", + "Route": null, + "RouteRedir": null, + "Set": "0", + "Status": "Init Ok ", + "To": " 47K", + "Worker URL": null }, "balancer_url": "http://10.10.0.2/balancer-manager/", "host": "10.10.0.20", @@ -133,18 +138,21 @@ member: "port": 8080, "protocol": "http", "status": { - "disabled": false, - "drained": false, - "hot_standby": false, - "ignore_errors": false + "disabled": false, + "drained": false, + "hot_standby": false, + "ignore_errors": false } } members: - description: list of member (defined above) dictionaries, returned when apache2_mod_proxy is invoked with no C(member_host) and state args. + description: List of member (defined above) dictionaries, returned when the module is invoked with no O(member_host) and + O(state) args. returned: success type: list sample: - [{"attributes": { + [ + { + "attributes": { "Busy": "0", "Elected": "42", "Factor": "1", @@ -156,21 +164,22 @@ members: "Status": "Init Ok ", "To": " 47K", "Worker URL": null - }, - "balancer_url": "http://10.10.0.2/balancer-manager/", - "host": "10.10.0.20", - "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b", - "path": "/ws", - "port": 8080, - "protocol": "http", - "status": { + }, + "balancer_url": "http://10.10.0.2/balancer-manager/", + "host": "10.10.0.20", + "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b", + "path": "/ws", + "port": 8080, + "protocol": "http", + "status": { "disabled": false, "drained": false, "hot_standby": false, "ignore_errors": false - } + } }, - {"attributes": { + { + "attributes": { "Busy": "0", "Elected": "42", "Factor": "1", @@ -182,50 +191,57 @@ members: "Status": "Init Ok ", "To": " 47K", "Worker URL": null - }, - "balancer_url": "http://10.10.0.2/balancer-manager/", - "host": "10.10.0.21", - "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.21:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b", - "path": "/ws", - "port": 8080, - "protocol": "http", - "status": { + }, + "balancer_url": "http://10.10.0.2/balancer-manager/", + "host": "10.10.0.21", + "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.21:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b", + "path": "/ws", + "port": 8080, + "protocol": "http", + "status": { "disabled": false, "drained": false, "hot_standby": false, - "ignore_errors": false} + "ignore_errors": false + } } ] """ import re -import traceback -from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible_collections.community.general.plugins.module_utils import deps +from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper, ModuleHelperException + +from ansible.module_utils.common.text.converters import to_text from ansible.module_utils.urls import fetch_url -from ansible.module_utils.six import iteritems +from ansible.module_utils.six import raise_from, PY2 -BEAUTIFUL_SOUP_IMP_ERR = None -try: - from BeautifulSoup import BeautifulSoup -except ImportError: - BEAUTIFUL_SOUP_IMP_ERR = traceback.format_exc() - HAS_BEAUTIFULSOUP = False +if PY2: + with 
deps.declare("BeautifulSoup"): + from BeautifulSoup import BeautifulSoup else: - HAS_BEAUTIFULSOUP = True + with deps.declare("beautifulsoup4"): + from bs4 import BeautifulSoup # balancer member attributes extraction regexp: -EXPRESSION = r"(b=([\w\.\-]+)&w=(https?|ajp|wss?|ftp|[sf]cgi)://([\w\.\-]+):?(\d*)([/\w\.\-]*)&?[\w\-\=]*)" +EXPRESSION = re.compile(to_text(r"(b=([\w\.\-]+)&w=(https?|ajp|wss?|ftp|[sf]cgi)://([\w\.\-]+):?(\d*)([/\w\.\-]*)&?[\w\-\=]*)")) # Apache2 server version extraction regexp: -APACHE_VERSION_EXPRESSION = r"SERVER VERSION: APACHE/([\d.]+)" +APACHE_VERSION_EXPRESSION = re.compile(to_text(r"SERVER VERSION: APACHE/([\d.]+)")) + + +def find_all(where, what): + if PY2: + return where.findAll(what) + return where.find_all(what) def regexp_extraction(string, _regexp, groups=1): """ Returns the capture group (default=1) specified in the regexp, applied to the string """ - regexp_search = re.search(string=str(string), pattern=str(_regexp)) + regexp_search = _regexp.search(string) if regexp_search: if regexp_search.group(groups) != '': - return str(regexp_search.group(groups)) + return regexp_search.group(groups) return None @@ -246,33 +262,33 @@ class BalancerMember(object): """ def __init__(self, management_url, balancer_url, module): - self.host = regexp_extraction(management_url, str(EXPRESSION), 4) - self.management_url = str(management_url) + self.host = regexp_extraction(management_url, EXPRESSION, 4) + self.management_url = management_url self.protocol = regexp_extraction(management_url, EXPRESSION, 3) self.port = regexp_extraction(management_url, EXPRESSION, 5) self.path = regexp_extraction(management_url, EXPRESSION, 6) - self.balancer_url = str(balancer_url) + self.balancer_url = balancer_url self.module = module def get_member_attributes(self): """ Returns a dictionary of a balancer member's attributes.""" - balancer_member_page = fetch_url(self.module, self.management_url) + resp, info = fetch_url(self.module, self.management_url, headers={'Referer': self.management_url}) - if balancer_member_page[1]['status'] != 200: - self.module.fail_json(msg="Could not get balancer_member_page, check for connectivity! " + balancer_member_page[1]) - else: - try: - soup = BeautifulSoup(balancer_member_page[0]) - except TypeError as exc: - self.module.fail_json(msg="Cannot parse balancer_member_page HTML! " + str(exc)) - else: - subsoup = soup.findAll('table')[1].findAll('tr') - keys = subsoup[0].findAll('th') - for valuesset in subsoup[1::1]: - if re.search(pattern=self.host, string=str(valuesset)): - values = valuesset.findAll('td') - return {keys[x].string: values[x].string for x in range(0, len(keys))} + if info['status'] != 200: + raise ModuleHelperException("Could not get balancer_member_page, check for connectivity! {0}".format(info)) + + try: + soup = BeautifulSoup(resp) + except TypeError as exc: + raise_from(ModuleHelperException("Cannot parse balancer_member_page HTML! 
{0}".format(exc)), exc) + + subsoup = find_all(find_all(soup, 'table')[1], 'tr') + keys = find_all(subsoup[0], 'th') + for valuesset in subsoup[1::1]: + if re.search(pattern=self.host, string=str(valuesset)): + values = find_all(valuesset, 'td') + return {keys[x].string: values[x].string for x in range(0, len(keys))} def get_member_status(self): """ Returns a dictionary of a balancer member's status attributes.""" @@ -280,8 +296,8 @@ class BalancerMember(object): 'drained': 'Drn', 'hot_standby': 'Stby', 'ignore_errors': 'Ign'} - actual_status = str(self.attributes['Status']) - status = {mode: patt in actual_status for mode, patt in iteritems(status_mapping)} + actual_status = self.attributes['Status'] + status = {mode: patt in actual_status for mode, patt in status_mapping.items()} return status def set_member_status(self, values): @@ -292,155 +308,125 @@ class BalancerMember(object): 'ignore_errors': '&w_status_I'} request_body = regexp_extraction(self.management_url, EXPRESSION, 1) - values_url = "".join("{0}={1}".format(url_param, 1 if values[mode] else 0) for mode, url_param in iteritems(values_mapping)) + values_url = "".join("{0}={1}".format(url_param, 1 if values[mode] else 0) for mode, url_param in values_mapping.items()) request_body = "{0}{1}".format(request_body, values_url) - response = fetch_url(self.module, self.management_url, data=request_body) - if response[1]['status'] != 200: - self.module.fail_json(msg="Could not set the member status! " + self.host + " " + response[1]['status']) + response, info = fetch_url(self.module, self.management_url, data=request_body, headers={'Referer': self.management_url}) + if info['status'] != 200: + raise ModuleHelperException("Could not set the member status! {0} {1}".format(self.host, info['status'])) attributes = property(get_member_attributes) status = property(get_member_status, set_member_status) + def as_dict(self): + return { + "host": self.host, + "status": self.status, + "protocol": self.protocol, + "port": self.port, + "path": self.path, + "attributes": self.attributes, + "management_url": self.management_url, + "balancer_url": self.balancer_url + } + class Balancer(object): """ Apache httpd 2.4 mod_proxy balancer object""" - def __init__(self, host, suffix, module, members=None, tls=False): - if tls: - self.base_url = 'https://' + str(host) - self.url = 'https://' + str(host) + str(suffix) - else: - self.base_url = 'http://' + str(host) - self.url = 'http://' + str(host) + str(suffix) + def __init__(self, module, host, suffix, tls=False): + proto = "https" if tls else "http" + self.base_url = '{0}://{1}'.format(proto, host) + self.url = '{0}://{1}{2}'.format(proto, host, suffix) self.module = module self.page = self.fetch_balancer_page() - if members is None: - self._members = [] def fetch_balancer_page(self): """ Returns the balancer management html page as a string for later parsing.""" - page = fetch_url(self.module, str(self.url)) - if page[1]['status'] != 200: - self.module.fail_json(msg="Could not get balancer page! 
HTTP status response: " + str(page[1]['status'])) - else: - content = page[0].read() - apache_version = regexp_extraction(content.upper(), APACHE_VERSION_EXPRESSION, 1) - if apache_version: - if not re.search(pattern=r"2\.4\.[\d]*", string=apache_version): - self.module.fail_json(msg="This module only acts on an Apache2 2.4+ instance, current Apache2 version: " + str(apache_version)) - return content - else: - self.module.fail_json(msg="Could not get the Apache server version from the balancer-manager") + resp, info = fetch_url(self.module, self.url) + if info['status'] != 200: + raise ModuleHelperException("Could not get balancer page! HTTP status response: {0}".format(info['status'])) + + content = to_text(resp.read()) + apache_version = regexp_extraction(content.upper(), APACHE_VERSION_EXPRESSION, 1) + if not apache_version: + raise ModuleHelperException("Could not get the Apache server version from the balancer-manager") + + if not re.search(pattern=r"2\.4\.[\d]*", string=apache_version): + raise ModuleHelperException("This module only acts on an Apache2 2.4+ instance, current Apache2 version: {0}".format(apache_version)) + return content def get_balancer_members(self): """ Returns members of the balancer as a generator object for later iteration.""" try: soup = BeautifulSoup(self.page) - except TypeError: - self.module.fail_json(msg="Cannot parse balancer page HTML! " + str(self.page)) - else: - for element in soup.findAll('a')[1::1]: - balancer_member_suffix = str(element.get('href')) - if not balancer_member_suffix: - self.module.fail_json(msg="Argument 'balancer_member_suffix' is empty!") - else: - yield BalancerMember(str(self.base_url + balancer_member_suffix), str(self.url), self.module) + except TypeError as e: + raise_from(ModuleHelperException("Cannot parse balancer page HTML! 
{0}".format(self.page)), e) + + elements = find_all(soup, 'a') + for element in elements[1::1]: + balancer_member_suffix = element.get('href') + if not balancer_member_suffix: + raise ModuleHelperException("Argument 'balancer_member_suffix' is empty!") + + yield BalancerMember(self.base_url + balancer_member_suffix, self.url, self.module) members = property(get_balancer_members) -def main(): +class ApacheModProxy(ModuleHelper): """ Initiates module.""" - module = AnsibleModule( + module = dict( argument_spec=dict( balancer_vhost=dict(required=True, type='str'), balancer_url_suffix=dict(default="/balancer-manager/", type='str'), member_host=dict(type='str'), - state=dict(type='str'), + state=dict(type='list', elements='str', choices=['present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors']), tls=dict(default=False, type='bool'), validate_certs=dict(default=True, type='bool') ), supports_check_mode=True ) - if HAS_BEAUTIFULSOUP is False: - module.fail_json(msg=missing_required_lib('BeautifulSoup'), exception=BEAUTIFUL_SOUP_IMP_ERR) + def __init_module__(self): + deps.validate(self.module) - if module.params['state'] is not None: - states = module.params['state'].split(',') - if (len(states) > 1) and (("present" in states) or ("enabled" in states)): - module.fail_json(msg="state present/enabled is mutually exclusive with other states!") + if len(self.vars.state or []) > 1 and ("present" in self.vars.state or "enabled" in self.vars.state): + self.do_raise(msg="states present/enabled are mutually exclusive with other states!") + + self.mybalancer = Balancer(self.module, self.vars.balancer_vhost, self.vars.balancer_url_suffix, tls=self.vars.tls) + + def __run__(self): + if self.vars.member_host is None: + self.vars.members = [member.as_dict() for member in self.mybalancer.members] else: - for _state in states: - if _state not in ['present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors']: - module.fail_json( - msg="State can only take values amongst 'present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors'." 
- ) - else: - states = ['None'] + member_exists = False + member_status = {'disabled': False, 'drained': False, 'hot_standby': False, 'ignore_errors': False} + for mode in member_status: + for state in self.vars.state or []: + if mode == state: + member_status[mode] = True + elif mode == 'disabled' and state == 'absent': + member_status[mode] = True - mybalancer = Balancer(module.params['balancer_vhost'], - module.params['balancer_url_suffix'], - module=module, - tls=module.params['tls']) + for member in self.mybalancer.members: + if str(member.host) == self.vars.member_host: + member_exists = True + if self.vars.state is not None: + member_status_before = member.status + if not self.check_mode: + member_status_after = member.status = member_status + else: + member_status_after = member_status + self.changed |= (member_status_before != member_status_after) + self.vars.member = member.as_dict() - if module.params['member_host'] is None: - json_output_list = [] - for member in mybalancer.members: - json_output_list.append({ - "host": member.host, - "status": member.status, - "protocol": member.protocol, - "port": member.port, - "path": member.path, - "attributes": member.attributes, - "management_url": member.management_url, - "balancer_url": member.balancer_url - }) - module.exit_json( - changed=False, - members=json_output_list - ) - else: - changed = False - member_exists = False - member_status = {'disabled': False, 'drained': False, 'hot_standby': False, 'ignore_errors': False} - for mode in member_status.keys(): - for state in states: - if mode == state: - member_status[mode] = True - elif mode == 'disabled' and state == 'absent': - member_status[mode] = True + if not member_exists: + self.do_raise(msg='{0} is not a member of the balancer {1}!'.format(self.vars.member_host, self.vars.balancer_vhost)) - for member in mybalancer.members: - if str(member.host) == str(module.params['member_host']): - member_exists = True - if module.params['state'] is not None: - member_status_before = member.status - if not module.check_mode: - member_status_after = member.status = member_status - else: - member_status_after = member_status - if member_status_before != member_status_after: - changed = True - json_output = { - "host": member.host, - "status": member.status, - "protocol": member.protocol, - "port": member.port, - "path": member.path, - "attributes": member.attributes, - "management_url": member.management_url, - "balancer_url": member.balancer_url - } - if member_exists: - module.exit_json( - changed=changed, - member=json_output - ) - else: - module.fail_json(msg=str(module.params['member_host']) + ' is not a member of the balancer ' + str(module.params['balancer_vhost']) + '!') + +def main(): + ApacheModProxy.execute() if __name__ == '__main__': diff --git a/plugins/modules/apache2_module.py b/plugins/modules/apache2_module.py index cacb870ee0..99db968670 100644 --- a/plugins/modules/apache2_module.py +++ b/plugins/modules/apache2_module.py @@ -113,22 +113,6 @@ result: description: Message about action taken. returned: always type: str -warnings: - description: List of warning messages. - returned: when needed - type: list -rc: - description: Return code of underlying command. - returned: failed - type: int -stdout: - description: The stdout of underlying command. - returned: failed - type: str -stderr: - description: The stderr of underlying command. 
- returned: failed
- type: str
"""

import re
@@ -164,12 +148,12 @@ def _module_is_enabled(module):
 if module.params['ignore_configcheck']:
 if 'AH00534' in stderr and 'mpm_' in module.params['name']:
 if module.params['warn_mpm_absent']:
- module.warnings.append(
+ module.warn(
 "No MPM module loaded! apache2 reload AND other module actions"
 " will fail if no MPM module is loaded immediately."
 )
 else:
- module.warnings.append(error_msg)
+ module.warn(error_msg)
 return False
 else:
 module.fail_json(msg=error_msg)
@@ -194,6 +178,7 @@ def create_apache_identifier(name):

 # re expressions to extract subparts of names
 re_workarounds = [
+ ('php8', re.compile(r'^(php)[\d\.]+')),
 ('php', re.compile(r'^(php\d)\.')),
 ]

@@ -223,9 +208,7 @@ def _set_state(module, state):

 if _module_is_enabled(module) != want_enabled:
 if module.check_mode:
- module.exit_json(changed=True,
- result=success_msg,
- warnings=module.warnings)
+ module.exit_json(changed=True, result=success_msg)

 a2mod_binary_path = module.get_bin_path(a2mod_binary)
 if a2mod_binary_path is None:
@@ -240,9 +223,7 @@ def _set_state(module, state):
 result, stdout, stderr = module.run_command(a2mod_binary_cmd + [name])

 if _module_is_enabled(module) == want_enabled:
- module.exit_json(changed=True,
- result=success_msg,
- warnings=module.warnings)
+ module.exit_json(changed=True, result=success_msg)
 else:
 msg = (
 'Failed to set module {name} to {state}:\n'
@@ -260,9 +241,7 @@ def _set_state(module, state):
 stdout=stdout,
 stderr=stderr)
 else:
- module.exit_json(changed=False,
- result=success_msg,
- warnings=module.warnings)
+ module.exit_json(changed=False, result=success_msg)


def main():
@@ -278,8 +257,6 @@ def main():
 supports_check_mode=True,
 )

- module.warnings = []
-
 name = module.params['name']
 if name == 'cgi' and _run_threaded(module):
 module.fail_json(msg="Your MPM seems to be threaded. No automatic actions on module cgi possible.")
diff --git a/plugins/modules/apk.py b/plugins/modules/apk.py
index 7f1f83ce56..7ad5d5908e 100644
--- a/plugins/modules/apk.py
+++ b/plugins/modules/apk.py
@@ -47,8 +47,8 @@ options:
 version_added: 1.0.0
 repository:
 description:
- - A package repository or multiple repositories. Unlike with the underlying apk command, this list will override the
- system repositories rather than supplement them.
+ - A package repository or multiple repositories. Unlike with the underlying apk command, this list overrides the system
+ repositories rather than supplementing them.
 type: list
 elements: str
 state:
@@ -79,7 +79,7 @@ options:
 version_added: 5.4.0
notes:
 - O(name) and O(upgrade) are mutually exclusive.
- - When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly
+ - When used with a C(loop:) each package is processed individually; it is much more efficient to pass the list directly
 to the O(name) option.
"""

@@ -164,7 +164,7 @@ packages:
 description: A list of packages that have been changed.
returned: when packages have changed type: list - sample: ['package', 'other-package'] + sample: ["package", "other-package"] """ import re @@ -351,6 +351,9 @@ def main(): p = module.params + if all(not name.strip() for name in p['name']): + module.fail_json(msg="Package name(s) cannot be empty or whitespace-only") + if p['no_cache']: APK_PATH = "%s --no-cache" % (APK_PATH, ) diff --git a/plugins/modules/apt_rpm.py b/plugins/modules/apt_rpm.py index 5a5ba57faf..1dcca5815c 100644 --- a/plugins/modules/apt_rpm.py +++ b/plugins/modules/apt_rpm.py @@ -35,9 +35,9 @@ options: state: description: - Indicates the desired package state. - - Please note that V(present) and V(installed) are equivalent to V(latest) right now. This will change in the future. - To simply ensure that a package is installed, without upgrading it, use the V(present_not_latest) state. - The states V(latest) and V(present_not_latest) have been added in community.general 8.6.0. + - Please note before community.general 11.0.0, V(present) and V(installed) were equivalent to V(latest). This changed + in community.general 11.0.0. Now they are equivalent to V(present_not_latest). choices: - absent - present @@ -307,17 +307,6 @@ def main(): module.fail_json(msg="cannot find /usr/bin/apt-get and/or /usr/bin/rpm") p = module.params - if p['state'] in ['installed', 'present']: - module.deprecate( - 'state=%s currently behaves unexpectedly by always upgrading to the latest version if' - ' the package is already installed. This behavior is deprecated and will change in' - ' community.general 11.0.0. You can use state=latest to explicitly request this behavior' - ' or state=present_not_latest to explicitly request the behavior that state=%s will have' - ' in community.general 11.0.0, namely that the package will not be upgraded if it is' - ' already installed.' % (p['state'], p['state']), - version='11.0.0', - collection_name='community.general', - ) modified = False output = "" @@ -341,7 +330,7 @@ def main(): packages = p['package'] if p['state'] in ['installed', 'present', 'present_not_latest', 'latest']: - (m, out) = install_packages(module, packages, allow_upgrade=p['state'] != 'present_not_latest') + (m, out) = install_packages(module, packages, allow_upgrade=p['state'] == 'latest') modified = modified or m output += out diff --git a/plugins/modules/archive.py b/plugins/modules/archive.py index 4e4b6368ce..65b397c255 100644 --- a/plugins/modules/archive.py +++ b/plugins/modules/archive.py @@ -43,7 +43,7 @@ options: - The file name of the destination archive. The parent directory must exists on the remote host. - This is required when O(path) refers to multiple files by either specifying a glob, a directory or multiple paths in a list. - - If the destination archive already exists, it will be truncated and overwritten. + - If the destination archive already exists, it is truncated and overwritten. type: path exclude_path: description: diff --git a/plugins/modules/awall.py b/plugins/modules/awall.py index b95f36ea8d..0bc4ca1d79 100644 --- a/plugins/modules/awall.py +++ b/plugins/modules/awall.py @@ -40,7 +40,7 @@ options: description: - Activate the new firewall rules. - Can be run with other steps or on its own. - - Idempotency is affected if O(activate=true), as the module will always report a changed state. + - Idempotency is affected if O(activate=true), as the module always reports a changed state. 
type: bool default: false notes: diff --git a/plugins/modules/beadm.py b/plugins/modules/beadm.py index 3d9d8ca651..0c200661f1 100644 --- a/plugins/modules/beadm.py +++ b/plugins/modules/beadm.py @@ -32,7 +32,7 @@ options: aliases: ["be"] snapshot: description: - - If specified, the new boot environment will be cloned from the given snapshot or inactive boot environment. + - If specified, the new boot environment is cloned from the given snapshot or inactive boot environment. type: str description: description: diff --git a/plugins/modules/bearychat.py b/plugins/modules/bearychat.py index 1dec1bce68..e738d83d36 100644 --- a/plugins/modules/bearychat.py +++ b/plugins/modules/bearychat.py @@ -33,7 +33,7 @@ options: - Message to send. markdown: description: - - If V(true), text will be parsed as markdown. + - If V(true), text is parsed as markdown. default: true type: bool channel: diff --git a/plugins/modules/bigpanda.py b/plugins/modules/bigpanda.py index 071440b839..81e2085b7d 100644 --- a/plugins/modules/bigpanda.py +++ b/plugins/modules/bigpanda.py @@ -75,7 +75,7 @@ options: default: "https://api.bigpanda.io" validate_certs: description: - - If V(false), SSL certificates for the target url will not be validated. This should only be used on personally controlled + - If V(false), SSL certificates for the target URL are not validated. This should only be used on personally controlled sites using self-signed certificates. required: false default: true @@ -150,14 +150,14 @@ def main(): version=dict(required=True), token=dict(required=True, no_log=True), state=dict(required=True, choices=['started', 'finished', 'failed']), - hosts=dict(required=False, aliases=['host']), - env=dict(required=False), - owner=dict(required=False), - description=dict(required=False), - deployment_message=dict(required=False), - source_system=dict(required=False, default='ansible'), + hosts=dict(aliases=['host']), + env=dict(), + owner=dict(), + description=dict(), + deployment_message=dict(), + source_system=dict(default='ansible'), validate_certs=dict(default=True, type='bool'), - url=dict(required=False, default='https://api.bigpanda.io'), + url=dict(default='https://api.bigpanda.io'), ), supports_check_mode=True, ) diff --git a/plugins/modules/bitbucket_pipeline_known_host.py b/plugins/modules/bitbucket_pipeline_known_host.py index f5594dc8ac..eb8b22b4f0 100644 --- a/plugins/modules/bitbucket_pipeline_known_host.py +++ b/plugins/modules/bitbucket_pipeline_known_host.py @@ -13,8 +13,7 @@ module: bitbucket_pipeline_known_host short_description: Manages Bitbucket pipeline known hosts description: - Manages Bitbucket pipeline known hosts under the "SSH Keys" menu. - - The host fingerprint will be retrieved automatically, but in case of an error, one can use O(key) field to specify it - manually. + - The host fingerprint is retrieved automatically, but in case of an error, one can use O(key) field to specify it manually. author: - Evgeniy Krysanov (@catcombo) extends_documentation_fragment: diff --git a/plugins/modules/bootc_manage.py b/plugins/modules/bootc_manage.py index 44444960df..da92c02b06 100644 --- a/plugins/modules/bootc_manage.py +++ b/plugins/modules/bootc_manage.py @@ -20,7 +20,7 @@ options: state: description: - Control whether to apply the latest image or switch the image. - - B(Note:) This will not reboot the system. + - B(Note:) This does not reboot the system. - Please use M(ansible.builtin.reboot) to reboot the system. 
required: true type: str @@ -57,7 +57,7 @@ from ansible.module_utils.common.locale import get_best_parsable_locale def main(): argument_spec = dict( state=dict(type='str', required=True, choices=['switch', 'latest']), - image=dict(type='str', required=False), + image=dict(type='str'), ) module = AnsibleModule( argument_spec=argument_spec, diff --git a/plugins/modules/bower.py b/plugins/modules/bower.py index 3e7ebdaecc..547152fa98 100644 --- a/plugins/modules/bower.py +++ b/plugins/modules/bower.py @@ -187,13 +187,13 @@ class Bower(object): def main(): arg_spec = dict( - name=dict(default=None), + name=dict(), offline=dict(default=False, type='bool'), production=dict(default=False, type='bool'), path=dict(required=True, type='path'), - relative_execpath=dict(default=None, required=False, type='path'), + relative_execpath=dict(type='path'), state=dict(default='present', choices=['present', 'absent', 'latest', ]), - version=dict(default=None), + version=dict(), ) module = AnsibleModule( argument_spec=arg_spec diff --git a/plugins/modules/btrfs_info.py b/plugins/modules/btrfs_info.py index c3f4204684..9467fb782d 100644 --- a/plugins/modules/btrfs_info.py +++ b/plugins/modules/btrfs_info.py @@ -49,7 +49,7 @@ filesystems: - /dev/sda1 - /dev/sdb1 default_subvolume: - description: The id of the filesystem's default subvolume. + description: The ID of the filesystem's default subvolume. type: int sample: 5 subvolumes: @@ -64,7 +64,7 @@ filesystems: mountpoints: description: Paths where the subvolume is mounted on the targeted host. type: list - sample: ['/home'] + sample: ["/home"] parent: description: The identifier of this subvolume's parent. type: int diff --git a/plugins/modules/btrfs_subvolume.py b/plugins/modules/btrfs_subvolume.py index b1593a8ecd..3c34ef4680 100644 --- a/plugins/modules/btrfs_subvolume.py +++ b/plugins/modules/btrfs_subvolume.py @@ -64,9 +64,9 @@ options: no change is required. Warning, this option does not yet verify that the target subvolume was generated from a snapshot of the requested source. - V(clobber) - If a subvolume already exists at the requested location, delete it first. This option is not idempotent - and will result in a new snapshot being generated on every execution. + and results in a new snapshot being generated on every execution. - V(error) - If a subvolume already exists at the requested location, return an error. This option is not idempotent - and will result in an error on replay of the module. + and results in an error on replay of the module. type: str choices: [skip, clobber, error] default: skip @@ -80,8 +80,8 @@ options: notes: - If any or all of the options O(filesystem_device), O(filesystem_label) or O(filesystem_uuid) parameters are provided, there is expected to be a matching btrfs filesystem. If none are provided and only a single btrfs filesystem exists or - only a single btrfs filesystem is mounted, that filesystem will be used; otherwise, the module will take no action and - return an error. + only a single btrfs filesystem is mounted, that filesystem is used; otherwise, the module takes no action and returns an + error. 
extends_documentation_fragment: - community.general.attributes @@ -120,7 +120,7 @@ EXAMPLES = r""" community.general.btrfs_subvolume: name: /@ snapshot_source: / - default: Yes + default: true filesystem_device: /dev/vda2 - name: Create a snapshot of the /@ subvolume and recursively creating intermediate subvolumes as required @@ -180,7 +180,7 @@ filesystem: mountpoints: description: Paths where the subvolume is mounted on the targeted host. type: list - sample: ['/home'] + sample: ["/home"] parent: description: The identifier of this subvolume's parent. type: int @@ -644,16 +644,16 @@ class BtrfsSubvolumeModule(object): def run_module(): module_args = dict( - automount=dict(type='bool', required=False, default=False), - default=dict(type='bool', required=False, default=False), - filesystem_device=dict(type='path', required=False), - filesystem_label=dict(type='str', required=False), - filesystem_uuid=dict(type='str', required=False), + automount=dict(type='bool', default=False), + default=dict(type='bool', default=False), + filesystem_device=dict(type='path'), + filesystem_label=dict(type='str'), + filesystem_uuid=dict(type='str'), name=dict(type='str', required=True), recursive=dict(type='bool', default=False), - state=dict(type='str', required=False, default='present', choices=['present', 'absent']), - snapshot_source=dict(type='str', required=False), - snapshot_conflict=dict(type='str', required=False, default='skip', choices=['skip', 'clobber', 'error']) + state=dict(type='str', default='present', choices=['present', 'absent']), + snapshot_source=dict(type='str'), + snapshot_conflict=dict(type='str', default='skip', choices=['skip', 'clobber', 'error']) ) module = AnsibleModule( diff --git a/plugins/modules/bundler.py b/plugins/modules/bundler.py index bfd7fe7ec1..6bf2556110 100644 --- a/plugins/modules/bundler.py +++ b/plugins/modules/bundler.py @@ -36,13 +36,13 @@ options: type: path description: - The directory to execute the bundler commands from. This directory needs to contain a valid Gemfile or .bundle/ directory. - - If not specified, it will default to the temporary working directory. + - If not specified, it defaults to the temporary working directory. exclude_groups: type: list elements: str description: - A list of Gemfile groups to exclude during operations. This only applies when O(state=present). Bundler considers - this a 'remembered' property for the Gemfile and will automatically exclude groups in future operations even if O(exclude_groups) + this a 'remembered' property for the Gemfile and automatically excludes groups in future operations even if O(exclude_groups) is not set. clean: description: @@ -53,7 +53,7 @@ options: type: path description: - Only applies if O(state=present). The path to the gemfile to use to install gems. - - If not specified it will default to the Gemfile in current directory. + - If not specified it defaults to the Gemfile in current directory. local: description: - If set only installs gems from the cache on the target host. @@ -61,8 +61,8 @@ options: default: false deployment_mode: description: - - Only applies if O(state=present). If set it will install gems in C(./vendor/bundle) instead of the default location. - Requires a C(Gemfile.lock) file to have been created prior. + - Only applies if O(state=present). If set it installs gems in C(./vendor/bundle) instead of the default location. Requires + a C(Gemfile.lock) file to have been created prior. 
type: bool
 default: false
 user_install:
@@ -75,12 +75,12 @@ options:
 description:
 - Only applies if O(state=present). Specifies the directory to install the gems into. If O(chdir) is set then this
 path is relative to O(chdir).
- - If not specified the default RubyGems gem paths will be used.
+ - If not specified, the default RubyGems gem paths are used.
 binstub_directory:
 type: path
 description:
 - Only applies if O(state=present). Specifies the directory to install any gem bins files to. When executed the bin
- files will run within the context of the Gemfile and fail if any required gem dependencies are not installed. If O(chdir)
+ files run within the context of the Gemfile and fail if any required gem dependencies are not installed. If O(chdir)
 is set then this path is relative to O(chdir).
 extra_args:
 type: str
@@ -131,18 +131,18 @@ def get_bundler_executable(module):
def main():
 module = AnsibleModule(
 argument_spec=dict(
- executable=dict(default=None, required=False),
- state=dict(default='present', required=False, choices=['present', 'latest']),
- chdir=dict(default=None, required=False, type='path'),
- exclude_groups=dict(default=None, required=False, type='list', elements='str'),
- clean=dict(default=False, required=False, type='bool'),
- gemfile=dict(default=None, required=False, type='path'),
- local=dict(default=False, required=False, type='bool'),
- deployment_mode=dict(default=False, required=False, type='bool'),
- user_install=dict(default=True, required=False, type='bool'),
- gem_path=dict(default=None, required=False, type='path'),
- binstub_directory=dict(default=None, required=False, type='path'),
- extra_args=dict(default=None, required=False),
+ executable=dict(),
+ state=dict(default='present', choices=['present', 'latest']),
+ chdir=dict(type='path'),
+ exclude_groups=dict(type='list', elements='str'),
+ clean=dict(default=False, type='bool'),
+ gemfile=dict(type='path'),
+ local=dict(default=False, type='bool'),
+ deployment_mode=dict(default=False, type='bool'),
+ user_install=dict(default=True, type='bool'),
+ gem_path=dict(type='path'),
+ binstub_directory=dict(type='path'),
+ extra_args=dict(),
 ),
 supports_check_mode=True
 )
diff --git a/plugins/modules/bzr.py b/plugins/modules/bzr.py
index 7a4512a5dd..76ae917802 100644
--- a/plugins/modules/bzr.py
+++ b/plugins/modules/bzr.py
@@ -42,12 +42,12 @@ options:
 type: str
 force:
 description:
- - If V(true), any modified files in the working tree will be discarded.
+ - If V(true), any modified files in the working tree are discarded.
 type: bool
 default: false
 executable:
 description:
- - Path to bzr executable to use. If not supplied, the normal mechanism for resolving binary paths will be used.
+ - Path to C(bzr) executable to use. If not supplied, the normal mechanism for resolving binary paths is used.
 type: str
"""
diff --git a/plugins/modules/campfire.py b/plugins/modules/campfire.py
index 91e83fc7d1..128790c372 100644
--- a/plugins/modules/campfire.py
+++ b/plugins/modules/campfire.py
@@ -14,7 +14,7 @@ module: campfire
short_description: Send a message to Campfire
description:
 - Send a message to Campfire.
- - Messages with newlines will result in a "Paste" message being sent.
+ - Messages with newlines result in a "Paste" message being sent.
extends_documentation_fragment:
 - community.general.attributes
attributes:
@@ -48,10 +48,51 @@ options:
 description:
 - Send a notification sound before the message.
required: false - choices: ["56k", "bell", "bezos", "bueller", "clowntown", "cottoneyejoe", "crickets", "dadgummit", "dangerzone", "danielsan", - "deeper", "drama", "greatjob", "greyjoy", "guarantee", "heygirl", "horn", "horror", "inconceivable", "live", "loggins", - "makeitso", "noooo", "nyan", "ohmy", "ohyeah", "pushit", "rimshot", "rollout", "rumble", "sax", "secret", "sexyback", - "story", "tada", "tmyk", "trololo", "trombone", "unix", "vuvuzela", "what", "whoomp", "yeah", "yodel"] + choices: + - 56k + - bell + - bezos + - bueller + - clowntown + - cottoneyejoe + - crickets + - dadgummit + - dangerzone + - danielsan + - deeper + - drama + - greatjob + - greyjoy + - guarantee + - heygirl + - horn + - horror + - inconceivable + - live + - loggins + - makeitso + - noooo + - nyan + - ohmy + - ohyeah + - pushit + - rimshot + - rollout + - rumble + - sax + - secret + - sexyback + - story + - tada + - tmyk + - trololo + - trombone + - unix + - vuvuzela + - what + - whoomp + - yeah + - yodel # informational: requirements for nodes requirements: [] @@ -96,8 +137,7 @@ def main(): token=dict(required=True, no_log=True), room=dict(required=True), msg=dict(required=True), - notify=dict(required=False, - choices=["56k", "bell", "bezos", "bueller", + notify=dict(choices=["56k", "bell", "bezos", "bueller", "clowntown", "cottoneyejoe", "crickets", "dadgummit", "dangerzone", "danielsan", "deeper", "drama", diff --git a/plugins/modules/capabilities.py b/plugins/modules/capabilities.py index 088c15e4f6..b3143df0a4 100644 --- a/plugins/modules/capabilities.py +++ b/plugins/modules/capabilities.py @@ -40,10 +40,10 @@ options: choices: [absent, present] default: present notes: - - The capabilities system will automatically transform operators and flags into the effective set, so for example, C(cap_foo=ep) - will probably become C(cap_foo+ep). - - This module does not attempt to determine the final operator and flags to compare, so you will want to ensure that your - capabilities argument matches the final capabilities. + - The capabilities system automatically transforms operators and flags into the effective set, so for example, C(cap_foo=ep) + probably becomes C(cap_foo+ep). + - This module does not attempt to determine the final operator and flags to compare, so you want to ensure that your capabilities + argument matches the final capabilities. author: - Nate Coraor (@natefoo) """ diff --git a/plugins/modules/cargo.py b/plugins/modules/cargo.py index ea2976036c..c00983fade 100644 --- a/plugins/modules/cargo.py +++ b/plugins/modules/cargo.py @@ -28,7 +28,7 @@ options: executable: description: - Path to the C(cargo) installed in the system. - - If not specified, the module will look C(cargo) in E(PATH). + - If not specified, the module looks for C(cargo) in E(PATH). type: path version_added: 7.5.0 name: @@ -38,12 +38,12 @@ options: elements: str required: true path: - description: The base path where to install the Rust packages. Cargo automatically appends V(/bin). In other words, - V(/usr/local) will become V(/usr/local/bin). + description: The base path where to install the Rust packages. Cargo automatically appends V(/bin). In other words, V(/usr/local) + becomes V(/usr/local/bin). type: path version: - description: The version to install. If O(name) contains multiple values, the module will try to install all of them - in this version. + description: The version to install. If O(name) contains multiple values, the module tries to install all of them in this + version. 
type: str required: false locked: @@ -68,6 +68,15 @@ options: type: path required: false version_added: 9.1.0 + features: + description: + - List of features to activate. + - This is only used when installing packages. + type: list + elements: str + required: false + default: [] + version_added: 11.0.0 requirements: - cargo installed """ @@ -106,6 +115,12 @@ EXAMPLES = r""" community.general.cargo: name: ludusavi directory: /path/to/ludusavi/source + +- name: Install "serpl" Rust package with ast_grep feature + community.general.cargo: + name: serpl + features: + - ast_grep """ import json @@ -125,6 +140,7 @@ class Cargo(object): self.version = kwargs["version"] self.locked = kwargs["locked"] self.directory = kwargs["directory"] + self.features = kwargs["features"] @property def path(self): @@ -176,6 +192,8 @@ class Cargo(object): if self.directory: cmd.append("--path") cmd.append(self.directory) + if self.features: + cmd += ["--features", ",".join(self.features)] return self._exec(cmd) def is_outdated(self, name): @@ -229,13 +247,14 @@ class Cargo(object): def main(): arg_spec = dict( - executable=dict(default=None, type="path"), + executable=dict(type="path"), name=dict(required=True, type="list", elements="str"), - path=dict(default=None, type="path"), + path=dict(type="path"), state=dict(default="present", choices=["present", "absent", "latest"]), - version=dict(default=None, type="str"), + version=dict(type="str"), locked=dict(default=False, type="bool"), - directory=dict(default=None, type="path"), + directory=dict(type="path"), + features=dict(default=[], type="list", elements="str"), ) module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) diff --git a/plugins/modules/catapult.py b/plugins/modules/catapult.py index e9c661e99b..448de5d13d 100644 --- a/plugins/modules/catapult.py +++ b/plugins/modules/catapult.py @@ -16,6 +16,12 @@ module: catapult short_description: Send a sms / mms using the catapult bandwidth API description: - Allows notifications to be sent using SMS / MMS using the catapult bandwidth API. +deprecated: + removed_in: 13.0.0 + why: >- + DNS fails to resolve the API endpoint used by the module since Oct 2024. + See L(the associated issue, https://github.com/ansible-collections/community.general/issues/10318) for details. + alternative: There is none. extends_documentation_fragment: - community.general.attributes attributes: @@ -47,7 +53,7 @@ options: user_id: type: str description: - - User Id from API account page. + - User ID from API account page. required: true api_token: type: str @@ -62,7 +68,7 @@ options: author: "Jonathan Mainguy (@Jmainguy)" notes: - - Will return changed even if the media url is wrong. + - Will return changed even if the media URL is wrong. - Will return changed if the destination number is invalid. """ @@ -89,14 +95,6 @@ EXAMPLES = r""" api_secret: "{{ api_secret }}" """ -RETURN = r""" -changed: - description: Whether the API accepted the message. 
- returned: always - type: bool - sample: true -""" - import json @@ -132,7 +130,7 @@ def main(): user_id=dict(required=True), api_token=dict(required=True, no_log=True), api_secret=dict(required=True, no_log=True), - media=dict(default=None, required=False), + media=dict(), ), ) diff --git a/plugins/modules/cisco_webex.py b/plugins/modules/cisco_webex.py index 14b8716846..f957f4121d 100644 --- a/plugins/modules/cisco_webex.py +++ b/plugins/modules/cisco_webex.py @@ -177,7 +177,7 @@ def main(): argument_spec=dict( recipient_type=dict(required=True, choices=['roomId', 'toPersonEmail', 'toPersonId']), recipient_id=dict(required=True, no_log=True), - msg_type=dict(required=False, default='text', aliases=['message_type'], choices=['text', 'markdown']), + msg_type=dict(default='text', aliases=['message_type'], choices=['text', 'markdown']), personal_token=dict(required=True, no_log=True, aliases=['token']), msg=dict(required=True), ), diff --git a/plugins/modules/clc_aa_policy.py b/plugins/modules/clc_aa_policy.py deleted file mode 100644 index d172c26343..0000000000 --- a/plugins/modules/clc_aa_policy.py +++ /dev/null @@ -1,348 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r""" -module: clc_aa_policy -short_description: Create or Delete Anti-Affinity Policies at CenturyLink Cloud -description: - - An Ansible module to Create or Delete Anti-Affinity Policies at CenturyLink Cloud. -extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: full - diff_mode: - support: none -options: - name: - description: - - The name of the Anti-Affinity Policy. - type: str - required: true - location: - description: - - Datacenter in which the policy lives/should live. - type: str - required: true - state: - description: - - Whether to create or delete the policy. - type: str - required: false - default: present - choices: ['present', 'absent'] -requirements: - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the Centurylink Cloud. - - E(CLC_V2_API_USERNAME), the account login id for the Centurylink Cloud. - - E(CLC_V2_API_PASSWORD), the account password for the Centurylink Cloud. - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the CLC account - login and password using the HTTP API call @ https://api.ctl.io/v2/authentication/login - - E(CLC_V2_API_TOKEN), the API token generated from https://api.ctl.io/v2/authentication/login - - E(CLC_ACCT_ALIAS), the account alias associated with the Centurylink Cloud. - - Users can set E(CLC_V2_API_URL) to specify an endpoint for pointing to a different CLC environment. 
-""" - -EXAMPLES = r""" -- name: Create AA Policy - hosts: localhost - gather_facts: false - connection: local - tasks: - - name: Create an Anti Affinity Policy - community.general.clc_aa_policy: - name: Hammer Time - location: UK3 - state: present - register: policy - - - name: Debug - ansible.builtin.debug: - var: policy - -- name: Delete AA Policy - hosts: localhost - gather_facts: false - connection: local - tasks: - - name: Delete an Anti Affinity Policy - community.general.clc_aa_policy: - name: Hammer Time - location: UK3 - state: absent - register: policy - - - name: Debug - ansible.builtin.debug: - var: policy -""" - -RETURN = r""" -policy: - description: The anti-affinity policy information. - returned: success - type: dict - sample: - { - "id":"1a28dd0988984d87b9cd61fa8da15424", - "name":"test_aa_policy", - "location":"UC1", - "links":[ - { - "rel":"self", - "href":"/v2/antiAffinityPolicies/wfad/1a28dd0988984d87b9cd61fa8da15424", - "verbs":[ - "GET", - "DELETE", - "PUT" - ] - }, - { - "rel":"location", - "href":"/v2/datacenters/wfad/UC1", - "id":"uc1", - "name":"UC1 - US West (Santa Clara)" - } - ] - } -""" - -__version__ = '${version}' - -import os -import traceback - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk: -# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcAntiAffinityPolicy: - - clc = clc_sdk - module = None - - def __init__(self, module): - """ - Construct module - """ - self.module = module - self.policy_dict = {} - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), - exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), - exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - name=dict(required=True), - location=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), - ) - return argument_spec - - # Module Behavior Goodness - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - p = self.module.params - - self._set_clc_credentials_from_env() - self.policy_dict = self._get_policies_for_datacenter(p) - - if p['state'] == "absent": - changed, policy = self._ensure_policy_is_absent(p) - else: - changed, policy = self._ensure_policy_is_present(p) - - if hasattr(policy, 'data'): - policy = policy.data - elif hasattr(policy, '__dict__'): - policy = policy.__dict__ - - self.module.exit_json(changed=changed, policy=policy) - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = 
env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - def _get_policies_for_datacenter(self, p): - """ - Get the Policies for a datacenter by calling the CLC API. - :param p: datacenter to get policies from - :return: policies in the datacenter - """ - response = {} - - policies = self.clc.v2.AntiAffinity.GetAll(location=p['location']) - - for policy in policies: - response[policy.name] = policy - return response - - def _create_policy(self, p): - """ - Create an Anti Affinity Policy using the CLC API. - :param p: datacenter to create policy in - :return: response dictionary from the CLC API. - """ - try: - return self.clc.v2.AntiAffinity.Create( - name=p['name'], - location=p['location']) - except CLCException as ex: - self.module.fail_json(msg='Failed to create anti affinity policy : {0}. {1}'.format( - p['name'], ex.response_text - )) - - def _delete_policy(self, p): - """ - Delete an Anti Affinity Policy using the CLC API. - :param p: datacenter to delete a policy from - :return: none - """ - try: - policy = self.policy_dict[p['name']] - policy.Delete() - except CLCException as ex: - self.module.fail_json(msg='Failed to delete anti affinity policy : {0}. {1}'.format( - p['name'], ex.response_text - )) - - def _policy_exists(self, policy_name): - """ - Check to see if an Anti Affinity Policy exists - :param policy_name: name of the policy - :return: boolean of if the policy exists - """ - if policy_name in self.policy_dict: - return self.policy_dict.get(policy_name) - - return False - - def _ensure_policy_is_absent(self, p): - """ - Makes sure that a policy is absent - :param p: dictionary of policy name - :return: tuple of if a deletion occurred and the name of the policy that was deleted - """ - changed = False - if self._policy_exists(policy_name=p['name']): - changed = True - if not self.module.check_mode: - self._delete_policy(p) - return changed, None - - def _ensure_policy_is_present(self, p): - """ - Ensures that a policy is present - :param p: dictionary of a policy name - :return: tuple of if an addition occurred and the name of the policy that was added - """ - changed = False - policy = self._policy_exists(policy_name=p['name']) - if not policy: - changed = True - policy = None - if not self.module.check_mode: - policy = self._create_policy(p) - return changed, policy - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. 
-    :return: none
-    """
-    module = AnsibleModule(
-        argument_spec=ClcAntiAffinityPolicy._define_module_argument_spec(),
-        supports_check_mode=True)
-    clc_aa_policy = ClcAntiAffinityPolicy(module)
-    clc_aa_policy.process_request()
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/clc_alert_policy.py b/plugins/modules/clc_alert_policy.py
deleted file mode 100644
index 999d1f743f..0000000000
--- a/plugins/modules/clc_alert_policy.py
+++ /dev/null
@@ -1,527 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-#
-# Copyright (c) 2015 CenturyLink
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r"""
-module: clc_alert_policy
-short_description: Create or Delete Alert Policies at CenturyLink Cloud
-description:
-  - An Ansible module to Create or Delete Alert Policies at CenturyLink Cloud.
-extends_documentation_fragment:
-  - community.general.attributes
-attributes:
-  check_mode:
-    support: full
-  diff_mode:
-    support: none
-options:
-  alias:
-    description:
-      - The alias of your CLC Account.
-    type: str
-    required: true
-  name:
-    description:
-      - The name of the alert policy. This is mutually exclusive with O(id).
-    type: str
-  id:
-    description:
-      - The alert policy id. This is mutually exclusive with O(name).
-    type: str
-  alert_recipients:
-    description:
-      - A list of recipient email ids to notify when the alert triggers. This is required for O(state=present).
-    type: list
-    elements: str
-  metric:
-    description:
-      - The metric on which to measure the condition that will trigger the alert. This is required for O(state=present).
-    type: str
-    choices: ['cpu', 'memory', 'disk']
-  duration:
-    description:
-      - The length of time in minutes that the condition must exceed the threshold. This is required for O(state=present).
-    type: str
-  threshold:
-    description:
-      - The threshold that will trigger the alert when the metric equals or exceeds it. This is required for O(state=present).
-        This number represents a percentage and must be a value between 5.0 and 95.0 that is a multiple of 5.0.
-    type: int
-  state:
-    description:
-      - Whether to create or delete the policy.
-    type: str
-    default: present
-    choices: ['present', 'absent']
-requirements:
-  - requests >= 2.5.0
-  - clc-sdk
-author: "CLC Runner (@clc-runner)"
-notes:
-  - To use this module, it is required to set the below environment variables which enables access to the Centurylink Cloud.
-  - E(CLC_V2_API_USERNAME), the account login id for the Centurylink Cloud.
-  - E(CLC_V2_API_PASSWORD), the account password for the Centurylink Cloud.
-  - Alternatively, the module accepts the API token and account alias. The API token can be generated using the CLC account
-    login and password using the HTTP API call @ https://api.ctl.io/v2/authentication/login
-  - E(CLC_V2_API_TOKEN), the API token generated from https://api.ctl.io/v2/authentication/login
-  - E(CLC_ACCT_ALIAS), the account alias associated with the Centurylink Cloud.
-  - Users can set E(CLC_V2_API_URL) to specify an endpoint for pointing to a different CLC environment.
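
The token-and-alias alternative described in the notes above can also be supplied per task with Ansible's environment keyword rather than exported in the shell. A minimal sketch, assuming a token previously obtained from the authentication endpoint and stored in a user-defined clc_token variable (illustrative only, not one of the module's shipped examples):

- name: Create a CPU alert policy using a pre-generated API token
  community.general.clc_alert_policy:
    alias: wfad
    name: 'alert for cpu > 80%'
    alert_recipients:
      - ops@example.com
    metric: cpu
    duration: '00:05:00'
    threshold: 80  # must be a multiple of 5 between 5 and 95
    state: present
  environment:
    CLC_V2_API_TOKEN: "{{ clc_token }}"  # placeholder variable, define it yourself
    CLC_ACCT_ALIAS: wfad
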
-""" - -EXAMPLES = r""" -- name: Create Alert Policy Example - hosts: localhost - gather_facts: false - connection: local - tasks: - - name: Create an Alert Policy for disk above 80% for 5 minutes - community.general.clc_alert_policy: - alias: wfad - name: 'alert for disk > 80%' - alert_recipients: - - test1@centurylink.com - - test2@centurylink.com - metric: 'disk' - duration: '00:05:00' - threshold: 80 - state: present - register: policy - - - name: Debug - ansible.builtin.debug: var=policy - -- name: Delete Alert Policy Example - hosts: localhost - gather_facts: false - connection: local - tasks: - - name: Delete an Alert Policy - community.general.clc_alert_policy: - alias: wfad - name: 'alert for disk > 80%' - state: absent - register: policy - - - name: Debug - ansible.builtin.debug: var=policy -""" - -RETURN = r""" -policy: - description: The alert policy information. - returned: success - type: dict - sample: - { - "actions": [ - { - "action": "email", - "settings": { - "recipients": [ - "user1@domain.com", - "user1@domain.com" - ] - } - } - ], - "id": "ba54ac54a60d4a4f1ed6d48c1ce240a7", - "links": [ - { - "href": "/v2/alertPolicies/alias/ba54ac54a60d4a4fb1d6d48c1ce240a7", - "rel": "self", - "verbs": [ - "GET", - "DELETE", - "PUT" - ] - } - ], - "name": "test_alert", - "triggers": [ - { - "duration": "00:05:00", - "metric": "disk", - "threshold": 80.0 - } - ] - } -""" - -__version__ = '${version}' - -import json -import os -import traceback - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. -# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import APIFailedResponse -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcAlertPolicy: - - clc = clc_sdk - module = None - - def __init__(self, module): - """ - Construct module - """ - self.module = module - self.policy_dict = {} - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - name=dict(), - id=dict(), - alias=dict(required=True), - alert_recipients=dict(type='list', elements='str'), - metric=dict( - choices=[ - 'cpu', - 'memory', - 'disk']), - duration=dict(type='str'), - threshold=dict(type='int'), - state=dict(default='present', choices=['present', 'absent']) - ) - mutually_exclusive = [ - ['name', 'id'] - ] - return {'argument_spec': argument_spec, - 'mutually_exclusive': mutually_exclusive} - - # Module Behavior Goodness - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - p = self.module.params - - self._set_clc_credentials_from_env() - self.policy_dict = 
self._get_alert_policies(p['alias'])
-
-        if p['state'] == 'present':
-            changed, policy = self._ensure_alert_policy_is_present()
-        else:
-            changed, policy = self._ensure_alert_policy_is_absent()
-
-        self.module.exit_json(changed=changed, policy=policy)
-
-    def _set_clc_credentials_from_env(self):
-        """
-        Set the CLC Credentials on the sdk by reading environment variables
-        :return: none
-        """
-        env = os.environ
-        v2_api_token = env.get('CLC_V2_API_TOKEN', False)
-        v2_api_username = env.get('CLC_V2_API_USERNAME', False)
-        v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
-        clc_alias = env.get('CLC_ACCT_ALIAS', False)
-        api_url = env.get('CLC_V2_API_URL', False)
-
-        if api_url:
-            self.clc.defaults.ENDPOINT_URL_V2 = api_url
-
-        if v2_api_token and clc_alias:
-            self.clc._LOGIN_TOKEN_V2 = v2_api_token
-            self.clc._V2_ENABLED = True
-            self.clc.ALIAS = clc_alias
-        elif v2_api_username and v2_api_passwd:
-            self.clc.v2.SetCredentials(
-                api_username=v2_api_username,
-                api_passwd=v2_api_passwd)
-        else:
-            return self.module.fail_json(
-                msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
-                    "environment variables")
-
-    def _ensure_alert_policy_is_present(self):
-        """
-        Ensures that the alert policy is present
-        :return: (changed, policy)
-                 changed: A flag representing if anything is modified
-                 policy: the created/updated alert policy
-        """
-        changed = False
-        p = self.module.params
-        policy_name = p.get('name')
-
-        if not policy_name:
-            self.module.fail_json(msg='Policy name is required')
-        policy = self._alert_policy_exists(policy_name)
-        if not policy:
-            changed = True
-            policy = None
-            if not self.module.check_mode:
-                policy = self._create_alert_policy()
-        else:
-            changed_u, policy = self._ensure_alert_policy_is_updated(policy)
-            if changed_u:
-                changed = True
-        return changed, policy
-
-    def _ensure_alert_policy_is_absent(self):
-        """
-        Ensures that the alert policy is absent
-        :return: (changed, None)
-                 changed: A flag representing if anything is modified
-        """
-        changed = False
-        p = self.module.params
-        alert_policy_id = p.get('id')
-        alert_policy_name = p.get('name')
-        alias = p.get('alias')
-        if not alert_policy_id and not alert_policy_name:
-            self.module.fail_json(
-                msg='Either alert policy id or policy name is required')
-        if not alert_policy_id and alert_policy_name:
-            alert_policy_id = self._get_alert_policy_id(
-                self.module,
-                alert_policy_name)
-        if alert_policy_id and alert_policy_id in self.policy_dict:
-            changed = True
-            if not self.module.check_mode:
-                self._delete_alert_policy(alias, alert_policy_id)
-        return changed, None
-
-    def _ensure_alert_policy_is_updated(self, alert_policy):
-        """
-        Ensures the alert policy is updated if anything is changed in the alert policy configuration
-        :param alert_policy: the target alert policy
-        :return: (changed, policy)
-                 changed: A flag representing if anything is modified
-                 policy: the updated alert policy
-        """
-        changed = False
-        p = self.module.params
-        alert_policy_id = alert_policy.get('id')
-        email_list = p.get('alert_recipients')
-        metric = p.get('metric')
-        duration = p.get('duration')
-        threshold = p.get('threshold')
-        policy = alert_policy
-        if (metric and metric != str(alert_policy.get('triggers')[0].get('metric'))) or \
-            (duration and duration != str(alert_policy.get('triggers')[0].get('duration'))) or \
-                (threshold and float(threshold) != float(alert_policy.get('triggers')[0].get('threshold'))):
-            changed = True
-        elif email_list:
-            t_email_list = list(
alert_policy.get('actions')[0].get('settings').get('recipients')) - if set(email_list) != set(t_email_list): - changed = True - if changed and not self.module.check_mode: - policy = self._update_alert_policy(alert_policy_id) - return changed, policy - - def _get_alert_policies(self, alias): - """ - Get the alert policies for account alias by calling the CLC API. - :param alias: the account alias - :return: the alert policies for the account alias - """ - response = {} - - policies = self.clc.v2.API.Call('GET', - '/v2/alertPolicies/%s' - % alias) - - for policy in policies.get('items'): - response[policy.get('id')] = policy - return response - - def _create_alert_policy(self): - """ - Create an alert Policy using the CLC API. - :return: response dictionary from the CLC API. - """ - p = self.module.params - alias = p['alias'] - email_list = p['alert_recipients'] - metric = p['metric'] - duration = p['duration'] - threshold = p['threshold'] - policy_name = p['name'] - arguments = json.dumps( - { - 'name': policy_name, - 'actions': [{ - 'action': 'email', - 'settings': { - 'recipients': email_list - } - }], - 'triggers': [{ - 'metric': metric, - 'duration': duration, - 'threshold': threshold - }] - } - ) - try: - result = self.clc.v2.API.Call( - 'POST', - '/v2/alertPolicies/%s' % alias, - arguments) - except APIFailedResponse as e: - return self.module.fail_json( - msg='Unable to create alert policy "{0}". {1}'.format( - policy_name, str(e.response_text))) - return result - - def _update_alert_policy(self, alert_policy_id): - """ - Update alert policy using the CLC API. - :param alert_policy_id: The clc alert policy id - :return: response dictionary from the CLC API. - """ - p = self.module.params - alias = p['alias'] - email_list = p['alert_recipients'] - metric = p['metric'] - duration = p['duration'] - threshold = p['threshold'] - policy_name = p['name'] - arguments = json.dumps( - { - 'name': policy_name, - 'actions': [{ - 'action': 'email', - 'settings': { - 'recipients': email_list - } - }], - 'triggers': [{ - 'metric': metric, - 'duration': duration, - 'threshold': threshold - }] - } - ) - try: - result = self.clc.v2.API.Call( - 'PUT', '/v2/alertPolicies/%s/%s' % - (alias, alert_policy_id), arguments) - except APIFailedResponse as e: - return self.module.fail_json( - msg='Unable to update alert policy "{0}". {1}'.format( - policy_name, str(e.response_text))) - return result - - def _delete_alert_policy(self, alias, policy_id): - """ - Delete an alert policy using the CLC API. - :param alias : the account alias - :param policy_id: the alert policy id - :return: response dictionary from the CLC API. - """ - try: - result = self.clc.v2.API.Call( - 'DELETE', '/v2/alertPolicies/%s/%s' % - (alias, policy_id), None) - except APIFailedResponse as e: - return self.module.fail_json( - msg='Unable to delete alert policy id "{0}". 
{1}'.format( - policy_id, str(e.response_text))) - return result - - def _alert_policy_exists(self, policy_name): - """ - Check to see if an alert policy exists - :param policy_name: name of the alert policy - :return: boolean of if the policy exists - """ - result = False - for policy_id in self.policy_dict: - if self.policy_dict.get(policy_id).get('name') == policy_name: - result = self.policy_dict.get(policy_id) - return result - - def _get_alert_policy_id(self, module, alert_policy_name): - """ - retrieves the alert policy id of the account based on the name of the policy - :param module: the AnsibleModule object - :param alert_policy_name: the alert policy name - :return: alert_policy_id: The alert policy id - """ - alert_policy_id = None - for policy_id in self.policy_dict: - if self.policy_dict.get(policy_id).get('name') == alert_policy_name: - if not alert_policy_id: - alert_policy_id = policy_id - else: - return module.fail_json( - msg='multiple alert policies were found with policy name : %s' % alert_policy_name) - return alert_policy_id - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. - :return: none - """ - argument_dict = ClcAlertPolicy._define_module_argument_spec() - module = AnsibleModule(supports_check_mode=True, **argument_dict) - clc_alert_policy = ClcAlertPolicy(module) - clc_alert_policy.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/clc_blueprint_package.py b/plugins/modules/clc_blueprint_package.py deleted file mode 100644 index be44b58530..0000000000 --- a/plugins/modules/clc_blueprint_package.py +++ /dev/null @@ -1,303 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r""" -module: clc_blueprint_package -short_description: Deploys a blue print package on a set of servers in CenturyLink Cloud -description: - - An Ansible module to deploy blue print package on a set of servers in CenturyLink Cloud. -extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: full - diff_mode: - support: none -options: - server_ids: - description: - - A list of server Ids to deploy the blue print package. - type: list - required: true - elements: str - package_id: - description: - - The package id of the blue print. - type: str - required: true - package_params: - description: - - The dictionary of arguments required to deploy the blue print. - type: dict - default: {} - required: false - state: - description: - - Whether to install or uninstall the package. Currently it supports only V(present) for install action. - type: str - required: false - default: present - choices: ['present'] - wait: - description: - - Whether to wait for the tasks to finish before returning. 
-    type: str
-    default: 'True'
-    required: false
-requirements:
-  - requests >= 2.5.0
-  - clc-sdk
-author: "CLC Runner (@clc-runner)"
-notes:
-  - To use this module, it is required to set the below environment variables which enables access to the Centurylink Cloud.
-  - E(CLC_V2_API_USERNAME), the account login id for the Centurylink Cloud.
-  - E(CLC_V2_API_PASSWORD), the account password for the Centurylink Cloud.
-  - Alternatively, the module accepts the API token and account alias. The API token can be generated using the CLC account
-    login and password using the HTTP API call @ https://api.ctl.io/v2/authentication/login
-  - E(CLC_V2_API_TOKEN), the API token generated from https://api.ctl.io/v2/authentication/login
-  - E(CLC_ACCT_ALIAS), the account alias associated with the Centurylink Cloud.
-  - Users can set E(CLC_V2_API_URL) to specify an endpoint for pointing to a different CLC environment.
-"""
-
-EXAMPLES = r"""
-# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
-
-- name: Deploy package
-  community.general.clc_blueprint_package:
-    server_ids:
-      - UC1TEST-SERVER1
-      - UC1TEST-SERVER2
-    package_id: 77abb844-579d-478d-3955-c69ab4a7ba1a
-    package_params: {}
-"""
-
-RETURN = r"""
-server_ids:
-  description: The list of server ids that are changed.
-  returned: success
-  type: list
-  sample: ["UC1TEST-SERVER1", "UC1TEST-SERVER2"]
-"""
-
-__version__ = '${version}'
-
-import os
-import traceback
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-REQUESTS_IMP_ERR = None
-try:
-    import requests
-except ImportError:
-    REQUESTS_IMP_ERR = traceback.format_exc()
-    REQUESTS_FOUND = False
-else:
-    REQUESTS_FOUND = True
-
-#
-# Requires the clc-python-sdk.
-# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcBlueprintPackage: - - clc = clc_sdk - module = None - - def __init__(self, module): - """ - Construct module - """ - self.module = module - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - p = self.module.params - changed = False - changed_server_ids = [] - self._set_clc_credentials_from_env() - server_ids = p['server_ids'] - package_id = p['package_id'] - package_params = p['package_params'] - state = p['state'] - if state == 'present': - changed, changed_server_ids, request_list = self.ensure_package_installed( - server_ids, package_id, package_params) - self._wait_for_requests_to_complete(request_list) - self.module.exit_json(changed=changed, server_ids=changed_server_ids) - - @staticmethod - def define_argument_spec(): - """ - This function defines the dictionary object required for - package module - :return: the package dictionary object - """ - argument_spec = dict( - server_ids=dict(type='list', elements='str', required=True), - package_id=dict(required=True), - package_params=dict(type='dict', default={}), - wait=dict(default=True), # @FIXME should be bool? - state=dict(default='present', choices=['present']) - ) - return argument_spec - - def ensure_package_installed(self, server_ids, package_id, package_params): - """ - Ensure the package is installed in the given list of servers - :param server_ids: the server list where the package needs to be installed - :param package_id: the blueprint package id - :param package_params: the package arguments - :return: (changed, server_ids, request_list) - changed: A flag indicating if a change was made - server_ids: The list of servers modified - request_list: The list of request objects from clc-sdk - """ - changed = False - request_list = [] - servers = self._get_servers_from_clc( - server_ids, - 'Failed to get servers from CLC') - for server in servers: - if not self.module.check_mode: - request = self.clc_install_package( - server, - package_id, - package_params) - request_list.append(request) - changed = True - return changed, server_ids, request_list - - def clc_install_package(self, server, package_id, package_params): - """ - Install the package to a given clc server - :param server: The server object where the package needs to be installed - :param package_id: The blue print package id - :param package_params: the required argument dict for the package installation - :return: The result object from the CLC API call - """ - result = None - try: - result = server.ExecutePackage( - package_id=package_id, - parameters=package_params) - except CLCException as ex: - self.module.fail_json(msg='Failed to install package : {0} to server {1}. 
{2}'.format( - package_id, server.id, ex.message - )) - return result - - def _wait_for_requests_to_complete(self, request_lst): - """ - Waits until the CLC requests are complete if the wait argument is True - :param request_lst: The list of CLC request objects - :return: none - """ - if not self.module.params['wait']: - return - for request in request_lst: - request.WaitUntilComplete() - for request_details in request.requests: - if request_details.Status() != 'succeeded': - self.module.fail_json( - msg='Unable to process package install request') - - def _get_servers_from_clc(self, server_list, message): - """ - Internal function to fetch list of CLC server objects from a list of server ids - :param server_list: the list of server ids - :param message: the error message to raise if there is any error - :return the list of CLC server objects - """ - try: - return self.clc.v2.Servers(server_list).servers - except CLCException as ex: - self.module.fail_json(msg=message + ': %s' % ex) - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - Main function - :return: None - """ - module = AnsibleModule( - argument_spec=ClcBlueprintPackage.define_argument_spec(), - supports_check_mode=True - ) - clc_blueprint_package = ClcBlueprintPackage(module) - clc_blueprint_package.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/clc_firewall_policy.py b/plugins/modules/clc_firewall_policy.py deleted file mode 100644 index 7c34b7c744..0000000000 --- a/plugins/modules/clc_firewall_policy.py +++ /dev/null @@ -1,590 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r""" -module: clc_firewall_policy -short_description: Create/delete/update firewall policies -description: - - Create or delete or update firewall policies on Centurylink Cloud. -extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: full - diff_mode: - support: none -options: - location: - description: - - Target datacenter for the firewall policy. 
-    type: str
-    required: true
-  state:
-    description:
-      - Whether to create or delete the firewall policy.
-    type: str
-    default: present
-    choices: ['present', 'absent']
-  source:
-    description:
-      - The list of source addresses for traffic on the originating firewall. This is required when O(state=present).
-    type: list
-    elements: str
-  destination:
-    description:
-      - The list of destination addresses for traffic on the terminating firewall. This is required when O(state=present).
-    type: list
-    elements: str
-  ports:
-    description:
-      - The list of ports associated with the policy. TCP and UDP can take in single ports or port ranges.
-      - "Example: V(['any', 'icmp', 'TCP/123', 'UDP/123', 'TCP/123-456', 'UDP/123-456'])."
-    type: list
-    elements: str
-  firewall_policy_id:
-    description:
-      - Id of the firewall policy. This is required to update or delete an existing firewall policy.
-    type: str
-  source_account_alias:
-    description:
-      - CLC alias for the source account.
-    type: str
-    required: true
-  destination_account_alias:
-    description:
-      - CLC alias for the destination account.
-    type: str
-  wait:
-    description:
-      - Whether to wait for the provisioning tasks to finish before returning.
-    type: str
-    default: 'True'
-  enabled:
-    description:
-      - Whether the firewall policy is enabled or disabled.
-    type: str
-    choices: ['True', 'False']
-    default: 'True'
-requirements:
-  - requests >= 2.5.0
-  - clc-sdk
-author: "CLC Runner (@clc-runner)"
-notes:
-  - To use this module, it is required to set the below environment variables which enables access to the Centurylink Cloud.
-  - E(CLC_V2_API_USERNAME), the account login id for the Centurylink Cloud.
-  - E(CLC_V2_API_PASSWORD), the account password for the Centurylink Cloud.
-  - Alternatively, the module accepts the API token and account alias. The API token can be generated using the CLC account
-    login and password using the HTTP API call @ https://api.ctl.io/v2/authentication/login
-  - E(CLC_V2_API_TOKEN), the API token generated from https://api.ctl.io/v2/authentication/login
-  - E(CLC_ACCT_ALIAS), the account alias associated with the Centurylink Cloud.
-  - Users can set E(CLC_V2_API_URL) to specify an endpoint for pointing to a different CLC environment.
-"""
-
-EXAMPLES = r"""
-- name: Create Firewall Policy
-  hosts: localhost
-  gather_facts: false
-  connection: local
-  tasks:
-    - name: Create / Verify a Firewall Policy at CenturyLink Cloud
-      community.general.clc_firewall_policy:
-        source_account_alias: WFAD
-        location: VA1
-        state: present
-        source: 10.128.216.0/24
-        destination: 10.128.216.0/24
-        ports: Any
-        destination_account_alias: WFAD
-
-- name: Delete Firewall Policy
-  hosts: localhost
-  gather_facts: false
-  connection: local
-  tasks:
-    - name: Delete a Firewall Policy at CenturyLink Cloud
-      community.general.clc_firewall_policy:
-        source_account_alias: WFAD
-        location: VA1
-        state: absent
-        firewall_policy_id: c62105233d7a4231bd2e91b9c791e43e1
-"""
-
-RETURN = r"""
-firewall_policy_id:
-  description: The firewall policy id.
-  returned: success
-  type: str
-  sample: fc36f1bfd47242e488a9c44346438c05
-firewall_policy:
-  description: The firewall policy information.
- returned: success - type: dict - sample: - { - "destination":[ - "10.1.1.0/24", - "10.2.2.0/24" - ], - "destinationAccount":"wfad", - "enabled":true, - "id":"fc36f1bfd47242e488a9c44346438c05", - "links":[ - { - "href":"http://api.ctl.io/v2-experimental/firewallPolicies/wfad/uc1/fc36f1bfd47242e488a9c44346438c05", - "rel":"self", - "verbs":[ - "GET", - "PUT", - "DELETE" - ] - } - ], - "ports":[ - "any" - ], - "source":[ - "10.1.1.0/24", - "10.2.2.0/24" - ], - "status":"active" - } -""" - -__version__ = '${version}' - -import os -import traceback -from ansible.module_utils.six.moves.urllib.parse import urlparse -from time import sleep - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import APIFailedResponse -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcFirewallPolicy: - - clc = None - - def __init__(self, module): - """ - Construct module - """ - self.clc = clc_sdk - self.module = module - self.firewall_dict = {} - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - location=dict(required=True), - source_account_alias=dict(required=True), - destination_account_alias=dict(), - firewall_policy_id=dict(), - ports=dict(type='list', elements='str'), - source=dict(type='list', elements='str'), - destination=dict(type='list', elements='str'), - wait=dict(default=True), # @FIXME type=bool - state=dict(default='present', choices=['present', 'absent']), - enabled=dict(default=True, choices=[True, False]) - ) - return argument_spec - - def process_request(self): - """ - Execute the main code path, and handle the request - :return: none - """ - changed = False - firewall_policy = None - location = self.module.params.get('location') - source_account_alias = self.module.params.get('source_account_alias') - destination_account_alias = self.module.params.get( - 'destination_account_alias') - firewall_policy_id = self.module.params.get('firewall_policy_id') - ports = self.module.params.get('ports') - source = self.module.params.get('source') - destination = self.module.params.get('destination') - wait = self.module.params.get('wait') - state = self.module.params.get('state') - enabled = self.module.params.get('enabled') - - self.firewall_dict = { - 'location': location, - 'source_account_alias': source_account_alias, - 'destination_account_alias': destination_account_alias, - 'firewall_policy_id': firewall_policy_id, - 'ports': ports, - 'source': source, - 'destination': destination, - 'wait': wait, - 'state': state, - 'enabled': enabled} - - self._set_clc_credentials_from_env() - - if state == 'absent': - changed, 
firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_absent( - source_account_alias, location, self.firewall_dict) - - elif state == 'present': - changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_present( - source_account_alias, location, self.firewall_dict) - - return self.module.exit_json( - changed=changed, - firewall_policy_id=firewall_policy_id, - firewall_policy=firewall_policy) - - @staticmethod - def _get_policy_id_from_response(response): - """ - Method to parse out the policy id from creation response - :param response: response from firewall creation API call - :return: policy_id: firewall policy id from creation call - """ - url = response.get('links')[0]['href'] - path = urlparse(url).path - path_list = os.path.split(path) - policy_id = path_list[-1] - return policy_id - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - def _ensure_firewall_policy_is_present( - self, - source_account_alias, - location, - firewall_dict): - """ - Ensures that a given firewall policy is present - :param source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_dict: dictionary of request parameters for firewall policy - :return: (changed, firewall_policy_id, firewall_policy) - changed: flag for if a change occurred - firewall_policy_id: the firewall policy id that was created/updated - firewall_policy: The firewall_policy object - """ - firewall_policy = None - firewall_policy_id = firewall_dict.get('firewall_policy_id') - - if firewall_policy_id is None: - if not self.module.check_mode: - response = self._create_firewall_policy( - source_account_alias, - location, - firewall_dict) - firewall_policy_id = self._get_policy_id_from_response( - response) - changed = True - else: - firewall_policy = self._get_firewall_policy( - source_account_alias, location, firewall_policy_id) - if not firewall_policy: - return self.module.fail_json( - msg='Unable to find the firewall policy id : {0}'.format( - firewall_policy_id)) - changed = self._compare_get_request_with_dict( - firewall_policy, - firewall_dict) - if not self.module.check_mode and changed: - self._update_firewall_policy( - source_account_alias, - location, - firewall_policy_id, - firewall_dict) - if changed and firewall_policy_id: - firewall_policy = self._wait_for_requests_to_complete( - source_account_alias, - location, - firewall_policy_id) - return changed, firewall_policy_id, firewall_policy - - def _ensure_firewall_policy_is_absent( - self, - source_account_alias, - location, - firewall_dict): - """ - Ensures that a given firewall policy is removed if present - :param source_account_alias: the 
source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_dict: firewall policy to delete - :return: (changed, firewall_policy_id, response) - changed: flag for if a change occurred - firewall_policy_id: the firewall policy id that was deleted - response: response from CLC API call - """ - changed = False - response = [] - firewall_policy_id = firewall_dict.get('firewall_policy_id') - result = self._get_firewall_policy( - source_account_alias, location, firewall_policy_id) - if result: - if not self.module.check_mode: - response = self._delete_firewall_policy( - source_account_alias, - location, - firewall_policy_id) - changed = True - return changed, firewall_policy_id, response - - def _create_firewall_policy( - self, - source_account_alias, - location, - firewall_dict): - """ - Creates the firewall policy for the given account alias - :param source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_dict: dictionary of request parameters for firewall policy - :return: response from CLC API call - """ - payload = { - 'destinationAccount': firewall_dict.get('destination_account_alias'), - 'source': firewall_dict.get('source'), - 'destination': firewall_dict.get('destination'), - 'ports': firewall_dict.get('ports')} - try: - response = self.clc.v2.API.Call( - 'POST', '/v2-experimental/firewallPolicies/%s/%s' % - (source_account_alias, location), payload) - except APIFailedResponse as e: - return self.module.fail_json( - msg="Unable to create firewall policy. %s" % - str(e.response_text)) - return response - - def _delete_firewall_policy( - self, - source_account_alias, - location, - firewall_policy_id): - """ - Deletes a given firewall policy for an account alias in a datacenter - :param source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_policy_id: firewall policy id to delete - :return: response: response from CLC API call - """ - try: - response = self.clc.v2.API.Call( - 'DELETE', '/v2-experimental/firewallPolicies/%s/%s/%s' % - (source_account_alias, location, firewall_policy_id)) - except APIFailedResponse as e: - return self.module.fail_json( - msg="Unable to delete the firewall policy id : {0}. {1}".format( - firewall_policy_id, str(e.response_text))) - return response - - def _update_firewall_policy( - self, - source_account_alias, - location, - firewall_policy_id, - firewall_dict): - """ - Updates a firewall policy for a given datacenter and account alias - :param source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_policy_id: firewall policy id to update - :param firewall_dict: dictionary of request parameters for firewall policy - :return: response: response from CLC API call - """ - try: - response = self.clc.v2.API.Call( - 'PUT', - '/v2-experimental/firewallPolicies/%s/%s/%s' % - (source_account_alias, - location, - firewall_policy_id), - firewall_dict) - except APIFailedResponse as e: - return self.module.fail_json( - msg="Unable to update the firewall policy id : {0}. 
{1}".format( - firewall_policy_id, str(e.response_text))) - return response - - @staticmethod - def _compare_get_request_with_dict(response, firewall_dict): - """ - Helper method to compare the json response for getting the firewall policy with the request parameters - :param response: response from the get method - :param firewall_dict: dictionary of request parameters for firewall policy - :return: changed: Boolean that returns true if there are differences between - the response parameters and the playbook parameters - """ - - changed = False - - response_dest_account_alias = response.get('destinationAccount') - response_enabled = response.get('enabled') - response_source = response.get('source') - response_dest = response.get('destination') - response_ports = response.get('ports') - request_dest_account_alias = firewall_dict.get( - 'destination_account_alias') - request_enabled = firewall_dict.get('enabled') - if request_enabled is None: - request_enabled = True - request_source = firewall_dict.get('source') - request_dest = firewall_dict.get('destination') - request_ports = firewall_dict.get('ports') - - if ( - response_dest_account_alias and str(response_dest_account_alias) != str(request_dest_account_alias)) or ( - response_enabled != request_enabled) or ( - response_source and response_source != request_source) or ( - response_dest and response_dest != request_dest) or ( - response_ports and response_ports != request_ports): - changed = True - return changed - - def _get_firewall_policy( - self, - source_account_alias, - location, - firewall_policy_id): - """ - Get back details for a particular firewall policy - :param source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_policy_id: id of the firewall policy to get - :return: response - The response from CLC API call - """ - response = None - try: - response = self.clc.v2.API.Call( - 'GET', '/v2-experimental/firewallPolicies/%s/%s/%s' % - (source_account_alias, location, firewall_policy_id)) - except APIFailedResponse as e: - if e.response_status_code != 404: - self.module.fail_json( - msg="Unable to fetch the firewall policy with id : {0}. {1}".format( - firewall_policy_id, str(e.response_text))) - return response - - def _wait_for_requests_to_complete( - self, - source_account_alias, - location, - firewall_policy_id, - wait_limit=50): - """ - Waits until the CLC requests are complete if the wait argument is True - :param source_account_alias: The source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_policy_id: The firewall policy id - :param wait_limit: The number of times to check the status for completion - :return: the firewall_policy object - """ - wait = self.module.params.get('wait') - count = 0 - firewall_policy = None - while wait: - count += 1 - firewall_policy = self._get_firewall_policy( - source_account_alias, location, firewall_policy_id) - status = firewall_policy.get('status') - if status == 'active' or count > wait_limit: - wait = False - else: - # wait for 2 seconds - sleep(2) - return firewall_policy - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. 
Instantiates the module and calls process_request.
-    :return: none
-    """
-    module = AnsibleModule(
-        argument_spec=ClcFirewallPolicy._define_module_argument_spec(),
-        supports_check_mode=True)
-
-    clc_firewall = ClcFirewallPolicy(module)
-    clc_firewall.process_request()
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/clc_group.py b/plugins/modules/clc_group.py
deleted file mode 100644
index ceb5513c60..0000000000
--- a/plugins/modules/clc_group.py
+++ /dev/null
@@ -1,517 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-#
-# Copyright (c) 2015 CenturyLink
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r"""
-module: clc_group
-short_description: Create/delete Server Groups at Centurylink Cloud
-description:
-  - Create or delete Server Groups at Centurylink Cloud.
-extends_documentation_fragment:
-  - community.general.attributes
-attributes:
-  check_mode:
-    support: full
-  diff_mode:
-    support: none
-options:
-  name:
-    description:
-      - The name of the Server Group.
-    type: str
-    required: true
-  description:
-    description:
-      - A description of the Server Group.
-    type: str
-    required: false
-  parent:
-    description:
-      - The parent group of the server group. If parent is not provided, it creates the group at the top level.
-    type: str
-    required: false
-  location:
-    description:
-      - Datacenter to create the group in. If location is not provided, the group gets created in the default datacenter associated
-        with the account.
-    type: str
-    required: false
-  state:
-    description:
-      - Whether to create or delete the group.
-    type: str
-    default: present
-    choices: ['present', 'absent']
-  wait:
-    description:
-      - Whether to wait for the tasks to finish before returning.
-    type: bool
-    default: true
-    required: false
-requirements:
-  - requests >= 2.5.0
-  - clc-sdk
-author: "CLC Runner (@clc-runner)"
-notes:
-  - To use this module, it is required to set the below environment variables which enables access to the Centurylink Cloud.
-  - E(CLC_V2_API_USERNAME), the account login id for the Centurylink Cloud.
-  - E(CLC_V2_API_PASSWORD), the account password for the Centurylink Cloud.
-  - Alternatively, the module accepts the API token and account alias. The API token can be generated using the CLC account
-    login and password using the HTTP API call @ https://api.ctl.io/v2/authentication/login
-  - E(CLC_V2_API_TOKEN), the API token generated from https://api.ctl.io/v2/authentication/login
-  - E(CLC_ACCT_ALIAS), the account alias associated with the Centurylink Cloud.
-  - Users can set E(CLC_V2_API_URL) to specify an endpoint for pointing to a different CLC environment.
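
Because the module declares full check_mode support above, a dry run can preview whether a group would be created or deleted before anything changes. A minimal sketch (illustrative only, not one of the module's shipped examples):

- name: Preview group creation without making changes
  community.general.clc_group:
    name: My Cool Server Group
    parent: Default Group
    state: present
  check_mode: true
  register: preview

- name: Show whether a real run would change anything
  ansible.builtin.debug:
    var: preview.changed
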
-""" - -EXAMPLES = r""" -# Create a Server Group -- name: Create Server Group - hosts: localhost - gather_facts: false - connection: local - tasks: - - name: Create / Verify a Server Group at CenturyLink Cloud - community.general.clc_group: - name: My Cool Server Group - parent: Default Group - state: present - register: clc - - - name: Debug - ansible.builtin.debug: - var: clc - -# Delete a Server Group -- name: Delete Server Group - hosts: localhost - gather_facts: false - connection: local - tasks: - - name: Delete / Verify Absent a Server Group at CenturyLink Cloud - community.general.clc_group: - name: My Cool Server Group - parent: Default Group - state: absent - register: clc - - - name: Debug - ansible.builtin.debug: - var: clc -""" - -RETURN = r""" -group: - description: The group information. - returned: success - type: dict - sample: - { - "changeInfo":{ - "createdBy":"service.wfad", - "createdDate":"2015-07-29T18:52:47Z", - "modifiedBy":"service.wfad", - "modifiedDate":"2015-07-29T18:52:47Z" - }, - "customFields":[ - - ], - "description":"test group", - "groups":[ - - ], - "id":"bb5f12a3c6044ae4ad0a03e73ae12cd1", - "links":[ - { - "href":"/v2/groups/wfad", - "rel":"createGroup", - "verbs":[ - "POST" - ] - }, - { - "href":"/v2/servers/wfad", - "rel":"createServer", - "verbs":[ - "POST" - ] - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1", - "rel":"self", - "verbs":[ - "GET", - "PATCH", - "DELETE" - ] - }, - { - "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0", - "id":"086ac1dfe0b6411989e8d1b77c4065f0", - "rel":"parentGroup" - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/defaults", - "rel":"defaults", - "verbs":[ - "GET", - "POST" - ] - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/billing", - "rel":"billing" - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/archive", - "rel":"archiveGroupAction" - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/statistics", - "rel":"statistics" - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/upcomingScheduledActivities", - "rel":"upcomingScheduledActivities" - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/horizontalAutoscalePolicy", - "rel":"horizontalAutoscalePolicyMapping", - "verbs":[ - "GET", - "PUT", - "DELETE" - ] - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/scheduledActivities", - "rel":"scheduledActivities", - "verbs":[ - "GET", - "POST" - ] - } - ], - "locationId":"UC1", - "name":"test group", - "status":"active", - "type":"default" - } -""" - -__version__ = '${version}' - -import os -import traceback - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. 
-# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcGroup(object): - - clc = None - root_group = None - - def __init__(self, module): - """ - Construct module - """ - self.clc = clc_sdk - self.module = module - self.group_dict = {} - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Execute the main code path, and handle the request - :return: none - """ - location = self.module.params.get('location') - group_name = self.module.params.get('name') - parent_name = self.module.params.get('parent') - group_description = self.module.params.get('description') - state = self.module.params.get('state') - - self._set_clc_credentials_from_env() - self.group_dict = self._get_group_tree_for_datacenter( - datacenter=location) - - if state == "absent": - changed, group, requests = self._ensure_group_is_absent( - group_name=group_name, parent_name=parent_name) - if requests: - self._wait_for_requests_to_complete(requests) - else: - changed, group = self._ensure_group_is_present( - group_name=group_name, parent_name=parent_name, group_description=group_description) - try: - group = group.data - except AttributeError: - group = group_name - self.module.exit_json(changed=changed, group=group) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - name=dict(required=True), - description=dict(), - parent=dict(), - location=dict(), - state=dict(default='present', choices=['present', 'absent']), - wait=dict(type='bool', default=True)) - - return argument_spec - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - def _ensure_group_is_absent(self, group_name, parent_name): - """ - Ensure that group_name is absent by deleting it if necessary - :param group_name: string - the name of the clc server group to delete - :param parent_name: string - the name of the parent group for group_name - :return: changed, group - """ - changed = False - group = [] - results = [] - - if 
self._group_exists(group_name=group_name, parent_name=parent_name): - if not self.module.check_mode: - group.append(group_name) - result = self._delete_group(group_name) - results.append(result) - changed = True - return changed, group, results - - def _delete_group(self, group_name): - """ - Delete the provided server group - :param group_name: string - the server group to delete - :return: none - """ - response = None - group, parent = self.group_dict.get(group_name) - try: - response = group.Delete() - except CLCException as ex: - self.module.fail_json(msg='Failed to delete group :{0}. {1}'.format( - group_name, ex.response_text - )) - return response - - def _ensure_group_is_present( - self, - group_name, - parent_name, - group_description): - """ - Checks to see if a server group exists, creates it if it doesn't. - :param group_name: the name of the group to validate/create - :param parent_name: the name of the parent group for group_name - :param group_description: a short description of the server group (used when creating) - :return: (changed, group) - - changed: Boolean- whether a change was made, - group: A clc group object for the group - """ - if not self.root_group: - raise AssertionError("Implementation Error: Root Group not set") - parent = parent_name if parent_name is not None else self.root_group.name - description = group_description - changed = False - group = group_name - - parent_exists = self._group_exists(group_name=parent, parent_name=None) - child_exists = self._group_exists( - group_name=group_name, - parent_name=parent) - - if parent_exists and child_exists: - group, parent = self.group_dict[group_name] - changed = False - elif parent_exists and not child_exists: - if not self.module.check_mode: - group = self._create_group( - group=group, - parent=parent, - description=description) - changed = True - else: - self.module.fail_json( - msg="parent group: " + - parent + - " does not exist") - - return changed, group - - def _create_group(self, group, parent, description): - """ - Create the provided server group - :param group: clc_sdk.Group - the group to create - :param parent: clc_sdk.Parent - the parent group for {group} - :param description: string - a text description of the group - :return: clc_sdk.Group - the created group - """ - response = None - (parent, grandparent) = self.group_dict[parent] - try: - response = parent.Create(name=group, description=description) - except CLCException as ex: - self.module.fail_json(msg='Failed to create group :{0}. 
{1}'.format( - group, ex.response_text)) - return response - - def _group_exists(self, group_name, parent_name): - """ - Check to see if a group exists - :param group_name: string - the group to check - :param parent_name: string - the parent of group_name - :return: boolean - whether the group exists - """ - result = False - if group_name in self.group_dict: - (group, parent) = self.group_dict[group_name] - if parent_name is None or parent_name == parent.name: - result = True - return result - - def _get_group_tree_for_datacenter(self, datacenter=None): - """ - Walk the tree of groups for a datacenter - :param datacenter: string - the datacenter to walk (ex: 'UC1') - :return: a dictionary of groups and parents - """ - self.root_group = self.clc.v2.Datacenter( - location=datacenter).RootGroup() - return self._walk_groups_recursive( - parent_group=None, - child_group=self.root_group) - - def _walk_groups_recursive(self, parent_group, child_group): - """ - Walk a parent-child tree of groups, starting with the provided child group - :param parent_group: clc_sdk.Group - the parent group to start the walk - :param child_group: clc_sdk.Group - the child group to start the walk - :return: a dictionary of groups and parents - """ - result = {str(child_group): (child_group, parent_group)} - groups = child_group.Subgroups().groups - if len(groups) > 0: - for group in groups: - if group.type != 'default': - continue - - result.update(self._walk_groups_recursive(child_group, group)) - return result - - def _wait_for_requests_to_complete(self, requests_lst): - """ - Waits until the CLC requests are complete if the wait argument is True - :param requests_lst: The list of CLC request objects - :return: none - """ - if not self.module.params['wait']: - return - for request in requests_lst: - request.WaitUntilComplete() - for request_details in request.requests: - if request_details.Status() != 'succeeded': - self.module.fail_json( - msg='Unable to process group request') - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. - :return: none - """ - module = AnsibleModule( - argument_spec=ClcGroup._define_module_argument_spec(), - supports_check_mode=True) - - clc_group = ClcGroup(module) - clc_group.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/clc_loadbalancer.py b/plugins/modules/clc_loadbalancer.py deleted file mode 100644 index 4690bd6506..0000000000 --- a/plugins/modules/clc_loadbalancer.py +++ /dev/null @@ -1,943 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r""" -module: clc_loadbalancer -short_description: Create, Delete shared loadbalancers in CenturyLink Cloud -description: - - An Ansible module to Create, Delete shared loadbalancers in CenturyLink Cloud. 
-extends_documentation_fragment:
-  - community.general.attributes
-attributes:
-  check_mode:
-    support: full
-  diff_mode:
-    support: none
-options:
-  name:
-    description:
-      - The name of the loadbalancer.
-    type: str
-    required: true
-  description:
-    description:
-      - A description for the loadbalancer.
-    type: str
-  alias:
-    description:
-      - The alias of your CLC Account.
-    type: str
-    required: true
-  location:
-    description:
-      - The location of the datacenter where the load balancer resides.
-    type: str
-    required: true
-  method:
-    description:
-      - The balancing method for the load balancer pool.
-    type: str
-    choices: ['leastConnection', 'roundRobin']
-  persistence:
-    description:
-      - The persistence method for the load balancer.
-    type: str
-    choices: ['standard', 'sticky']
-  port:
-    description:
-      - Port to configure on the public-facing side of the load balancer pool.
-    type: str
-    choices: ['80', '443']
-  nodes:
-    description:
-      - A list of nodes that need to be added to the load balancer pool.
-    type: list
-    default: []
-    elements: dict
-  status:
-    description:
-      - The status of the loadbalancer.
-    type: str
-    default: enabled
-    choices: ['enabled', 'disabled']
-  state:
-    description:
-      - Whether to create or delete the load balancer pool.
-    type: str
-    default: present
-    choices: ['present', 'absent', 'port_absent', 'nodes_present', 'nodes_absent']
-requirements:
-  - requests >= 2.5.0
-  - clc-sdk
-author: "CLC Runner (@clc-runner)"
-notes:
-  - To use this module, it is required to set the below environment variables, which enable access to the Centurylink Cloud.
-  - E(CLC_V2_API_USERNAME), the account login id for the Centurylink Cloud.
-  - E(CLC_V2_API_PASSWORD), the account password for the Centurylink Cloud.
-  - Alternatively, the module accepts the API token and account alias. The API token can be generated using the CLC account
-    login and password using the HTTP API call @ https://api.ctl.io/v2/authentication/login
-  - E(CLC_V2_API_TOKEN), the API token generated from https://api.ctl.io/v2/authentication/login
-  - E(CLC_ACCT_ALIAS), the account alias associated with the Centurylink Cloud.
-  - Users can set E(CLC_V2_API_URL) to specify an endpoint for pointing to a different CLC environment.
-""" - -EXAMPLES = r""" -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples -- name: Create Loadbalancer - hosts: localhost - connection: local - tasks: - - name: Actually Create things - community.general.clc_loadbalancer: - name: test - description: test - alias: TEST - location: WA1 - port: 443 - nodes: - - ipAddress: 10.11.22.123 - privatePort: 80 - state: present - -- name: Add node to an existing loadbalancer pool - hosts: localhost - connection: local - tasks: - - name: Actually Create things - community.general.clc_loadbalancer: - name: test - description: test - alias: TEST - location: WA1 - port: 443 - nodes: - - ipAddress: 10.11.22.234 - privatePort: 80 - state: nodes_present - -- name: Remove node from an existing loadbalancer pool - hosts: localhost - connection: local - tasks: - - name: Actually Create things - community.general.clc_loadbalancer: - name: test - description: test - alias: TEST - location: WA1 - port: 443 - nodes: - - ipAddress: 10.11.22.234 - privatePort: 80 - state: nodes_absent - -- name: Delete LoadbalancerPool - hosts: localhost - connection: local - tasks: - - name: Actually Delete things - community.general.clc_loadbalancer: - name: test - description: test - alias: TEST - location: WA1 - port: 443 - nodes: - - ipAddress: 10.11.22.123 - privatePort: 80 - state: port_absent - -- name: Delete Loadbalancer - hosts: localhost - connection: local - tasks: - - name: Actually Delete things - community.general.clc_loadbalancer: - name: test - description: test - alias: TEST - location: WA1 - port: 443 - nodes: - - ipAddress: 10.11.22.123 - privatePort: 80 - state: absent -""" - -RETURN = r""" -loadbalancer: - description: The load balancer result object from CLC. - returned: success - type: dict - sample: - { - "description":"test-lb", - "id":"ab5b18cb81e94ab9925b61d1ca043fb5", - "ipAddress":"66.150.174.197", - "links":[ - { - "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5", - "rel":"self", - "verbs":[ - "GET", - "PUT", - "DELETE" - ] - }, - { - "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5/pools", - "rel":"pools", - "verbs":[ - "GET", - "POST" - ] - } - ], - "name":"test-lb", - "pools":[ - - ], - "status":"enabled" - } -""" - -__version__ = '${version}' - -import json -import os -import traceback -from time import sleep - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. 
-# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import APIFailedResponse -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcLoadBalancer: - - clc = None - - def __init__(self, module): - """ - Construct module - """ - self.clc = clc_sdk - self.module = module - self.lb_dict = {} - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Execute the main code path, and handle the request - :return: none - """ - changed = False - result_lb = None - loadbalancer_name = self.module.params.get('name') - loadbalancer_alias = self.module.params.get('alias') - loadbalancer_location = self.module.params.get('location') - loadbalancer_description = self.module.params.get('description') - loadbalancer_port = self.module.params.get('port') - loadbalancer_method = self.module.params.get('method') - loadbalancer_persistence = self.module.params.get('persistence') - loadbalancer_nodes = self.module.params.get('nodes') - loadbalancer_status = self.module.params.get('status') - state = self.module.params.get('state') - - if loadbalancer_description is None: - loadbalancer_description = loadbalancer_name - - self._set_clc_credentials_from_env() - - self.lb_dict = self._get_loadbalancer_list( - alias=loadbalancer_alias, - location=loadbalancer_location) - - if state == 'present': - changed, result_lb, lb_id = self.ensure_loadbalancer_present( - name=loadbalancer_name, - alias=loadbalancer_alias, - location=loadbalancer_location, - description=loadbalancer_description, - status=loadbalancer_status) - if loadbalancer_port: - changed, result_pool, pool_id = self.ensure_loadbalancerpool_present( - lb_id=lb_id, - alias=loadbalancer_alias, - location=loadbalancer_location, - method=loadbalancer_method, - persistence=loadbalancer_persistence, - port=loadbalancer_port) - - if loadbalancer_nodes: - changed, result_nodes = self.ensure_lbpool_nodes_set( - alias=loadbalancer_alias, - location=loadbalancer_location, - name=loadbalancer_name, - port=loadbalancer_port, - nodes=loadbalancer_nodes) - elif state == 'absent': - changed, result_lb = self.ensure_loadbalancer_absent( - name=loadbalancer_name, - alias=loadbalancer_alias, - location=loadbalancer_location) - - elif state == 'port_absent': - changed, result_lb = self.ensure_loadbalancerpool_absent( - alias=loadbalancer_alias, - location=loadbalancer_location, - name=loadbalancer_name, - port=loadbalancer_port) - - elif state == 'nodes_present': - changed, result_lb = self.ensure_lbpool_nodes_present( - alias=loadbalancer_alias, - location=loadbalancer_location, - name=loadbalancer_name, - port=loadbalancer_port, - nodes=loadbalancer_nodes) - - elif state == 'nodes_absent': - changed, result_lb = self.ensure_lbpool_nodes_absent( - alias=loadbalancer_alias, - location=loadbalancer_location, - name=loadbalancer_name, - port=loadbalancer_port, - nodes=loadbalancer_nodes) - - self.module.exit_json(changed=changed, loadbalancer=result_lb) - - def 
ensure_loadbalancer_present( - self, name, alias, location, description, status): - """ - Checks to see if a load balancer exists and creates one if it does not. - :param name: Name of loadbalancer - :param alias: Alias of account - :param location: Datacenter - :param description: Description of loadbalancer - :param status: Enabled / Disabled - :return: (changed, result, lb_id) - changed: Boolean whether a change was made - result: The result object from the CLC load balancer request - lb_id: The load balancer id - """ - changed = False - result = name - lb_id = self._loadbalancer_exists(name=name) - if not lb_id: - if not self.module.check_mode: - result = self.create_loadbalancer(name=name, - alias=alias, - location=location, - description=description, - status=status) - lb_id = result.get('id') - changed = True - - return changed, result, lb_id - - def ensure_loadbalancerpool_present( - self, lb_id, alias, location, method, persistence, port): - """ - Checks to see if a load balancer pool exists and creates one if it does not. - :param lb_id: The loadbalancer id - :param alias: The account alias - :param location: the datacenter the load balancer resides in - :param method: the load balancing method - :param persistence: the load balancing persistence type - :param port: the port that the load balancer will listen on - :return: (changed, group, pool_id) - - changed: Boolean whether a change was made - result: The result from the CLC API call - pool_id: The string id of the load balancer pool - """ - changed = False - result = port - if not lb_id: - return changed, None, None - pool_id = self._loadbalancerpool_exists( - alias=alias, - location=location, - port=port, - lb_id=lb_id) - if not pool_id: - if not self.module.check_mode: - result = self.create_loadbalancerpool( - alias=alias, - location=location, - lb_id=lb_id, - method=method, - persistence=persistence, - port=port) - pool_id = result.get('id') - changed = True - - return changed, result, pool_id - - def ensure_loadbalancer_absent(self, name, alias, location): - """ - Checks to see if a load balancer exists and deletes it if it does - :param name: Name of the load balancer - :param alias: Alias of account - :param location: Datacenter - :return: (changed, result) - changed: Boolean whether a change was made - result: The result from the CLC API Call - """ - changed = False - result = name - lb_exists = self._loadbalancer_exists(name=name) - if lb_exists: - if not self.module.check_mode: - result = self.delete_loadbalancer(alias=alias, - location=location, - name=name) - changed = True - return changed, result - - def ensure_loadbalancerpool_absent(self, alias, location, name, port): - """ - Checks to see if a load balancer pool exists and deletes it if it does - :param alias: The account alias - :param location: the datacenter the load balancer resides in - :param name: the name of the load balancer - :param port: the port that the load balancer listens on - :return: (changed, result) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - result = None - lb_exists = self._loadbalancer_exists(name=name) - if lb_exists: - lb_id = self._get_loadbalancer_id(name=name) - pool_id = self._loadbalancerpool_exists( - alias=alias, - location=location, - port=port, - lb_id=lb_id) - if pool_id: - changed = True - if not self.module.check_mode: - result = self.delete_loadbalancerpool( - alias=alias, - location=location, - lb_id=lb_id, - pool_id=pool_id) - else: - result = "Pool 
doesn't exist"
-        else:
-            result = "Load balancer doesn't exist"
-        return changed, result
-
-    def ensure_lbpool_nodes_set(self, alias, location, name, port, nodes):
-        """
-        Checks to see if the provided list of nodes exists for the pool
-        and sets any nodes in the list that do not exist
-        :param alias: The account alias
-        :param location: the datacenter the load balancer resides in
-        :param name: the name of the load balancer
-        :param port: the port that the load balancer will listen on
-        :param nodes: The list of nodes to be updated in the pool
-        :return: (changed, result) -
-            changed: Boolean whether a change was made
-            result: The result from the CLC API call
-        """
-        result = {}
-        changed = False
-        lb_exists = self._loadbalancer_exists(name=name)
-        if lb_exists:
-            lb_id = self._get_loadbalancer_id(name=name)
-            pool_id = self._loadbalancerpool_exists(
-                alias=alias,
-                location=location,
-                port=port,
-                lb_id=lb_id)
-            if pool_id:
-                nodes_exist = self._loadbalancerpool_nodes_exists(alias=alias,
-                                                                  location=location,
-                                                                  lb_id=lb_id,
-                                                                  pool_id=pool_id,
-                                                                  nodes_to_check=nodes)
-                if not nodes_exist:
-                    changed = True
-                    result = self.set_loadbalancernodes(alias=alias,
-                                                        location=location,
-                                                        lb_id=lb_id,
-                                                        pool_id=pool_id,
-                                                        nodes=nodes)
-            else:
-                result = "Pool doesn't exist"
-        else:
-            result = "Load balancer doesn't exist"
-        return changed, result
-
-    def ensure_lbpool_nodes_present(self, alias, location, name, port, nodes):
-        """
-        Checks to see if the provided list of nodes exists for the pool and adds the missing nodes to the pool
-        :param alias: The account alias
-        :param location: the datacenter the load balancer resides in
-        :param name: the name of the load balancer
-        :param port: the port that the load balancer will listen on
-        :param nodes: the list of nodes to be added
-        :return: (changed, result) -
-            changed: Boolean whether a change was made
-            result: The result from the CLC API call
-        """
-        changed = False
-        lb_exists = self._loadbalancer_exists(name=name)
-        if lb_exists:
-            lb_id = self._get_loadbalancer_id(name=name)
-            pool_id = self._loadbalancerpool_exists(
-                alias=alias,
-                location=location,
-                port=port,
-                lb_id=lb_id)
-            if pool_id:
-                changed, result = self.add_lbpool_nodes(alias=alias,
-                                                        location=location,
-                                                        lb_id=lb_id,
-                                                        pool_id=pool_id,
-                                                        nodes_to_add=nodes)
-            else:
-                result = "Pool doesn't exist"
-        else:
-            result = "Load balancer doesn't exist"
-        return changed, result
-
-    def ensure_lbpool_nodes_absent(self, alias, location, name, port, nodes):
-        """
-        Checks to see if the provided list of nodes exists for the pool and removes any that are found
-        :param alias: The account alias
-        :param location: the datacenter the load balancer resides in
-        :param name: the name of the load balancer
-        :param port: the port that the load balancer will listen on
-        :param nodes: the list of nodes to be removed
-        :return: (changed, result) -
-            changed: Boolean whether a change was made
-            result: The result from the CLC API call
-        """
-        changed = False
-        lb_exists = self._loadbalancer_exists(name=name)
-        if lb_exists:
-            lb_id = self._get_loadbalancer_id(name=name)
-            pool_id = self._loadbalancerpool_exists(
-                alias=alias,
-                location=location,
-                port=port,
-                lb_id=lb_id)
-            if pool_id:
-                changed, result = self.remove_lbpool_nodes(alias=alias,
-                                                           location=location,
-                                                           lb_id=lb_id,
-                                                           pool_id=pool_id,
-                                                           nodes_to_remove=nodes)
-            else:
-                result = "Pool doesn't exist"
-        else:
-            result = "Load balancer doesn't exist"
-        return changed, result
-
-    def create_loadbalancer(self, name, alias, location, description, 
status): - """ - Create a loadbalancer w/ params - :param name: Name of loadbalancer - :param alias: Alias of account - :param location: Datacenter - :param description: Description for loadbalancer to be created - :param status: Enabled / Disabled - :return: result: The result from the CLC API call - """ - result = None - try: - result = self.clc.v2.API.Call('POST', - '/v2/sharedLoadBalancers/%s/%s' % (alias, - location), - json.dumps({"name": name, - "description": description, - "status": status})) - sleep(1) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to create load balancer "{0}". {1}'.format( - name, str(e.response_text))) - return result - - def create_loadbalancerpool( - self, alias, location, lb_id, method, persistence, port): - """ - Creates a pool on the provided load balancer - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param method: the load balancing method - :param persistence: the load balancing persistence type - :param port: the port that the load balancer will listen on - :return: result: The result from the create API call - """ - result = None - try: - result = self.clc.v2.API.Call( - 'POST', '/v2/sharedLoadBalancers/%s/%s/%s/pools' % - (alias, location, lb_id), json.dumps( - { - "port": port, "method": method, "persistence": persistence - })) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to create pool for load balancer id "{0}". {1}'.format( - lb_id, str(e.response_text))) - return result - - def delete_loadbalancer(self, alias, location, name): - """ - Delete CLC loadbalancer - :param alias: Alias for account - :param location: Datacenter - :param name: Name of the loadbalancer to delete - :return: result: The result from the CLC API call - """ - result = None - lb_id = self._get_loadbalancer_id(name=name) - try: - result = self.clc.v2.API.Call( - 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s' % - (alias, location, lb_id)) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to delete load balancer "{0}". {1}'.format( - name, str(e.response_text))) - return result - - def delete_loadbalancerpool(self, alias, location, lb_id, pool_id): - """ - Delete the pool on the provided load balancer - :param alias: The account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param pool_id: the id string of the load balancer pool - :return: result: The result from the delete API call - """ - result = None - try: - result = self.clc.v2.API.Call( - 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s' % - (alias, location, lb_id, pool_id)) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to delete pool for load balancer id "{0}". 
{1}'.format(
-                    lb_id, str(e.response_text)))
-        return result
-
-    def _get_loadbalancer_id(self, name):
-        """
-        Retrieves unique ID of loadbalancer
-        :param name: Name of loadbalancer
-        :return: Unique ID of the loadbalancer
-        """
-        id = None
-        for lb in self.lb_dict:
-            if lb.get('name') == name:
-                id = lb.get('id')
-        return id
-
-    def _get_loadbalancer_list(self, alias, location):
-        """
-        Retrieve a list of loadbalancers
-        :param alias: Alias for account
-        :param location: Datacenter
-        :return: JSON data for all loadbalancers at datacenter
-        """
-        result = None
-        try:
-            result = self.clc.v2.API.Call(
-                'GET', '/v2/sharedLoadBalancers/%s/%s' % (alias, location))
-        except APIFailedResponse as e:
-            self.module.fail_json(
-                msg='Unable to fetch load balancers for account: {0}. {1}'.format(
-                    alias, str(e.response_text)))
-        return result
-
-    def _loadbalancer_exists(self, name):
-        """
-        Verify a loadbalancer exists
-        :param name: Name of loadbalancer
-        :return: False or the ID of the existing loadbalancer
-        """
-        result = False
-
-        for lb in self.lb_dict:
-            if lb.get('name') == name:
-                result = lb.get('id')
-        return result
-
-    def _loadbalancerpool_exists(self, alias, location, port, lb_id):
-        """
-        Checks to see if a pool exists on the specified port on the provided load balancer
-        :param alias: the account alias
-        :param location: the datacenter the load balancer resides in
-        :param port: the port to check and see if it exists
-        :param lb_id: the id string of the provided load balancer
-        :return: result: The id string of the pool or False
-        """
-        result = False
-        try:
-            pool_list = self.clc.v2.API.Call(
-                'GET', '/v2/sharedLoadBalancers/%s/%s/%s/pools' %
-                (alias, location, lb_id))
-        except APIFailedResponse as e:
-            return self.module.fail_json(
-                msg='Unable to fetch the load balancer pools for load balancer id: {0}. 
{1}'.format( - lb_id, str(e.response_text))) - for pool in pool_list: - if int(pool.get('port')) == int(port): - result = pool.get('id') - return result - - def _loadbalancerpool_nodes_exists( - self, alias, location, lb_id, pool_id, nodes_to_check): - """ - Checks to see if a set of nodes exists on the specified port on the provided load balancer - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the provided load balancer - :param pool_id: the id string of the load balancer pool - :param nodes_to_check: the list of nodes to check for - :return: result: True / False indicating if the given nodes exist - """ - result = False - nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id) - for node in nodes_to_check: - if not node.get('status'): - node['status'] = 'enabled' - if node in nodes: - result = True - else: - result = False - return result - - def set_loadbalancernodes(self, alias, location, lb_id, pool_id, nodes): - """ - Updates nodes to the provided pool - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param pool_id: the id string of the pool - :param nodes: a list of dictionaries containing the nodes to set - :return: result: The result from the CLC API call - """ - result = None - if not lb_id: - return result - if not self.module.check_mode: - try: - result = self.clc.v2.API.Call('PUT', - '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes' - % (alias, location, lb_id, pool_id), json.dumps(nodes)) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to set nodes for the load balancer pool id "{0}". {1}'.format( - pool_id, str(e.response_text))) - return result - - def add_lbpool_nodes(self, alias, location, lb_id, pool_id, nodes_to_add): - """ - Add nodes to the provided pool - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param pool_id: the id string of the pool - :param nodes_to_add: a list of dictionaries containing the nodes to add - :return: (changed, result) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - result = {} - nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id) - for node in nodes_to_add: - if not node.get('status'): - node['status'] = 'enabled' - if node not in nodes: - changed = True - nodes.append(node) - if changed is True and not self.module.check_mode: - result = self.set_loadbalancernodes( - alias, - location, - lb_id, - pool_id, - nodes) - return changed, result - - def remove_lbpool_nodes( - self, alias, location, lb_id, pool_id, nodes_to_remove): - """ - Removes nodes from the provided pool - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param pool_id: the id string of the pool - :param nodes_to_remove: a list of dictionaries containing the nodes to remove - :return: (changed, result) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - result = {} - nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id) - for node in nodes_to_remove: - if not node.get('status'): - node['status'] = 'enabled' - if node in nodes: - changed = True - nodes.remove(node) - if changed is True and not self.module.check_mode: - 
result = self.set_loadbalancernodes( - alias, - location, - lb_id, - pool_id, - nodes) - return changed, result - - def _get_lbpool_nodes(self, alias, location, lb_id, pool_id): - """ - Return the list of nodes available to the provided load balancer pool - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param pool_id: the id string of the pool - :return: result: The list of nodes - """ - result = None - try: - result = self.clc.v2.API.Call('GET', - '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes' - % (alias, location, lb_id, pool_id)) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to fetch list of available nodes for load balancer pool id: {0}. {1}'.format( - pool_id, str(e.response_text))) - return result - - @staticmethod - def define_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - name=dict(required=True), - description=dict(), - location=dict(required=True), - alias=dict(required=True), - port=dict(choices=[80, 443]), - method=dict(choices=['leastConnection', 'roundRobin']), - persistence=dict(choices=['standard', 'sticky']), - nodes=dict(type='list', default=[], elements='dict'), - status=dict(default='enabled', choices=['enabled', 'disabled']), - state=dict( - default='present', - choices=[ - 'present', - 'absent', - 'port_absent', - 'nodes_present', - 'nodes_absent']) - ) - return argument_spec - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. 
- :return: none
-    """
-    module = AnsibleModule(argument_spec=ClcLoadBalancer.define_argument_spec(),
-                           supports_check_mode=True)
-    clc_loadbalancer = ClcLoadBalancer(module)
-    clc_loadbalancer.process_request()
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/clc_modify_server.py b/plugins/modules/clc_modify_server.py
deleted file mode 100644
index ed01e36763..0000000000
--- a/plugins/modules/clc_modify_server.py
+++ /dev/null
@@ -1,965 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2015 CenturyLink
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r"""
-module: clc_modify_server
-short_description: Modify servers in CenturyLink Cloud
-description:
-  - An Ansible module to modify servers in CenturyLink Cloud.
-extends_documentation_fragment:
-  - community.general.attributes
-attributes:
-  check_mode:
-    support: full
-  diff_mode:
-    support: none
-options:
-  server_ids:
-    description:
-      - A list of server IDs to modify.
-    type: list
-    required: true
-    elements: str
-  cpu:
-    description:
-      - How many CPUs to update on the server.
-    type: str
-  memory:
-    description:
-      - Memory (in GB) to set on the server.
-    type: str
-  anti_affinity_policy_id:
-    description:
-      - The anti affinity policy id to be set for a hyper scale server. This is mutually exclusive with O(anti_affinity_policy_name).
-    type: str
-  anti_affinity_policy_name:
-    description:
-      - The anti affinity policy name to be set for a hyper scale server. This is mutually exclusive with O(anti_affinity_policy_id).
-    type: str
-  alert_policy_id:
-    description:
-      - The alert policy id to be associated with the server. This is mutually exclusive with O(alert_policy_name).
-    type: str
-  alert_policy_name:
-    description:
-      - The alert policy name to be associated with the server. This is mutually exclusive with O(alert_policy_id).
-    type: str
-  state:
-    description:
-      - The state to ensure that the provided resources are in.
-    type: str
-    default: 'present'
-    choices: ['present', 'absent']
-  wait:
-    description:
-      - Whether to wait for the provisioning tasks to finish before returning.
-    type: bool
-    default: true
-requirements:
-  - requests >= 2.5.0
-  - clc-sdk
-author: "CLC Runner (@clc-runner)"
-notes:
-  - To use this module, it is required to set the below environment variables, which enable access to the Centurylink Cloud.
-  - E(CLC_V2_API_USERNAME), the account login id for the Centurylink Cloud.
-  - E(CLC_V2_API_PASSWORD), the account password for the Centurylink Cloud.
-  - Alternatively, the module accepts the API token and account alias. The API token can be generated using the CLC account
-    login and password using the HTTP API call @ https://api.ctl.io/v2/authentication/login
-  - E(CLC_V2_API_TOKEN), the API token generated from https://api.ctl.io/v2/authentication/login
-  - E(CLC_ACCT_ALIAS), the account alias associated with the Centurylink Cloud.
-  - Users can set E(CLC_V2_API_URL) to specify an endpoint for pointing to a different CLC environment.
-""" - -EXAMPLES = r""" -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - -- name: Set the cpu count to 4 on a server - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - cpu: 4 - state: present - -- name: Set the memory to 8GB on a server - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - memory: 8 - state: present - -- name: Set the anti affinity policy on a server - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - anti_affinity_policy_name: 'aa_policy' - state: present - -- name: Remove the anti affinity policy on a server - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - anti_affinity_policy_name: 'aa_policy' - state: absent - -- name: Add the alert policy on a server - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - alert_policy_name: 'alert_policy' - state: present - -- name: Remove the alert policy on a server - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - alert_policy_name: 'alert_policy' - state: absent - -- name: Ret the memory to 16GB and cpu to 8 core on a lust if servers - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - cpu: 8 - memory: 16 - state: present -""" - -RETURN = r""" -server_ids: - description: The list of server ids that are changed. - returned: success - type: list - sample: ["UC1TEST-SVR01", "UC1TEST-SVR02"] -servers: - description: The list of server objects that are changed. - returned: success - type: list - sample: - [ - { - "changeInfo":{ - "createdBy":"service.wfad", - "createdDate":1438196820, - "modifiedBy":"service.wfad", - "modifiedDate":1438196820 - }, - "description":"test-server", - "details":{ - "alertPolicies":[ - - ], - "cpu":1, - "customFields":[ - - ], - "diskCount":3, - "disks":[ - { - "id":"0:0", - "partitionPaths":[ - - ], - "sizeGB":1 - }, - { - "id":"0:1", - "partitionPaths":[ - - ], - "sizeGB":2 - }, - { - "id":"0:2", - "partitionPaths":[ - - ], - "sizeGB":14 - } - ], - "hostName":"", - "inMaintenanceMode":false, - "ipAddresses":[ - { - "internal":"10.1.1.1" - } - ], - "memoryGB":1, - "memoryMB":1024, - "partitions":[ - - ], - "powerState":"started", - "snapshots":[ - - ], - "storageGB":17 - }, - "groupId":"086ac1dfe0b6411989e8d1b77c4065f0", - "id":"test-server", - "ipaddress":"10.120.45.23", - "isTemplate":false, - "links":[ - { - "href":"/v2/servers/wfad/test-server", - "id":"test-server", - "rel":"self", - "verbs":[ - "GET", - "PATCH", - "DELETE" - ] - }, - { - "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0", - "id":"086ac1dfe0b6411989e8d1b77c4065f0", - "rel":"group" - }, - { - "href":"/v2/accounts/wfad", - "id":"wfad", - "rel":"account" - }, - { - "href":"/v2/billing/wfad/serverPricing/test-server", - "rel":"billing" - }, - { - "href":"/v2/servers/wfad/test-server/publicIPAddresses", - "rel":"publicIPAddresses", - "verbs":[ - "POST" - ] - }, - { - "href":"/v2/servers/wfad/test-server/credentials", - "rel":"credentials" - }, - { - "href":"/v2/servers/wfad/test-server/statistics", - "rel":"statistics" - }, - { - "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities", - "rel":"upcomingScheduledActivities" - }, - { - "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities", - "rel":"scheduledActivities", - "verbs":[ - "GET", - "POST" - ] - 
}, - { - "href":"/v2/servers/wfad/test-server/capabilities", - "rel":"capabilities" - }, - { - "href":"/v2/servers/wfad/test-server/alertPolicies", - "rel":"alertPolicyMappings", - "verbs":[ - "POST" - ] - }, - { - "href":"/v2/servers/wfad/test-server/antiAffinityPolicy", - "rel":"antiAffinityPolicyMapping", - "verbs":[ - "PUT", - "DELETE" - ] - }, - { - "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy", - "rel":"cpuAutoscalePolicyMapping", - "verbs":[ - "PUT", - "DELETE" - ] - } - ], - "locationId":"UC1", - "name":"test-server", - "os":"ubuntu14_64Bit", - "osType":"Ubuntu 14 64-bit", - "status":"active", - "storageType":"standard", - "type":"standard" - } - ] -""" - -__version__ = '${version}' - -import json -import os -import traceback - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. -# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException - from clc import APIFailedResponse -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcModifyServer: - clc = clc_sdk - - def __init__(self, module): - """ - Construct module - """ - self.clc = clc_sdk - self.module = module - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - self._set_clc_credentials_from_env() - - p = self.module.params - cpu = p.get('cpu') - memory = p.get('memory') - state = p.get('state') - if state == 'absent' and (cpu or memory): - return self.module.fail_json( - msg='\'absent\' state is not supported for \'cpu\' and \'memory\' arguments') - - server_ids = p['server_ids'] - if not isinstance(server_ids, list): - return self.module.fail_json( - msg='server_ids needs to be a list of instances to modify: %s' % - server_ids) - - (changed, server_dict_array, changed_server_ids) = self._modify_servers( - server_ids=server_ids) - - self.module.exit_json( - changed=changed, - server_ids=changed_server_ids, - servers=server_dict_array) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - server_ids=dict(type='list', required=True, elements='str'), - state=dict(default='present', choices=['present', 'absent']), - cpu=dict(), - memory=dict(), - anti_affinity_policy_id=dict(), - anti_affinity_policy_name=dict(), - alert_policy_id=dict(), - alert_policy_name=dict(), - wait=dict(type='bool', default=True) - ) - mutually_exclusive = [ - ['anti_affinity_policy_id', 'anti_affinity_policy_name'], - ['alert_policy_id', 'alert_policy_name'] - ] - return {"argument_spec": argument_spec, - "mutually_exclusive": mutually_exclusive} - - def 
_set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - def _get_servers_from_clc(self, server_list, message): - """ - Internal function to fetch list of CLC server objects from a list of server ids - :param server_list: The list of server ids - :param message: the error message to throw in case of any error - :return the list of CLC server objects - """ - try: - return self.clc.v2.Servers(server_list).servers - except CLCException as ex: - return self.module.fail_json(msg=message + ': %s' % ex.message) - - def _modify_servers(self, server_ids): - """ - modify the servers configuration on the provided list - :param server_ids: list of servers to modify - :return: a list of dictionaries with server information about the servers that were modified - """ - p = self.module.params - state = p.get('state') - server_params = { - 'cpu': p.get('cpu'), - 'memory': p.get('memory'), - 'anti_affinity_policy_id': p.get('anti_affinity_policy_id'), - 'anti_affinity_policy_name': p.get('anti_affinity_policy_name'), - 'alert_policy_id': p.get('alert_policy_id'), - 'alert_policy_name': p.get('alert_policy_name'), - } - changed = False - server_changed = False - aa_changed = False - ap_changed = False - server_dict_array = [] - result_server_ids = [] - request_list = [] - changed_servers = [] - - if not isinstance(server_ids, list) or len(server_ids) < 1: - return self.module.fail_json( - msg='server_ids should be a list of servers, aborting') - - servers = self._get_servers_from_clc( - server_ids, - 'Failed to obtain server list from the CLC API') - for server in servers: - if state == 'present': - server_changed, server_result = self._ensure_server_config( - server, server_params) - if server_result: - request_list.append(server_result) - aa_changed = self._ensure_aa_policy_present( - server, - server_params) - ap_changed = self._ensure_alert_policy_present( - server, - server_params) - elif state == 'absent': - aa_changed = self._ensure_aa_policy_absent( - server, - server_params) - ap_changed = self._ensure_alert_policy_absent( - server, - server_params) - if server_changed or aa_changed or ap_changed: - changed_servers.append(server) - changed = True - - self._wait_for_requests(self.module, request_list) - self._refresh_servers(self.module, changed_servers) - - for server in changed_servers: - server_dict_array.append(server.data) - result_server_ids.append(server.id) - - return changed, server_dict_array, result_server_ids - - def _ensure_server_config( - self, server, server_params): - """ - ensures the server is updated with the provided cpu and memory - :param server: the CLC server object - :param server_params: the dictionary of server parameters - :return: (changed, group) - - changed: 
Boolean whether a change was made - result: The result from the CLC API call - """ - cpu = server_params.get('cpu') - memory = server_params.get('memory') - changed = False - result = None - - if not cpu: - cpu = server.cpu - if not memory: - memory = server.memory - if memory != server.memory or cpu != server.cpu: - if not self.module.check_mode: - result = self._modify_clc_server( - self.clc, - self.module, - server.id, - cpu, - memory) - changed = True - return changed, result - - @staticmethod - def _modify_clc_server(clc, module, server_id, cpu, memory): - """ - Modify the memory or CPU of a clc server. - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param server_id: id of the server to modify - :param cpu: the new cpu value - :param memory: the new memory value - :return: the result of CLC API call - """ - result = None - acct_alias = clc.v2.Account.GetAlias() - try: - # Update the server configuration - job_obj = clc.v2.API.Call('PATCH', - 'servers/%s/%s' % (acct_alias, - server_id), - json.dumps([{"op": "set", - "member": "memory", - "value": memory}, - {"op": "set", - "member": "cpu", - "value": cpu}])) - result = clc.v2.Requests(job_obj) - except APIFailedResponse as ex: - module.fail_json( - msg='Unable to update the server configuration for server : "{0}". {1}'.format( - server_id, str(ex.response_text))) - return result - - @staticmethod - def _wait_for_requests(module, request_list): - """ - Block until server provisioning requests are completed. - :param module: the AnsibleModule object - :param request_list: a list of clc-sdk.Request instances - :return: none - """ - wait = module.params.get('wait') - if wait: - # Requests.WaitUntilComplete() returns the count of failed requests - failed_requests_count = sum( - [request.WaitUntilComplete() for request in request_list]) - - if failed_requests_count > 0: - module.fail_json( - msg='Unable to process modify server request') - - @staticmethod - def _refresh_servers(module, servers): - """ - Loop through a list of servers and refresh them. - :param module: the AnsibleModule object - :param servers: list of clc-sdk.Server instances to refresh - :return: none - """ - for server in servers: - try: - server.Refresh() - except CLCException as ex: - module.fail_json(msg='Unable to refresh the server {0}. 
{1}'.format( - server.id, ex.message - )) - - def _ensure_aa_policy_present( - self, server, server_params): - """ - ensures the server is updated with the provided anti affinity policy - :param server: the CLC server object - :param server_params: the dictionary of server parameters - :return: (changed, group) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - acct_alias = self.clc.v2.Account.GetAlias() - - aa_policy_id = server_params.get('anti_affinity_policy_id') - aa_policy_name = server_params.get('anti_affinity_policy_name') - if not aa_policy_id and aa_policy_name: - aa_policy_id = self._get_aa_policy_id_by_name( - self.clc, - self.module, - acct_alias, - aa_policy_name) - current_aa_policy_id = self._get_aa_policy_id_of_server( - self.clc, - self.module, - acct_alias, - server.id) - - if aa_policy_id and aa_policy_id != current_aa_policy_id: - self._modify_aa_policy( - self.clc, - self.module, - acct_alias, - server.id, - aa_policy_id) - changed = True - return changed - - def _ensure_aa_policy_absent( - self, server, server_params): - """ - ensures the provided anti affinity policy is removed from the server - :param server: the CLC server object - :param server_params: the dictionary of server parameters - :return: (changed, group) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - acct_alias = self.clc.v2.Account.GetAlias() - aa_policy_id = server_params.get('anti_affinity_policy_id') - aa_policy_name = server_params.get('anti_affinity_policy_name') - if not aa_policy_id and aa_policy_name: - aa_policy_id = self._get_aa_policy_id_by_name( - self.clc, - self.module, - acct_alias, - aa_policy_name) - current_aa_policy_id = self._get_aa_policy_id_of_server( - self.clc, - self.module, - acct_alias, - server.id) - - if aa_policy_id and aa_policy_id == current_aa_policy_id: - self._delete_aa_policy( - self.clc, - self.module, - acct_alias, - server.id) - changed = True - return changed - - @staticmethod - def _modify_aa_policy(clc, module, acct_alias, server_id, aa_policy_id): - """ - modifies the anti affinity policy of the CLC server - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param acct_alias: the CLC account alias - :param server_id: the CLC server id - :param aa_policy_id: the anti affinity policy id - :return: result: The result from the CLC API call - """ - result = None - if not module.check_mode: - try: - result = clc.v2.API.Call('PUT', - 'servers/%s/%s/antiAffinityPolicy' % ( - acct_alias, - server_id), - json.dumps({"id": aa_policy_id})) - except APIFailedResponse as ex: - module.fail_json( - msg='Unable to modify anti affinity policy to server : "{0}". {1}'.format( - server_id, str(ex.response_text))) - return result - - @staticmethod - def _delete_aa_policy(clc, module, acct_alias, server_id): - """ - Delete the anti affinity policy of the CLC server - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param acct_alias: the CLC account alias - :param server_id: the CLC server id - :return: result: The result from the CLC API call - """ - result = None - if not module.check_mode: - try: - result = clc.v2.API.Call('DELETE', - 'servers/%s/%s/antiAffinityPolicy' % ( - acct_alias, - server_id), - json.dumps({})) - except APIFailedResponse as ex: - module.fail_json( - msg='Unable to delete anti affinity policy to server : "{0}". 
{1}'.format( - server_id, str(ex.response_text))) - return result - - @staticmethod - def _get_aa_policy_id_by_name(clc, module, alias, aa_policy_name): - """ - retrieves the anti affinity policy id of the server based on the name of the policy - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param alias: the CLC account alias - :param aa_policy_name: the anti affinity policy name - :return: aa_policy_id: The anti affinity policy id - """ - aa_policy_id = None - try: - aa_policies = clc.v2.API.Call(method='GET', - url='antiAffinityPolicies/%s' % alias) - except APIFailedResponse as ex: - return module.fail_json( - msg='Unable to fetch anti affinity policies from account alias : "{0}". {1}'.format( - alias, str(ex.response_text))) - for aa_policy in aa_policies.get('items'): - if aa_policy.get('name') == aa_policy_name: - if not aa_policy_id: - aa_policy_id = aa_policy.get('id') - else: - return module.fail_json( - msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name) - if not aa_policy_id: - module.fail_json( - msg='No anti affinity policy was found with policy name : %s' % aa_policy_name) - return aa_policy_id - - @staticmethod - def _get_aa_policy_id_of_server(clc, module, alias, server_id): - """ - retrieves the anti affinity policy id of the server based on the CLC server id - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param alias: the CLC account alias - :param server_id: the CLC server id - :return: aa_policy_id: The anti affinity policy id - """ - aa_policy_id = None - try: - result = clc.v2.API.Call( - method='GET', url='servers/%s/%s/antiAffinityPolicy' % - (alias, server_id)) - aa_policy_id = result.get('id') - except APIFailedResponse as ex: - if ex.response_status_code != 404: - module.fail_json(msg='Unable to fetch anti affinity policy for server "{0}". 
{1}'.format( - server_id, str(ex.response_text))) - return aa_policy_id - - def _ensure_alert_policy_present( - self, server, server_params): - """ - ensures the server is updated with the provided alert policy - :param server: the CLC server object - :param server_params: the dictionary of server parameters - :return: (changed, group) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - acct_alias = self.clc.v2.Account.GetAlias() - alert_policy_id = server_params.get('alert_policy_id') - alert_policy_name = server_params.get('alert_policy_name') - if not alert_policy_id and alert_policy_name: - alert_policy_id = self._get_alert_policy_id_by_name( - self.clc, - self.module, - acct_alias, - alert_policy_name) - if alert_policy_id and not self._alert_policy_exists( - server, alert_policy_id): - self._add_alert_policy_to_server( - self.clc, - self.module, - acct_alias, - server.id, - alert_policy_id) - changed = True - return changed - - def _ensure_alert_policy_absent( - self, server, server_params): - """ - ensures the alert policy is removed from the server - :param server: the CLC server object - :param server_params: the dictionary of server parameters - :return: (changed, group) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - - acct_alias = self.clc.v2.Account.GetAlias() - alert_policy_id = server_params.get('alert_policy_id') - alert_policy_name = server_params.get('alert_policy_name') - if not alert_policy_id and alert_policy_name: - alert_policy_id = self._get_alert_policy_id_by_name( - self.clc, - self.module, - acct_alias, - alert_policy_name) - - if alert_policy_id and self._alert_policy_exists( - server, alert_policy_id): - self._remove_alert_policy_to_server( - self.clc, - self.module, - acct_alias, - server.id, - alert_policy_id) - changed = True - return changed - - @staticmethod - def _add_alert_policy_to_server( - clc, module, acct_alias, server_id, alert_policy_id): - """ - add the alert policy to CLC server - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param acct_alias: the CLC account alias - :param server_id: the CLC server id - :param alert_policy_id: the alert policy id - :return: result: The result from the CLC API call - """ - result = None - if not module.check_mode: - try: - result = clc.v2.API.Call('POST', - 'servers/%s/%s/alertPolicies' % ( - acct_alias, - server_id), - json.dumps({"id": alert_policy_id})) - except APIFailedResponse as ex: - module.fail_json(msg='Unable to set alert policy to the server : "{0}". {1}'.format( - server_id, str(ex.response_text))) - return result - - @staticmethod - def _remove_alert_policy_to_server( - clc, module, acct_alias, server_id, alert_policy_id): - """ - remove the alert policy to the CLC server - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param acct_alias: the CLC account alias - :param server_id: the CLC server id - :param alert_policy_id: the alert policy id - :return: result: The result from the CLC API call - """ - result = None - if not module.check_mode: - try: - result = clc.v2.API.Call('DELETE', - 'servers/%s/%s/alertPolicies/%s' - % (acct_alias, server_id, alert_policy_id)) - except APIFailedResponse as ex: - module.fail_json(msg='Unable to remove alert policy from the server : "{0}". 
{1}'.format( - server_id, str(ex.response_text))) - return result - - @staticmethod - def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name): - """ - retrieves the alert policy id of the server based on the name of the policy - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param alias: the CLC account alias - :param alert_policy_name: the alert policy name - :return: alert_policy_id: The alert policy id - """ - alert_policy_id = None - try: - alert_policies = clc.v2.API.Call(method='GET', - url='alertPolicies/%s' % alias) - except APIFailedResponse as ex: - return module.fail_json(msg='Unable to fetch alert policies for account : "{0}". {1}'.format( - alias, str(ex.response_text))) - for alert_policy in alert_policies.get('items'): - if alert_policy.get('name') == alert_policy_name: - if not alert_policy_id: - alert_policy_id = alert_policy.get('id') - else: - return module.fail_json( - msg='multiple alert policies were found with policy name : %s' % alert_policy_name) - return alert_policy_id - - @staticmethod - def _alert_policy_exists(server, alert_policy_id): - """ - Checks if the alert policy exists for the server - :param server: the clc server object - :param alert_policy_id: the alert policy - :return: True: if the given alert policy id associated to the server, False otherwise - """ - result = False - alert_policies = server.alertPolicies - if alert_policies: - for alert_policy in alert_policies: - if alert_policy.get('id') == alert_policy_id: - result = True - return result - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. - :return: none - """ - - argument_dict = ClcModifyServer._define_module_argument_spec() - module = AnsibleModule(supports_check_mode=True, **argument_dict) - clc_modify_server = ClcModifyServer(module) - clc_modify_server.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/clc_publicip.py b/plugins/modules/clc_publicip.py deleted file mode 100644 index f149a59a53..0000000000 --- a/plugins/modules/clc_publicip.py +++ /dev/null @@ -1,363 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r""" -module: clc_publicip -short_description: Add and Delete public IPs on servers in CenturyLink Cloud -description: - - An Ansible module to add or delete public IP addresses on an existing server or servers in CenturyLink Cloud. -extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: full - diff_mode: - support: none -options: - protocol: - description: - - The protocol that the public IP will listen for. - type: str - default: TCP - choices: ['TCP', 'UDP', 'ICMP'] - ports: - description: - - A list of ports to expose. This is required when O(state=present). - type: list - elements: int - server_ids: - description: - - A list of servers to create public IPs on. 
- type: list - required: true - elements: str - state: - description: - - Determine whether to create or delete public IPs. If V(present) module will not create a second public IP if one already - exists. - type: str - default: present - choices: ['present', 'absent'] - wait: - description: - - Whether to wait for the tasks to finish before returning. - type: bool - default: true -requirements: - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the Centurylink Cloud. - - E(CLC_V2_API_USERNAME), the account login id for the Centurylink Cloud. - - E(CLC_V2_API_PASSWORD), the account password for the Centurylink Cloud. - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the CLC account - login and password using the HTTP API call @ https://api.ctl.io/v2/authentication/login - - E(CLC_V2_API_TOKEN), the API token generated from https://api.ctl.io/v2/authentication/login - - E(CLC_ACCT_ALIAS), the account alias associated with the Centurylink Cloud. - - Users can set E(CLC_V2_API_URL) to specify an endpoint for pointing to a different CLC environment. -""" - -EXAMPLES = r""" -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - -- name: Add Public IP to Server - hosts: localhost - gather_facts: false - connection: local - tasks: - - name: Create Public IP For Servers - community.general.clc_publicip: - protocol: TCP - ports: - - 80 - server_ids: - - UC1TEST-SVR01 - - UC1TEST-SVR02 - state: present - register: clc - - - name: Debug - ansible.builtin.debug: - var: clc - -- name: Delete Public IP from Server - hosts: localhost - gather_facts: false - connection: local - tasks: - - name: Create Public IP For Servers - community.general.clc_publicip: - server_ids: - - UC1TEST-SVR01 - - UC1TEST-SVR02 - state: absent - register: clc - - - name: Debug - ansible.builtin.debug: - var: clc -""" - -RETURN = r""" -server_ids: - description: The list of server ids that are changed. - returned: success - type: list - sample: ["UC1TEST-SVR01", "UC1TEST-SVR02"] -""" - -__version__ = '${version}' - -import os -import traceback - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. 
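
The guarded imports around this point follow a standard Ansible pattern: catch the ImportError at import time, keep the traceback, and fail only once an AnsibleModule exists to report it as JSON. A minimal standalone sketch of that pattern, under the assumption that the illustrative names MIN_REQUESTS and check_requests are not part of the original modules:

    import traceback

    MIN_REQUESTS = '2.5.0'

    REQUESTS_IMP_ERR = None
    try:
        import requests
    except ImportError:
        REQUESTS_IMP_ERR = traceback.format_exc()
        requests = None


    def check_requests(module):
        # Report the failure through fail_json so callers get JSON, not a raw traceback.
        if requests is None:
            module.fail_json(msg='requests is required for this module', exception=REQUESTS_IMP_ERR)
        from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
        if LooseVersion(requests.__version__) < LooseVersion(MIN_REQUESTS):
            module.fail_json(msg='requests library version should be >= %s' % MIN_REQUESTS)
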
-# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcPublicIp(object): - clc = clc_sdk - module = None - - def __init__(self, module): - """ - Construct module - """ - self.module = module - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - self._set_clc_credentials_from_env() - params = self.module.params - server_ids = params['server_ids'] - ports = params['ports'] - protocol = params['protocol'] - state = params['state'] - - if state == 'present': - changed, changed_server_ids, requests = self.ensure_public_ip_present( - server_ids=server_ids, protocol=protocol, ports=ports) - elif state == 'absent': - changed, changed_server_ids, requests = self.ensure_public_ip_absent( - server_ids=server_ids) - else: - return self.module.fail_json(msg="Unknown State: " + state) - self._wait_for_requests_to_complete(requests) - return self.module.exit_json(changed=changed, - server_ids=changed_server_ids) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - server_ids=dict(type='list', required=True, elements='str'), - protocol=dict(default='TCP', choices=['TCP', 'UDP', 'ICMP']), - ports=dict(type='list', elements='int'), - wait=dict(type='bool', default=True), - state=dict(default='present', choices=['present', 'absent']), - ) - return argument_spec - - def ensure_public_ip_present(self, server_ids, protocol, ports): - """ - Ensures the given server ids having the public ip available - :param server_ids: the list of server ids - :param protocol: the ip protocol - :param ports: the list of ports to expose - :return: (changed, changed_server_ids, results) - changed: A flag indicating if there is any change - changed_server_ids : the list of server ids that are changed - results: The result list from clc public ip call - """ - changed = False - results = [] - changed_server_ids = [] - servers = self._get_servers_from_clc( - server_ids, - 'Failed to obtain server list from the CLC API') - servers_to_change = [ - server for server in servers if len( - server.PublicIPs().public_ips) == 0] - ports_to_expose = [{'protocol': protocol, 'port': port} - for port in ports] - for server in servers_to_change: - if not self.module.check_mode: - result = self._add_publicip_to_server(server, ports_to_expose) - results.append(result) - changed_server_ids.append(server.id) - changed = True - return changed, changed_server_ids, results - - def _add_publicip_to_server(self, server, ports_to_expose): - result = None - try: - result = server.PublicIPs().Add(ports_to_expose) - except CLCException as ex: - self.module.fail_json(msg='Failed to add public ip to the server : {0}. 
{1}'.format(
-            server.id, ex.response_text
-        ))
-        return result
-
-    def ensure_public_ip_absent(self, server_ids):
-        """
-        Ensures the given server ids have their public IPs removed, if any exist.
-        :param server_ids: the list of server ids
-        :return: (changed, changed_server_ids, results)
-                  changed: A flag indicating if there is any change
-                  changed_server_ids: the list of server ids that are changed
-                  results: The result list from clc public ip call
-        """
-        changed = False
-        results = []
-        changed_server_ids = []
-        servers = self._get_servers_from_clc(
-            server_ids,
-            'Failed to obtain server list from the CLC API')
-        servers_to_change = [
-            server for server in servers if len(
-                server.PublicIPs().public_ips) > 0]
-        for server in servers_to_change:
-            if not self.module.check_mode:
-                result = self._remove_publicip_from_server(server)
-                results.append(result)
-            changed_server_ids.append(server.id)
-            changed = True
-        return changed, changed_server_ids, results
-
-    def _remove_publicip_from_server(self, server):
-        result = None
-        try:
-            for ip_address in server.PublicIPs().public_ips:
-                result = ip_address.Delete()
-        except CLCException as ex:
-            self.module.fail_json(msg='Failed to remove public ip from the server : {0}. {1}'.format(
-                server.id, ex.response_text
-            ))
-        return result
-
-    def _wait_for_requests_to_complete(self, requests_lst):
-        """
-        Waits until the CLC requests are complete if the wait argument is True
-        :param requests_lst: The list of CLC request objects
-        :return: none
-        """
-        if not self.module.params['wait']:
-            return
-        for request in requests_lst:
-            request.WaitUntilComplete()
-            for request_details in request.requests:
-                if request_details.Status() != 'succeeded':
-                    self.module.fail_json(
-                        msg='Unable to process public ip request')
-
-    def _set_clc_credentials_from_env(self):
-        """
-        Set the CLC Credentials on the sdk by reading environment variables
-        :return: none
-        """
-        env = os.environ
-        v2_api_token = env.get('CLC_V2_API_TOKEN', False)
-        v2_api_username = env.get('CLC_V2_API_USERNAME', False)
-        v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
-        clc_alias = env.get('CLC_ACCT_ALIAS', False)
-        api_url = env.get('CLC_V2_API_URL', False)
-
-        if api_url:
-            self.clc.defaults.ENDPOINT_URL_V2 = api_url
-
-        if v2_api_token and clc_alias:
-            self.clc._LOGIN_TOKEN_V2 = v2_api_token
-            self.clc._V2_ENABLED = True
-            self.clc.ALIAS = clc_alias
-        elif v2_api_username and v2_api_passwd:
-            self.clc.v2.SetCredentials(
-                api_username=v2_api_username,
-                api_passwd=v2_api_passwd)
-        else:
-            return self.module.fail_json(
-                msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
-                    "environment variables")
-
-    def _get_servers_from_clc(self, server_ids, message):
-        """
-        Gets the list of servers from the CLC API
-        """
-        try:
-            return self.clc.v2.Servers(server_ids).servers
-        except CLCException as exception:
-            self.module.fail_json(msg=message + ': %s' % exception)
-
-    @staticmethod
-    def _set_user_agent(clc):
-        if hasattr(clc, 'SetRequestsSession'):
-            agent_string = "ClcAnsibleModule/" + __version__
-            ses = requests.Session()
-            ses.headers.update({"Api-Client": agent_string})
-            ses.headers['User-Agent'] += " " + agent_string
-            clc.SetRequestsSession(ses)
-
-
-def main():
-    """
-    The main function. Instantiates the module and calls process_request.
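
The credential bootstrap above gives a token-plus-alias pair precedence over a username/password pair, and only errors when neither is complete. A minimal sketch of the same precedence as a standalone function, returning a dict instead of mutating the sdk (the function name is illustrative, not part of the module):

    import os


    def clc_auth_from_env(env=None):
        """Select CLC credentials, preferring an API token with an account alias."""
        env = os.environ if env is None else env
        if env.get('CLC_V2_API_TOKEN') and env.get('CLC_ACCT_ALIAS'):
            return {'token': env['CLC_V2_API_TOKEN'], 'alias': env['CLC_ACCT_ALIAS']}
        if env.get('CLC_V2_API_USERNAME') and env.get('CLC_V2_API_PASSWD'):
            return {'username': env['CLC_V2_API_USERNAME'], 'password': env['CLC_V2_API_PASSWD']}
        raise RuntimeError('You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables')
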
- :return: none - """ - module = AnsibleModule( - argument_spec=ClcPublicIp._define_module_argument_spec(), - supports_check_mode=True - ) - clc_public_ip = ClcPublicIp(module) - clc_public_ip.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/clc_server.py b/plugins/modules/clc_server.py deleted file mode 100644 index 4ff8acb7cb..0000000000 --- a/plugins/modules/clc_server.py +++ /dev/null @@ -1,1558 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r""" -module: clc_server -short_description: Create, Delete, Start and Stop servers in CenturyLink Cloud -description: - - An Ansible module to Create, Delete, Start and Stop servers in CenturyLink Cloud. -extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: full - diff_mode: - support: none -options: - additional_disks: - description: - - The list of additional disks for the server. - type: list - elements: dict - default: [] - add_public_ip: - description: - - Whether to add a public IP to the server. - type: bool - default: false - alias: - description: - - The account alias to provision the servers under. - type: str - anti_affinity_policy_id: - description: - - The anti-affinity policy to assign to the server. This is mutually exclusive with O(anti_affinity_policy_name). - type: str - anti_affinity_policy_name: - description: - - The anti-affinity policy to assign to the server. This is mutually exclusive with O(anti_affinity_policy_id). - type: str - alert_policy_id: - description: - - The alert policy to assign to the server. This is mutually exclusive with O(alert_policy_name). - type: str - alert_policy_name: - description: - - The alert policy to assign to the server. This is mutually exclusive with O(alert_policy_id). - type: str - count: - description: - - The number of servers to build (mutually exclusive with O(exact_count)). - default: 1 - type: int - count_group: - description: - - Required when exact_count is specified. The Server Group use to determine how many servers to deploy. - type: str - cpu: - description: - - How many CPUs to provision on the server. - default: 1 - type: int - cpu_autoscale_policy_id: - description: - - The autoscale policy to assign to the server. - type: str - custom_fields: - description: - - The list of custom fields to set on the server. - type: list - default: [] - elements: dict - description: - description: - - The description to set for the server. - type: str - exact_count: - description: - - Run in idempotent mode. Will insure that this exact number of servers are running in the provided group, creating - and deleting them to reach that count. Requires O(count_group) to be set. - type: int - group: - description: - - The Server Group to create servers under. - type: str - default: 'Default Group' - ip_address: - description: - - The IP Address for the server. One is assigned if not provided. - type: str - location: - description: - - The Datacenter to create servers in. - type: str - managed_os: - description: - - Whether to create the server as 'Managed' or not. - type: bool - default: false - required: false - memory: - description: - - Memory in GB. 
- type: int - default: 1 - name: - description: - - A 1 to 6 character identifier to use for the server. This is required when O(state=present). - type: str - network_id: - description: - - The network UUID on which to create servers. - type: str - packages: - description: - - The list of blue print packages to run on the server after its created. - type: list - elements: dict - default: [] - password: - description: - - Password for the administrator / root user. - type: str - primary_dns: - description: - - Primary DNS used by the server. - type: str - public_ip_protocol: - description: - - The protocol to use for the public ip if O(add_public_ip=true). - type: str - default: 'TCP' - choices: ['TCP', 'UDP', 'ICMP'] - public_ip_ports: - description: - - A list of ports to allow on the firewall to the servers public IP, if O(add_public_ip=true). - type: list - elements: dict - default: [] - secondary_dns: - description: - - Secondary DNS used by the server. - type: str - server_ids: - description: - - Required for started, stopped, and absent states. A list of server IDs to ensure are started, stopped, or absent. - type: list - default: [] - elements: str - source_server_password: - description: - - The password for the source server if a clone is specified. - type: str - state: - description: - - The state to insure that the provided resources are in. - type: str - default: 'present' - choices: ['present', 'absent', 'started', 'stopped'] - storage_type: - description: - - The type of storage to attach to the server. - type: str - default: 'standard' - choices: ['standard', 'hyperscale'] - template: - description: - - The template to use for server creation. Will search for a template if a partial string is provided. This is required - when O(state=present). - type: str - ttl: - description: - - The time to live for the server in seconds. The server will be deleted when this time expires. - type: str - type: - description: - - The type of server to create. - type: str - default: 'standard' - choices: ['standard', 'hyperscale', 'bareMetal'] - configuration_id: - description: - - Only required for bare metal servers. Specifies the identifier for the specific configuration type of bare metal server - to deploy. - type: str - os_type: - description: - - Only required for bare metal servers. Specifies the OS to provision with the bare metal server. - type: str - choices: ['redHat6_64Bit', 'centOS6_64Bit', 'windows2012R2Standard_64Bit', 'ubuntu14_64Bit'] - wait: - description: - - Whether to wait for the provisioning tasks to finish before returning. - type: bool - default: true -requirements: - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the Centurylink Cloud. - - E(CLC_V2_API_USERNAME), the account login id for the Centurylink Cloud. - - E(CLC_V2_API_PASSWORD), the account password for the Centurylink Cloud. - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the CLC account - login and password using the HTTP API call @ https://api.ctl.io/v2/authentication/login - - E(CLC_V2_API_TOKEN), the API token generated from https://api.ctl.io/v2/authentication/login - - E(CLC_ACCT_ALIAS), the account alias associated with the Centurylink Cloud. - - Users can set E(CLC_V2_API_URL) to specify an endpoint for pointing to a different CLC environment. 
-""" - -EXAMPLES = r""" -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - -- name: Provision a single Ubuntu Server - community.general.clc_server: - name: test - template: ubuntu-14-64 - count: 1 - group: Default Group - state: present - -- name: Ensure 'Default Group' has exactly 5 servers - community.general.clc_server: - name: test - template: ubuntu-14-64 - exact_count: 5 - count_group: Default Group - group: Default Group - -- name: Stop a Server - community.general.clc_server: - server_ids: - - UC1ACCT-TEST01 - state: stopped - -- name: Start a Server - community.general.clc_server: - server_ids: - - UC1ACCT-TEST01 - state: started - -- name: Delete a Server - community.general.clc_server: - server_ids: - - UC1ACCT-TEST01 - state: absent -""" - -RETURN = r""" -server_ids: - description: The list of server ids that are created. - returned: success - type: list - sample: ["UC1TEST-SVR01", "UC1TEST-SVR02"] -partially_created_server_ids: - description: The list of server ids that are partially created. - returned: success - type: list - sample: ["UC1TEST-SVR01", "UC1TEST-SVR02"] -servers: - description: The list of server objects returned from CLC. - returned: success - type: list - sample: - [ - { - "changeInfo":{ - "createdBy":"service.wfad", - "createdDate":1438196820, - "modifiedBy":"service.wfad", - "modifiedDate":1438196820 - }, - "description":"test-server", - "details":{ - "alertPolicies":[ - - ], - "cpu":1, - "customFields":[ - - ], - "diskCount":3, - "disks":[ - { - "id":"0:0", - "partitionPaths":[ - - ], - "sizeGB":1 - }, - { - "id":"0:1", - "partitionPaths":[ - - ], - "sizeGB":2 - }, - { - "id":"0:2", - "partitionPaths":[ - - ], - "sizeGB":14 - } - ], - "hostName":"", - "inMaintenanceMode":false, - "ipAddresses":[ - { - "internal":"10.1.1.1" - } - ], - "memoryGB":1, - "memoryMB":1024, - "partitions":[ - - ], - "powerState":"started", - "snapshots":[ - - ], - "storageGB":17 - }, - "groupId":"086ac1dfe0b6411989e8d1b77c4065f0", - "id":"test-server", - "ipaddress":"10.120.45.23", - "isTemplate":false, - "links":[ - { - "href":"/v2/servers/wfad/test-server", - "id":"test-server", - "rel":"self", - "verbs":[ - "GET", - "PATCH", - "DELETE" - ] - }, - { - "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0", - "id":"086ac1dfe0b6411989e8d1b77c4065f0", - "rel":"group" - }, - { - "href":"/v2/accounts/wfad", - "id":"wfad", - "rel":"account" - }, - { - "href":"/v2/billing/wfad/serverPricing/test-server", - "rel":"billing" - }, - { - "href":"/v2/servers/wfad/test-server/publicIPAddresses", - "rel":"publicIPAddresses", - "verbs":[ - "POST" - ] - }, - { - "href":"/v2/servers/wfad/test-server/credentials", - "rel":"credentials" - }, - { - "href":"/v2/servers/wfad/test-server/statistics", - "rel":"statistics" - }, - { - "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities", - "rel":"upcomingScheduledActivities" - }, - { - "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities", - "rel":"scheduledActivities", - "verbs":[ - "GET", - "POST" - ] - }, - { - "href":"/v2/servers/wfad/test-server/capabilities", - "rel":"capabilities" - }, - { - "href":"/v2/servers/wfad/test-server/alertPolicies", - "rel":"alertPolicyMappings", - "verbs":[ - "POST" - ] - }, - { - "href":"/v2/servers/wfad/test-server/antiAffinityPolicy", - "rel":"antiAffinityPolicyMapping", - "verbs":[ - "PUT", - "DELETE" - ] - }, - { - "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy", - 
"rel":"cpuAutoscalePolicyMapping", - "verbs":[ - "PUT", - "DELETE" - ] - } - ], - "locationId":"UC1", - "name":"test-server", - "os":"ubuntu14_64Bit", - "osType":"Ubuntu 14 64-bit", - "status":"active", - "storageType":"standard", - "type":"standard" - } - ] -""" - -__version__ = '${version}' - -import json -import os -import time -import traceback - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. -# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException - from clc import APIFailedResponse -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcServer: - clc = clc_sdk - - def __init__(self, module): - """ - Construct module - """ - self.clc = clc_sdk - self.module = module - self.group_dict = {} - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - changed = False - new_server_ids = [] - server_dict_array = [] - - self._set_clc_credentials_from_env() - self.module.params = self._validate_module_params( - self.clc, - self.module) - p = self.module.params - state = p.get('state') - - # - # Handle each state - # - partial_servers_ids = [] - if state == 'absent': - server_ids = p['server_ids'] - if not isinstance(server_ids, list): - return self.module.fail_json( - msg='server_ids needs to be a list of instances to delete: %s' % - server_ids) - - (changed, - server_dict_array, - new_server_ids) = self._delete_servers(module=self.module, - clc=self.clc, - server_ids=server_ids) - - elif state in ('started', 'stopped'): - server_ids = p.get('server_ids') - if not isinstance(server_ids, list): - return self.module.fail_json( - msg='server_ids needs to be a list of servers to run: %s' % - server_ids) - - (changed, - server_dict_array, - new_server_ids) = self._start_stop_servers(self.module, - self.clc, - server_ids) - - elif state == 'present': - # Changed is always set to true when provisioning new instances - if not p.get('template') and p.get('type') != 'bareMetal': - return self.module.fail_json( - msg='template parameter is required for new instance') - - if p.get('exact_count') is None: - (server_dict_array, - new_server_ids, - partial_servers_ids, - changed) = self._create_servers(self.module, - self.clc) - else: - (server_dict_array, - new_server_ids, - partial_servers_ids, - changed) = self._enforce_count(self.module, - self.clc) - - self.module.exit_json( - changed=changed, - server_ids=new_server_ids, - partially_created_server_ids=partial_servers_ids, - servers=server_dict_array) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary 
- """ - argument_spec = dict( - name=dict(), - template=dict(), - group=dict(default='Default Group'), - network_id=dict(), - location=dict(), - cpu=dict(default=1, type='int'), - memory=dict(default=1, type='int'), - alias=dict(), - password=dict(no_log=True), - ip_address=dict(), - storage_type=dict( - default='standard', - choices=[ - 'standard', - 'hyperscale']), - type=dict(default='standard', choices=['standard', 'hyperscale', 'bareMetal']), - primary_dns=dict(), - secondary_dns=dict(), - additional_disks=dict(type='list', default=[], elements='dict'), - custom_fields=dict(type='list', default=[], elements='dict'), - ttl=dict(), - managed_os=dict(type='bool', default=False), - description=dict(), - source_server_password=dict(no_log=True), - cpu_autoscale_policy_id=dict(), - anti_affinity_policy_id=dict(), - anti_affinity_policy_name=dict(), - alert_policy_id=dict(), - alert_policy_name=dict(), - packages=dict(type='list', default=[], elements='dict'), - state=dict( - default='present', - choices=[ - 'present', - 'absent', - 'started', - 'stopped']), - count=dict(type='int', default=1), - exact_count=dict(type='int', ), - count_group=dict(), - server_ids=dict(type='list', default=[], elements='str'), - add_public_ip=dict(type='bool', default=False), - public_ip_protocol=dict( - default='TCP', - choices=[ - 'TCP', - 'UDP', - 'ICMP']), - public_ip_ports=dict(type='list', default=[], elements='dict'), - configuration_id=dict(), - os_type=dict(choices=[ - 'redHat6_64Bit', - 'centOS6_64Bit', - 'windows2012R2Standard_64Bit', - 'ubuntu14_64Bit' - ]), - wait=dict(type='bool', default=True)) - - mutually_exclusive = [ - ['exact_count', 'count'], - ['exact_count', 'state'], - ['anti_affinity_policy_id', 'anti_affinity_policy_name'], - ['alert_policy_id', 'alert_policy_name'], - ] - return {"argument_spec": argument_spec, - "mutually_exclusive": mutually_exclusive} - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - @staticmethod - def _validate_module_params(clc, module): - """ - Validate the module params, and lookup default values. 
- :param clc: clc-sdk instance to use - :param module: module to validate - :return: dictionary of validated params - """ - params = module.params - datacenter = ClcServer._find_datacenter(clc, module) - - ClcServer._validate_types(module) - ClcServer._validate_name(module) - - params['alias'] = ClcServer._find_alias(clc, module) - params['cpu'] = ClcServer._find_cpu(clc, module) - params['memory'] = ClcServer._find_memory(clc, module) - params['description'] = ClcServer._find_description(module) - params['ttl'] = ClcServer._find_ttl(clc, module) - params['template'] = ClcServer._find_template_id(module, datacenter) - params['group'] = ClcServer._find_group(module, datacenter).id - params['network_id'] = ClcServer._find_network_id(module, datacenter) - params['anti_affinity_policy_id'] = ClcServer._find_aa_policy_id( - clc, - module) - params['alert_policy_id'] = ClcServer._find_alert_policy_id( - clc, - module) - - return params - - @staticmethod - def _find_datacenter(clc, module): - """ - Find the datacenter by calling the CLC API. - :param clc: clc-sdk instance to use - :param module: module to validate - :return: clc-sdk.Datacenter instance - """ - location = module.params.get('location') - try: - if not location: - account = clc.v2.Account() - location = account.data.get('primaryDataCenter') - data_center = clc.v2.Datacenter(location) - return data_center - except CLCException: - module.fail_json(msg="Unable to find location: {0}".format(location)) - - @staticmethod - def _find_alias(clc, module): - """ - Find or Validate the Account Alias by calling the CLC API - :param clc: clc-sdk instance to use - :param module: module to validate - :return: clc-sdk.Account instance - """ - alias = module.params.get('alias') - if not alias: - try: - alias = clc.v2.Account.GetAlias() - except CLCException as ex: - module.fail_json(msg='Unable to find account alias. {0}'.format( - ex.message - )) - return alias - - @staticmethod - def _find_cpu(clc, module): - """ - Find or validate the CPU value by calling the CLC API - :param clc: clc-sdk instance to use - :param module: module to validate - :return: Int value for CPU - """ - cpu = module.params.get('cpu') - group_id = module.params.get('group_id') - alias = module.params.get('alias') - state = module.params.get('state') - - if not cpu and state == 'present': - group = clc.v2.Group(id=group_id, - alias=alias) - if group.Defaults("cpu"): - cpu = group.Defaults("cpu") - else: - module.fail_json( - msg=str("Can\'t determine a default cpu value. Please provide a value for cpu.")) - return cpu - - @staticmethod - def _find_memory(clc, module): - """ - Find or validate the Memory value by calling the CLC API - :param clc: clc-sdk instance to use - :param module: module to validate - :return: Int value for Memory - """ - memory = module.params.get('memory') - group_id = module.params.get('group_id') - alias = module.params.get('alias') - state = module.params.get('state') - - if not memory and state == 'present': - group = clc.v2.Group(id=group_id, - alias=alias) - if group.Defaults("memory"): - memory = group.Defaults("memory") - else: - module.fail_json(msg=str( - "Can\'t determine a default memory value. 
Please provide a value for memory.")) - return memory - - @staticmethod - def _find_description(module): - """ - Set the description module param to name if description is blank - :param module: the module to validate - :return: string description - """ - description = module.params.get('description') - if not description: - description = module.params.get('name') - return description - - @staticmethod - def _validate_types(module): - """ - Validate that type and storage_type are set appropriately, and fail if not - :param module: the module to validate - :return: none - """ - state = module.params.get('state') - server_type = module.params.get( - 'type').lower() if module.params.get('type') else None - storage_type = module.params.get( - 'storage_type').lower() if module.params.get('storage_type') else None - - if state == "present": - if server_type == "standard" and storage_type not in ( - "standard", "premium"): - module.fail_json( - msg=str("Standard VMs must have storage_type = 'standard' or 'premium'")) - - if server_type == "hyperscale" and storage_type != "hyperscale": - module.fail_json( - msg=str("Hyperscale VMs must have storage_type = 'hyperscale'")) - - @staticmethod - def _validate_name(module): - """ - Validate that name is the correct length if provided, fail if it is not - :param module: the module to validate - :return: none - """ - server_name = module.params.get('name') - state = module.params.get('state') - - if state == 'present' and ( - len(server_name) < 1 or len(server_name) > 6): - module.fail_json(msg=str( - "When state = 'present', name must be a string with a minimum length of 1 and a maximum length of 6")) - - @staticmethod - def _find_ttl(clc, module): - """ - Validate that TTL is > 3600 if set, and fail if not - :param clc: clc-sdk instance to use - :param module: module to validate - :return: validated ttl - """ - ttl = module.params.get('ttl') - - if ttl: - if ttl <= 3600: - return module.fail_json(msg=str("Ttl cannot be <= 3600")) - else: - ttl = clc.v2.time_utils.SecondsToZuluTS(int(time.time()) + ttl) - return ttl - - @staticmethod - def _find_template_id(module, datacenter): - """ - Find the template id by calling the CLC API. - :param module: the module to validate - :param datacenter: the datacenter to search for the template - :return: a valid clc template id - """ - lookup_template = module.params.get('template') - state = module.params.get('state') - type = module.params.get('type') - result = None - - if state == 'present' and type != 'bareMetal': - try: - result = datacenter.Templates().Search(lookup_template)[0].id - except CLCException: - module.fail_json( - msg=str( - "Unable to find a template: " + - lookup_template + - " in location: " + - datacenter.id)) - return result - - @staticmethod - def _find_network_id(module, datacenter): - """ - Validate the provided network id or return a default. 
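
The TTL handling above rejects values of an hour or less and otherwise converts the relative TTL into an absolute Zulu timestamp via clc.v2.time_utils.SecondsToZuluTS. A sketch of the same rule, with a plain datetime conversion standing in for the sdk helper (ttl_to_zulu is an illustrative name):

    import time
    from datetime import datetime, timezone


    def ttl_to_zulu(ttl):
        """Reject TTLs of 3600 seconds or less; return an absolute Zulu timestamp."""
        if ttl <= 3600:
            raise ValueError('Ttl cannot be <= 3600')
        expires_at = int(time.time()) + ttl
        return datetime.fromtimestamp(expires_at, tz=timezone.utc).strftime('%Y-%m-%dT%H:%M:%SZ')
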
- :param module: the module to validate - :param datacenter: the datacenter to search for a network id - :return: a valid network id - """ - network_id = module.params.get('network_id') - - if not network_id: - try: - network_id = datacenter.Networks().networks[0].id - # -- added for clc-sdk 2.23 compatibility - # datacenter_networks = clc_sdk.v2.Networks( - # networks_lst=datacenter._DeploymentCapabilities()['deployableNetworks']) - # network_id = datacenter_networks.networks[0].id - # -- end - except CLCException: - module.fail_json( - msg=str( - "Unable to find a network in location: " + - datacenter.id)) - - return network_id - - @staticmethod - def _find_aa_policy_id(clc, module): - """ - Validate if the anti affinity policy exist for the given name and throw error if not - :param clc: the clc-sdk instance - :param module: the module to validate - :return: aa_policy_id: the anti affinity policy id of the given name. - """ - aa_policy_id = module.params.get('anti_affinity_policy_id') - aa_policy_name = module.params.get('anti_affinity_policy_name') - if not aa_policy_id and aa_policy_name: - alias = module.params.get('alias') - aa_policy_id = ClcServer._get_anti_affinity_policy_id( - clc, - module, - alias, - aa_policy_name) - if not aa_policy_id: - module.fail_json( - msg='No anti affinity policy was found with policy name : %s' % aa_policy_name) - return aa_policy_id - - @staticmethod - def _find_alert_policy_id(clc, module): - """ - Validate if the alert policy exist for the given name and throw error if not - :param clc: the clc-sdk instance - :param module: the module to validate - :return: alert_policy_id: the alert policy id of the given name. - """ - alert_policy_id = module.params.get('alert_policy_id') - alert_policy_name = module.params.get('alert_policy_name') - if not alert_policy_id and alert_policy_name: - alias = module.params.get('alias') - alert_policy_id = ClcServer._get_alert_policy_id_by_name( - clc=clc, - module=module, - alias=alias, - alert_policy_name=alert_policy_name - ) - if not alert_policy_id: - module.fail_json( - msg='No alert policy exist with name : %s' % alert_policy_name) - return alert_policy_id - - def _create_servers(self, module, clc, override_count=None): - """ - Create New Servers in CLC cloud - :param module: the AnsibleModule object - :param clc: the clc-sdk instance to use - :return: a list of dictionaries with server information about the servers that were created - """ - p = module.params - request_list = [] - servers = [] - server_dict_array = [] - created_server_ids = [] - partial_created_servers_ids = [] - - add_public_ip = p.get('add_public_ip') - public_ip_protocol = p.get('public_ip_protocol') - public_ip_ports = p.get('public_ip_ports') - - params = { - 'name': p.get('name'), - 'template': p.get('template'), - 'group_id': p.get('group'), - 'network_id': p.get('network_id'), - 'cpu': p.get('cpu'), - 'memory': p.get('memory'), - 'alias': p.get('alias'), - 'password': p.get('password'), - 'ip_address': p.get('ip_address'), - 'storage_type': p.get('storage_type'), - 'type': p.get('type'), - 'primary_dns': p.get('primary_dns'), - 'secondary_dns': p.get('secondary_dns'), - 'additional_disks': p.get('additional_disks'), - 'custom_fields': p.get('custom_fields'), - 'ttl': p.get('ttl'), - 'managed_os': p.get('managed_os'), - 'description': p.get('description'), - 'source_server_password': p.get('source_server_password'), - 'cpu_autoscale_policy_id': p.get('cpu_autoscale_policy_id'), - 'anti_affinity_policy_id': 
p.get('anti_affinity_policy_id'), - 'packages': p.get('packages'), - 'configuration_id': p.get('configuration_id'), - 'os_type': p.get('os_type') - } - - count = override_count if override_count else p.get('count') - - changed = False if count == 0 else True - - if not changed: - return server_dict_array, created_server_ids, partial_created_servers_ids, changed - for i in range(0, count): - if not module.check_mode: - req = self._create_clc_server(clc=clc, - module=module, - server_params=params) - server = req.requests[0].Server() - request_list.append(req) - servers.append(server) - - self._wait_for_requests(module, request_list) - self._refresh_servers(module, servers) - - ip_failed_servers = self._add_public_ip_to_servers( - module=module, - should_add_public_ip=add_public_ip, - servers=servers, - public_ip_protocol=public_ip_protocol, - public_ip_ports=public_ip_ports) - ap_failed_servers = self._add_alert_policy_to_servers(clc=clc, - module=module, - servers=servers) - - for server in servers: - if server in ip_failed_servers or server in ap_failed_servers: - partial_created_servers_ids.append(server.id) - else: - # reload server details - server = clc.v2.Server(server.id) - server.data['ipaddress'] = server.details[ - 'ipAddresses'][0]['internal'] - - if add_public_ip and len(server.PublicIPs().public_ips) > 0: - server.data['publicip'] = str( - server.PublicIPs().public_ips[0]) - created_server_ids.append(server.id) - server_dict_array.append(server.data) - - return server_dict_array, created_server_ids, partial_created_servers_ids, changed - - def _enforce_count(self, module, clc): - """ - Enforce that there is the right number of servers in the provided group. - Starts or stops servers as necessary. - :param module: the AnsibleModule object - :param clc: the clc-sdk instance to use - :return: a list of dictionaries with server information about the servers that were created or deleted - """ - p = module.params - changed = False - count_group = p.get('count_group') - datacenter = ClcServer._find_datacenter(clc, module) - exact_count = p.get('exact_count') - server_dict_array = [] - partial_servers_ids = [] - changed_server_ids = [] - - # fail here if the exact count was specified without filtering - # on a group, as this may lead to a undesired removal of instances - if exact_count and count_group is None: - return module.fail_json( - msg="you must use the 'count_group' option with exact_count") - - servers, running_servers = ClcServer._find_running_servers_by_group( - module, datacenter, count_group) - - if len(running_servers) == exact_count: - changed = False - - elif len(running_servers) < exact_count: - to_create = exact_count - len(running_servers) - server_dict_array, changed_server_ids, partial_servers_ids, changed \ - = self._create_servers(module, clc, override_count=to_create) - - for server in server_dict_array: - running_servers.append(server) - - elif len(running_servers) > exact_count: - to_remove = len(running_servers) - exact_count - all_server_ids = sorted([x.id for x in running_servers]) - remove_ids = all_server_ids[0:to_remove] - - (changed, server_dict_array, changed_server_ids) \ - = ClcServer._delete_servers(module, clc, remove_ids) - - return server_dict_array, changed_server_ids, partial_servers_ids, changed - - @staticmethod - def _wait_for_requests(module, request_list): - """ - Block until server provisioning requests are completed. 
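
The exact_count convergence above boils down to a small decision: how many servers to create, or which ids to delete. That decision in isolation, mirroring the module's choice of the lexically lowest ids as removal candidates (the function name is illustrative):

    def enforce_count_plan(running_ids, exact_count):
        """Return (to_create, remove_ids) needed to reach exactly exact_count servers."""
        if len(running_ids) < exact_count:
            return exact_count - len(running_ids), []
        return 0, sorted(running_ids)[:len(running_ids) - exact_count]
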
- :param module: the AnsibleModule object - :param request_list: a list of clc-sdk.Request instances - :return: none - """ - wait = module.params.get('wait') - if wait: - # Requests.WaitUntilComplete() returns the count of failed requests - failed_requests_count = sum( - [request.WaitUntilComplete() for request in request_list]) - - if failed_requests_count > 0: - module.fail_json( - msg='Unable to process server request') - - @staticmethod - def _refresh_servers(module, servers): - """ - Loop through a list of servers and refresh them. - :param module: the AnsibleModule object - :param servers: list of clc-sdk.Server instances to refresh - :return: none - """ - for server in servers: - try: - server.Refresh() - except CLCException as ex: - module.fail_json(msg='Unable to refresh the server {0}. {1}'.format( - server.id, ex.message - )) - - @staticmethod - def _add_public_ip_to_servers( - module, - should_add_public_ip, - servers, - public_ip_protocol, - public_ip_ports): - """ - Create a public IP for servers - :param module: the AnsibleModule object - :param should_add_public_ip: boolean - whether or not to provision a public ip for servers. Skipped if False - :param servers: List of servers to add public ips to - :param public_ip_protocol: a protocol to allow for the public ips - :param public_ip_ports: list of ports to allow for the public ips - :return: none - """ - failed_servers = [] - if not should_add_public_ip: - return failed_servers - - ports_lst = [] - request_list = [] - server = None - - for port in public_ip_ports: - ports_lst.append( - {'protocol': public_ip_protocol, 'port': port}) - try: - if not module.check_mode: - for server in servers: - request = server.PublicIPs().Add(ports_lst) - request_list.append(request) - except APIFailedResponse: - failed_servers.append(server) - ClcServer._wait_for_requests(module, request_list) - return failed_servers - - @staticmethod - def _add_alert_policy_to_servers(clc, module, servers): - """ - Associate the alert policy to servers - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param servers: List of servers to add alert policy to - :return: failed_servers: the list of servers which failed while associating alert policy - """ - failed_servers = [] - p = module.params - alert_policy_id = p.get('alert_policy_id') - alias = p.get('alias') - - if alert_policy_id and not module.check_mode: - for server in servers: - try: - ClcServer._add_alert_policy_to_server( - clc=clc, - alias=alias, - server_id=server.id, - alert_policy_id=alert_policy_id) - except CLCException: - failed_servers.append(server) - return failed_servers - - @staticmethod - def _add_alert_policy_to_server( - clc, alias, server_id, alert_policy_id): - """ - Associate an alert policy to a clc server - :param clc: the clc-sdk instance to use - :param alias: the clc account alias - :param server_id: The clc server id - :param alert_policy_id: the alert policy id to be associated to the server - :return: none - """ - try: - clc.v2.API.Call( - method='POST', - url='servers/%s/%s/alertPolicies' % (alias, server_id), - payload=json.dumps( - { - 'id': alert_policy_id - })) - except APIFailedResponse as e: - raise CLCException( - 'Failed to associate alert policy to the server : {0} with Error {1}'.format( - server_id, str(e.response_text))) - - @staticmethod - def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name): - """ - Returns the alert policy id for the given alert policy name - :param clc: the clc-sdk instance to use - 
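
The name lookup that continues below deliberately fails on ambiguity instead of silently picking one of several same-named policies. The guard as a standalone sketch, assuming the {'items': [...]} payload shape used by the API calls in these modules, with fail standing in for module.fail_json:

    def resolve_policy_id(policies, name, fail):
        """Return the id for name, failing when several policies share it."""
        matches = [p.get('id') for p in policies.get('items', []) if p.get('name') == name]
        if len(matches) > 1:
            fail(msg='multiple alert policies were found with policy name : %s' % name)
        return matches[0] if matches else None
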
:param module: the AnsibleModule object - :param alias: the clc account alias - :param alert_policy_name: the name of the alert policy - :return: alert_policy_id: the alert policy id - """ - alert_policy_id = None - policies = clc.v2.API.Call('GET', '/v2/alertPolicies/%s' % alias) - if not policies: - return alert_policy_id - for policy in policies.get('items'): - if policy.get('name') == alert_policy_name: - if not alert_policy_id: - alert_policy_id = policy.get('id') - else: - return module.fail_json( - msg='multiple alert policies were found with policy name : %s' % alert_policy_name) - return alert_policy_id - - @staticmethod - def _delete_servers(module, clc, server_ids): - """ - Delete the servers on the provided list - :param module: the AnsibleModule object - :param clc: the clc-sdk instance to use - :param server_ids: list of servers to delete - :return: a list of dictionaries with server information about the servers that were deleted - """ - terminated_server_ids = [] - server_dict_array = [] - request_list = [] - - if not isinstance(server_ids, list) or len(server_ids) < 1: - return module.fail_json( - msg='server_ids should be a list of servers, aborting') - - servers = clc.v2.Servers(server_ids).Servers() - for server in servers: - if not module.check_mode: - request_list.append(server.Delete()) - ClcServer._wait_for_requests(module, request_list) - - for server in servers: - terminated_server_ids.append(server.id) - - return True, server_dict_array, terminated_server_ids - - @staticmethod - def _start_stop_servers(module, clc, server_ids): - """ - Start or Stop the servers on the provided list - :param module: the AnsibleModule object - :param clc: the clc-sdk instance to use - :param server_ids: list of servers to start or stop - :return: a list of dictionaries with server information about the servers that were started or stopped - """ - p = module.params - state = p.get('state') - changed = False - changed_servers = [] - server_dict_array = [] - result_server_ids = [] - request_list = [] - - if not isinstance(server_ids, list) or len(server_ids) < 1: - return module.fail_json( - msg='server_ids should be a list of servers, aborting') - - servers = clc.v2.Servers(server_ids).Servers() - for server in servers: - if server.powerState != state: - changed_servers.append(server) - if not module.check_mode: - request_list.append( - ClcServer._change_server_power_state( - module, - server, - state)) - changed = True - - ClcServer._wait_for_requests(module, request_list) - ClcServer._refresh_servers(module, changed_servers) - - for server in set(changed_servers + servers): - try: - server.data['ipaddress'] = server.details[ - 'ipAddresses'][0]['internal'] - server.data['publicip'] = str( - server.PublicIPs().public_ips[0]) - except (KeyError, IndexError): - pass - - server_dict_array.append(server.data) - result_server_ids.append(server.id) - - return changed, server_dict_array, result_server_ids - - @staticmethod - def _change_server_power_state(module, server, state): - """ - Change the server powerState - :param module: the module to check for intended state - :param server: the server to start or stop - :param state: the intended powerState for the server - :return: the request object from clc-sdk call - """ - result = None - try: - if state == 'started': - result = server.PowerOn() - else: - # Try to shut down the server and fall back to power off when unable to shut down. 
- result = server.ShutDown() - if result and hasattr(result, 'requests') and result.requests[0]: - return result - else: - result = server.PowerOff() - except CLCException: - module.fail_json( - msg='Unable to change power state for server {0}'.format( - server.id)) - return result - - @staticmethod - def _find_running_servers_by_group(module, datacenter, count_group): - """ - Find a list of running servers in the provided group - :param module: the AnsibleModule object - :param datacenter: the clc-sdk.Datacenter instance to use to lookup the group - :param count_group: the group to count the servers - :return: list of servers, and list of running servers - """ - group = ClcServer._find_group( - module=module, - datacenter=datacenter, - lookup_group=count_group) - - servers = group.Servers().Servers() - running_servers = [] - - for server in servers: - if server.status == 'active' and server.powerState == 'started': - running_servers.append(server) - - return servers, running_servers - - @staticmethod - def _find_group(module, datacenter, lookup_group=None): - """ - Find a server group in a datacenter by calling the CLC API - :param module: the AnsibleModule instance - :param datacenter: clc-sdk.Datacenter instance to search for the group - :param lookup_group: string name of the group to search for - :return: clc-sdk.Group instance - """ - if not lookup_group: - lookup_group = module.params.get('group') - try: - return datacenter.Groups().Get(lookup_group) - except CLCException: - pass - - # The search above only acts on the main - result = ClcServer._find_group_recursive( - module, - datacenter.Groups(), - lookup_group) - - if result is None: - module.fail_json( - msg=str( - "Unable to find group: " + - lookup_group + - " in location: " + - datacenter.id)) - - return result - - @staticmethod - def _find_group_recursive(module, group_list, lookup_group): - """ - Find a server group by recursively walking the tree - :param module: the AnsibleModule instance to use - :param group_list: a list of groups to search - :param lookup_group: the group to look for - :return: list of groups - """ - result = None - for group in group_list.groups: - subgroups = group.Subgroups() - try: - return subgroups.Get(lookup_group) - except CLCException: - result = ClcServer._find_group_recursive( - module, - subgroups, - lookup_group) - - if result is not None: - break - - return result - - @staticmethod - def _create_clc_server( - clc, - module, - server_params): - """ - Call the CLC Rest API to Create a Server - :param clc: the clc-python-sdk instance to use - :param module: the AnsibleModule instance to use - :param server_params: a dictionary of params to use to create the servers - :return: clc-sdk.Request object linked to the queued server request - """ - - try: - res = clc.v2.API.Call( - method='POST', - url='servers/%s' % - (server_params.get('alias')), - payload=json.dumps( - { - 'name': server_params.get('name'), - 'description': server_params.get('description'), - 'groupId': server_params.get('group_id'), - 'sourceServerId': server_params.get('template'), - 'isManagedOS': server_params.get('managed_os'), - 'primaryDNS': server_params.get('primary_dns'), - 'secondaryDNS': server_params.get('secondary_dns'), - 'networkId': server_params.get('network_id'), - 'ipAddress': server_params.get('ip_address'), - 'password': server_params.get('password'), - 'sourceServerPassword': server_params.get('source_server_password'), - 'cpu': server_params.get('cpu'), - 'cpuAutoscalePolicyId': 
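
The power-state change above prefers an OS-level ShutDown and only hard powers off when the shutdown produced no usable request. A sketch of that fallback against the clc-sdk server methods this module already uses (the helper name is illustrative):

    def stop_gracefully(server):
        """Try a clean shutdown first; fall back to a hard power off."""
        result = server.ShutDown()
        if result and getattr(result, 'requests', None) and result.requests[0]:
            return result
        return server.PowerOff()
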
server_params.get('cpu_autoscale_policy_id'), - 'memoryGB': server_params.get('memory'), - 'type': server_params.get('type'), - 'storageType': server_params.get('storage_type'), - 'antiAffinityPolicyId': server_params.get('anti_affinity_policy_id'), - 'customFields': server_params.get('custom_fields'), - 'additionalDisks': server_params.get('additional_disks'), - 'ttl': server_params.get('ttl'), - 'packages': server_params.get('packages'), - 'configurationId': server_params.get('configuration_id'), - 'osType': server_params.get('os_type')})) - - result = clc.v2.Requests(res) - except APIFailedResponse as ex: - return module.fail_json(msg='Unable to create the server: {0}. {1}'.format( - server_params.get('name'), - ex.response_text - )) - - # - # Patch the Request object so that it returns a valid server - - # Find the server's UUID from the API response - server_uuid = [obj['id'] - for obj in res['links'] if obj['rel'] == 'self'][0] - - # Change the request server method to a _find_server_by_uuid closure so - # that it will work - result.requests[0].Server = lambda: ClcServer._find_server_by_uuid_w_retry( - clc, - module, - server_uuid, - server_params.get('alias')) - - return result - - @staticmethod - def _get_anti_affinity_policy_id(clc, module, alias, aa_policy_name): - """ - retrieves the anti affinity policy id of the server based on the name of the policy - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param alias: the CLC account alias - :param aa_policy_name: the anti affinity policy name - :return: aa_policy_id: The anti affinity policy id - """ - aa_policy_id = None - try: - aa_policies = clc.v2.API.Call(method='GET', - url='antiAffinityPolicies/%s' % alias) - except APIFailedResponse as ex: - return module.fail_json(msg='Unable to fetch anti affinity policies for account: {0}. {1}'.format( - alias, ex.response_text)) - for aa_policy in aa_policies.get('items'): - if aa_policy.get('name') == aa_policy_name: - if not aa_policy_id: - aa_policy_id = aa_policy.get('id') - else: - return module.fail_json( - msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name) - return aa_policy_id - - # - # This is the function that gets patched to the Request.server object using a lambda closure - # - - @staticmethod - def _find_server_by_uuid_w_retry( - clc, module, svr_uuid, alias=None, retries=5, back_out=2): - """ - Find the clc server by the UUID returned from the provisioning request. Retry the request if a 404 is returned. - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param svr_uuid: UUID of the server - :param retries: the number of retry attempts to make prior to fail. 
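
The post-create patching above has two steps: pull the new server's UUID out of the response's links list, then rebind the request's Server attribute to a closure over that UUID. The extraction step in isolation (find_server_by_uuid in the comment is a stand-in for the retry helper defined nearby, not a real name from the sdk):

    def self_link_id(res):
        """Return the id carried by the response's rel='self' link."""
        return [link['id'] for link in res['links'] if link['rel'] == 'self'][0]

    # The module then rebinds, roughly:
    # result.requests[0].Server = lambda: find_server_by_uuid(clc, module, self_link_id(res), alias)
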
default is 5 - :param alias: the Account Alias to search - :return: a clc-sdk.Server instance - """ - if not alias: - alias = clc.v2.Account.GetAlias() - - # Wait and retry if the api returns a 404 - while True: - retries -= 1 - try: - server_obj = clc.v2.API.Call( - method='GET', url='servers/%s/%s?uuid=true' % - (alias, svr_uuid)) - server_id = server_obj['id'] - server = clc.v2.Server( - id=server_id, - alias=alias, - server_obj=server_obj) - return server - - except APIFailedResponse as e: - if e.response_status_code != 404: - return module.fail_json( - msg='A failure response was received from CLC API when ' - 'attempting to get details for a server: UUID=%s, Code=%i, Message=%s' % - (svr_uuid, e.response_status_code, e.message)) - if retries == 0: - return module.fail_json( - msg='Unable to reach the CLC API after 5 attempts') - time.sleep(back_out) - back_out *= 2 - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. - :return: none - """ - argument_dict = ClcServer._define_module_argument_spec() - module = AnsibleModule(supports_check_mode=True, **argument_dict) - clc_server = ClcServer(module) - clc_server.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/clc_server_snapshot.py b/plugins/modules/clc_server_snapshot.py deleted file mode 100644 index d9eaf9abe1..0000000000 --- a/plugins/modules/clc_server_snapshot.py +++ /dev/null @@ -1,413 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r""" -module: clc_server_snapshot -short_description: Create, Delete and Restore server snapshots in CenturyLink Cloud -description: - - An Ansible module to Create, Delete and Restore server snapshots in CenturyLink Cloud. -extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: full - diff_mode: - support: none -options: - server_ids: - description: - - The list of CLC server Ids. - type: list - required: true - elements: str - expiration_days: - description: - - The number of days to keep the server snapshot before it expires. - type: int - default: 7 - required: false - state: - description: - - The state to insure that the provided resources are in. - type: str - default: 'present' - required: false - choices: ['present', 'absent', 'restore'] - wait: - description: - - Whether to wait for the provisioning tasks to finish before returning. - default: 'True' - required: false - type: str -requirements: - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the Centurylink Cloud. - - E(CLC_V2_API_USERNAME), the account login id for the Centurylink Cloud. - - E(CLC_V2_API_PASSWORD), the account password for the Centurylink Cloud. - - Alternatively, the module accepts the API token and account alias. 
The API token can be generated using the CLC account - login and password using the HTTP API call @ https://api.ctl.io/v2/authentication/login - - E(CLC_V2_API_TOKEN), the API token generated from https://api.ctl.io/v2/authentication/login - - E(CLC_ACCT_ALIAS), the account alias associated with the Centurylink Cloud. - - Users can set E(CLC_V2_API_URL) to specify an endpoint for pointing to a different CLC environment. -""" - -EXAMPLES = r""" -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - -- name: Create server snapshot - community.general.clc_server_snapshot: - server_ids: - - UC1TEST-SVR01 - - UC1TEST-SVR02 - expiration_days: 10 - wait: true - state: present - -- name: Restore server snapshot - community.general.clc_server_snapshot: - server_ids: - - UC1TEST-SVR01 - - UC1TEST-SVR02 - wait: true - state: restore - -- name: Delete server snapshot - community.general.clc_server_snapshot: - server_ids: - - UC1TEST-SVR01 - - UC1TEST-SVR02 - wait: true - state: absent -""" - -RETURN = r""" -server_ids: - description: The list of server ids that are changed. - returned: success - type: list - sample: ["UC1TEST-SVR01", "UC1TEST-SVR02"] -""" - -__version__ = '${version}' - -import os -import traceback - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. -# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcSnapshot: - - clc = clc_sdk - module = None - - def __init__(self, module): - """ - Construct module - """ - self.module = module - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - p = self.module.params - server_ids = p['server_ids'] - expiration_days = p['expiration_days'] - state = p['state'] - request_list = [] - changed = False - changed_servers = [] - - self._set_clc_credentials_from_env() - if state == 'present': - changed, request_list, changed_servers = self.ensure_server_snapshot_present( - server_ids=server_ids, - expiration_days=expiration_days) - elif state == 'absent': - changed, request_list, changed_servers = self.ensure_server_snapshot_absent( - server_ids=server_ids) - elif state == 'restore': - changed, request_list, changed_servers = self.ensure_server_snapshot_restore( - server_ids=server_ids) - - self._wait_for_requests_to_complete(request_list) - return self.module.exit_json( - changed=changed, - server_ids=changed_servers) - - def ensure_server_snapshot_present(self, server_ids, expiration_days): - """ - Ensures the given set of server_ids have the snapshots created - :param 
server_ids: The list of server_ids to create the snapshot - :param expiration_days: The number of days to keep the snapshot - :return: (changed, request_list, changed_servers) - changed: A flag indicating whether any change was made - request_list: the list of clc request objects from CLC API call - changed_servers: The list of servers ids that are modified - """ - request_list = [] - changed = False - servers = self._get_servers_from_clc( - server_ids, - 'Failed to obtain server list from the CLC API') - servers_to_change = [ - server for server in servers if len( - server.GetSnapshots()) == 0] - for server in servers_to_change: - changed = True - if not self.module.check_mode: - request = self._create_server_snapshot(server, expiration_days) - request_list.append(request) - changed_servers = [ - server.id for server in servers_to_change if server.id] - return changed, request_list, changed_servers - - def _create_server_snapshot(self, server, expiration_days): - """ - Create the snapshot for the CLC server - :param server: the CLC server object - :param expiration_days: The number of days to keep the snapshot - :return: the create request object from CLC API Call - """ - result = None - try: - result = server.CreateSnapshot( - delete_existing=True, - expiration_days=expiration_days) - except CLCException as ex: - self.module.fail_json(msg='Failed to create snapshot for server : {0}. {1}'.format( - server.id, ex.response_text - )) - return result - - def ensure_server_snapshot_absent(self, server_ids): - """ - Ensures the given set of server_ids have the snapshots removed - :param server_ids: The list of server_ids to delete the snapshot - :return: (changed, request_list, changed_servers) - changed: A flag indicating whether any change was made - request_list: the list of clc request objects from CLC API call - changed_servers: The list of servers ids that are modified - """ - request_list = [] - changed = False - servers = self._get_servers_from_clc( - server_ids, - 'Failed to obtain server list from the CLC API') - servers_to_change = [ - server for server in servers if len( - server.GetSnapshots()) > 0] - for server in servers_to_change: - changed = True - if not self.module.check_mode: - request = self._delete_server_snapshot(server) - request_list.append(request) - changed_servers = [ - server.id for server in servers_to_change if server.id] - return changed, request_list, changed_servers - - def _delete_server_snapshot(self, server): - """ - Delete snapshot for the CLC server - :param server: the CLC server object - :return: the delete snapshot request object from CLC API - """ - result = None - try: - result = server.DeleteSnapshot() - except CLCException as ex: - self.module.fail_json(msg='Failed to delete snapshot for server : {0}. 
{1}'.format( - server.id, ex.response_text - )) - return result - - def ensure_server_snapshot_restore(self, server_ids): - """ - Ensures the given set of server_ids have the snapshots restored - :param server_ids: The list of server_ids to delete the snapshot - :return: (changed, request_list, changed_servers) - changed: A flag indicating whether any change was made - request_list: the list of clc request objects from CLC API call - changed_servers: The list of servers ids that are modified - """ - request_list = [] - changed = False - servers = self._get_servers_from_clc( - server_ids, - 'Failed to obtain server list from the CLC API') - servers_to_change = [ - server for server in servers if len( - server.GetSnapshots()) > 0] - for server in servers_to_change: - changed = True - if not self.module.check_mode: - request = self._restore_server_snapshot(server) - request_list.append(request) - changed_servers = [ - server.id for server in servers_to_change if server.id] - return changed, request_list, changed_servers - - def _restore_server_snapshot(self, server): - """ - Restore snapshot for the CLC server - :param server: the CLC server object - :return: the restore snapshot request object from CLC API - """ - result = None - try: - result = server.RestoreSnapshot() - except CLCException as ex: - self.module.fail_json(msg='Failed to restore snapshot for server : {0}. {1}'.format( - server.id, ex.response_text - )) - return result - - def _wait_for_requests_to_complete(self, requests_lst): - """ - Waits until the CLC requests are complete if the wait argument is True - :param requests_lst: The list of CLC request objects - :return: none - """ - if not self.module.params['wait']: - return - for request in requests_lst: - request.WaitUntilComplete() - for request_details in request.requests: - if request_details.Status() != 'succeeded': - self.module.fail_json( - msg='Unable to process server snapshot request') - - @staticmethod - def define_argument_spec(): - """ - This function defines the dictionary object required for - package module - :return: the package dictionary object - """ - argument_spec = dict( - server_ids=dict(type='list', required=True, elements='str'), - expiration_days=dict(default=7, type='int'), - wait=dict(default=True), - state=dict( - default='present', - choices=[ - 'present', - 'absent', - 'restore']), - ) - return argument_spec - - def _get_servers_from_clc(self, server_list, message): - """ - Internal function to fetch list of CLC server objects from a list of server ids - :param server_list: The list of server ids - :param message: The error message to throw in case of any error - :return the list of CLC server objects - """ - try: - return self.clc.v2.Servers(server_list).servers - except CLCException as ex: - return self.module.fail_json(msg=message + ': %s' % ex) - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - 
api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - Main function - :return: None - """ - module = AnsibleModule( - argument_spec=ClcSnapshot.define_argument_spec(), - supports_check_mode=True - ) - clc_snapshot = ClcSnapshot(module) - clc_snapshot.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud_init_data_facts.py b/plugins/modules/cloud_init_data_facts.py index 360b4119ef..544a663e5c 100644 --- a/plugins/modules/cloud_init_data_facts.py +++ b/plugins/modules/cloud_init_data_facts.py @@ -50,38 +50,41 @@ cloud_init_data_facts: description: Facts of result and status. returned: success type: dict - sample: '{ - "status": { + sample: + { + "status": { "v1": { - "datasource": "DataSourceCloudStack", - "errors": [] - }, - "result": { - "v1": { - "datasource": "DataSourceCloudStack", - "init": { - "errors": [], - "finished": 1522066377.0185432, - "start": 1522066375.2648022 - }, - "init-local": { - "errors": [], - "finished": 1522066373.70919, - "start": 1522066373.4726632 - }, - "modules-config": { - "errors": [], - "finished": 1522066380.9097016, - "start": 1522066379.0011985 - }, - "modules-final": { - "errors": [], - "finished": 1522066383.56594, - "start": 1522066382.3449218 - }, - "stage": null + "datasource": "DataSourceCloudStack", + "errors": [] } - }' + }, + "result": { + "v1": { + "datasource": "DataSourceCloudStack", + "init": { + "errors": [], + "finished": 1522066377.0185432, + "start": 1522066375.2648022 + }, + "init-local": { + "errors": [], + "finished": 1522066373.70919, + "start": 1522066373.4726632 + }, + "modules-config": { + "errors": [], + "finished": 1522066380.9097016, + "start": 1522066379.0011985 + }, + "modules-final": { + "errors": [], + "finished": 1522066383.56594, + "start": 1522066382.3449218 + }, + "stage": null + } + } + } """ import os @@ -105,9 +108,8 @@ def gather_cloud_init_data_facts(module): json_file = os.path.join(CLOUD_INIT_PATH, i + '.json') if os.path.exists(json_file): - f = open(json_file, 'rb') - contents = to_text(f.read(), errors='surrogate_or_strict') - f.close() + with open(json_file, 'rb') as f: + contents = to_text(f.read(), errors='surrogate_or_strict') if contents: res['cloud_init_data_facts'][i] = module.from_json(contents) diff --git a/plugins/modules/cloudflare_dns.py b/plugins/modules/cloudflare_dns.py index 6ce2ff8bb4..fafca00b50 100644 --- a/plugins/modules/cloudflare_dns.py +++ b/plugins/modules/cloudflare_dns.py @@ -127,7 +127,7 @@ options: description: - Whether the record should be the only one for that record type and record name. - Only use with O(state=present). - - This will delete all other records with the same record name and type. + - This deletes all other records with the same record name and type. type: bool state: description: @@ -157,8 +157,9 @@ options: - The type of DNS record to create. Required if O(state=present). - Support for V(SPF) has been removed from community.general 9.0.0 since that record type is no longer supported by CloudFlare. + - Support for V(PTR) has been added in community.general 11.1.0. 
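# Aside, kept as comment lines so it reads inline with the documentation hunk above:
# for PTR records such as the example added further below, the reverse-pointer name
# can be derived with Python's standard library (an illustrative sketch, not module code):
#
#   >>> import ipaddress
#   >>> ipaddress.ip_address('192.0.2.1').reverse_pointer
#   '1.2.0.192.in-addr.arpa'
#
# Here O(record) would be V(1) and O(zone) V(2.0.192.in-addr.arpa).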
type: str - choices: [A, AAAA, CNAME, DS, MX, NS, SRV, SSHFP, TLSA, CAA, TXT] + choices: [A, AAAA, CNAME, DS, MX, NS, SRV, SSHFP, TLSA, CAA, TXT, PTR] value: description: - The record value. @@ -311,6 +312,14 @@ EXAMPLES = r""" algorithm: 8 hash_type: 2 value: B4EB5AC4467D2DFB3BAF9FB9961DC1B6FED54A58CDFAA3E465081EC86F89BFAB + +- name: Create PTR record "1.2.0.192.in-addr.arpa" with value "test.example.com" + community.general.cloudflare_dns: + zone: 2.0.192.in-addr.arpa + record: 1 + type: PTR + value: test.example.com + state: present """ RETURN = r""" @@ -345,8 +354,16 @@ record: description: Additional record data. returned: success, if type is SRV, DS, SSHFP TLSA or CAA type: dict - sample: {name: "jabber", port: 8080, priority: 10, proto: "_tcp", service: "_xmpp", target: "jabberhost.sample.com", - weight: 5} + sample: + { + "name": "jabber", + "port": 8080, + "priority": 10, + "proto": "_tcp", + "service": "_xmpp", + "target": "jabberhost.sample.com", + "weight": 5 + } id: description: The record ID. returned: success @@ -361,7 +378,7 @@ record: description: Extra Cloudflare-specific information about the record. returned: success type: dict - sample: {auto_added: false} + sample: {"auto_added": false} modified_on: description: Record modification date. returned: success @@ -392,7 +409,7 @@ record: returned: success type: list elements: str - sample: ['production', 'app'] + sample: ["production", "app"] version_added: 10.1.0 tags_modified_on: description: When the record tags were last modified. Omitted if there are no tags. @@ -431,9 +448,11 @@ from ansible.module_utils.urls import fetch_url def lowercase_string(param): - if not isinstance(param, str): - return param - return param.lower() + return param.lower() if isinstance(param, str) else param + + +def join_str(sep, *args): + return sep.join([str(arg) for arg in args]) class CloudflareAPI(object): @@ -479,29 +498,29 @@ class CloudflareAPI(object): if (self.type == 'AAAA') and (self.value is not None): self.value = self.value.lower() - if (self.type == 'SRV'): + if self.type == 'SRV': if (self.proto is not None) and (not self.proto.startswith('_')): - self.proto = '_' + self.proto + self.proto = '_{0}'.format(self.proto) if (self.service is not None) and (not self.service.startswith('_')): - self.service = '_' + self.service + self.service = '_{0}'.format(self.service) - if (self.type == 'TLSA'): + if self.type == 'TLSA': if (self.proto is not None) and (not self.proto.startswith('_')): - self.proto = '_' + self.proto + self.proto = '_{0}'.format(self.proto) if (self.port is not None): - self.port = '_' + str(self.port) + self.port = '_{0}'.format(self.port) if not self.record.endswith(self.zone): - self.record = self.record + '.' + self.zone + self.record = join_str('.', self.record, self.zone) - if (self.type == 'DS'): + if self.type == 'DS': if self.record == self.zone: self.module.fail_json(msg="DS records only apply to subdomains.") def _cf_simple_api_call(self, api_call, method='GET', payload=None): if self.api_token: headers = { - 'Authorization': 'Bearer ' + self.api_token, + 'Authorization': 'Bearer {0}'.format(self.api_token), 'Content-Type': 'application/json', } else: @@ -551,6 +570,9 @@ class CloudflareAPI(object): try: content = resp.read() except AttributeError: + content = None + + if not content: if info['body']: content = info['body'] else: @@ -598,7 +620,7 @@ class CloudflareAPI(object): else: raw_api_call = api_call while next_page <= pagination['total_pages']: - raw_api_call += '?' 
+ '&'.join(parameters) + raw_api_call += '?{0}'.format('&'.join(parameters)) result, status = self._cf_simple_api_call(raw_api_call, method, payload) data += result['result'] next_page += 1 @@ -623,8 +645,8 @@ class CloudflareAPI(object): name = self.zone param = '' if name: - param = '?' + urlencode({'name': name}) - zones, status = self._cf_api_call('/zones' + param) + param = '?{0}'.format(urlencode({'name': name})) + zones, status = self._cf_api_call('/zones{0}'.format(param)) return zones def get_dns_records(self, zone_name=None, type=None, record=None, value=''): @@ -649,206 +671,189 @@ class CloudflareAPI(object): if value: query['content'] = value if query: - api_call += '?' + urlencode(query) + api_call += '?{0}'.format(urlencode(query)) records, status = self._cf_api_call(api_call) return records - def delete_dns_records(self, **kwargs): - params = {} - for param in ['port', 'proto', 'service', 'solo', 'type', 'record', 'value', 'weight', 'zone', - 'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag', 'flag', 'tag']: - if param in kwargs: - params[param] = kwargs[param] - else: - params[param] = getattr(self, param) - + def delete_dns_records(self, solo): records = [] - content = params['value'] - search_record = params['record'] - if params['type'] == 'SRV': - if not (params['value'] is None or params['value'] == ''): - content = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value'] - search_record = params['service'] + '.' + params['proto'] + '.' + params['record'] - elif params['type'] == 'DS': - if not (params['value'] is None or params['value'] == ''): - content = str(params['key_tag']) + '\t' + str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value'] - elif params['type'] == 'SSHFP': - if not (params['value'] is None or params['value'] == ''): - content = str(params['algorithm']) + ' ' + str(params['hash_type']) + ' ' + params['value'].upper() - elif params['type'] == 'TLSA': - if not (params['value'] is None or params['value'] == ''): - content = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value'] - search_record = params['port'] + '.' + params['proto'] + '.' 
+ params['record'] - if params['solo']: + content = self.value + search_record = self.record + if self.type == 'SRV': + if not (self.value is None or self.value == ''): + content = join_str('\t', self.weight, self.port, self.value) + search_record = join_str('.', self.service, self.proto, self.record) + elif self.type == 'DS': + if not (self.value is None or self.value == ''): + content = join_str('\t', self.key_tag, self.algorithm, self.hash_type, self.value) + elif self.type == 'SSHFP': + if not (self.value is None or self.value == ''): + content = join_str(' ', self.algorithm, self.hash_type, self.value.upper()) + elif self.type == 'TLSA': + if not (self.value is None or self.value == ''): + content = join_str('\t', self.cert_usage, self.selector, self.hash_type, self.value) + search_record = join_str('.', self.port, self.proto, self.record) + if solo: search_value = None else: search_value = content - records = self.get_dns_records(params['zone'], params['type'], search_record, search_value) + zone_id = self._get_zone_id(self.zone) + records = self.get_dns_records(self.zone, self.type, search_record, search_value) for rr in records: - if params['solo']: - if not ((rr['type'] == params['type']) and (rr['name'] == search_record) and (rr['content'] == content)): + if solo: + if not ((rr['type'] == self.type) and (rr['name'] == search_record) and (rr['content'] == content)): self.changed = True if not self.module.check_mode: - result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'], rr['id']), 'DELETE') + result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(zone_id, rr['id']), 'DELETE') else: self.changed = True if not self.module.check_mode: - result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'], rr['id']), 'DELETE') + result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(zone_id, rr['id']), 'DELETE') return self.changed - def ensure_dns_record(self, **kwargs): - params = {} - for param in ['port', 'priority', 'proto', 'proxied', 'service', 'ttl', 'type', 'record', 'value', 'weight', 'zone', - 'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag', 'flag', 'tag', 'tags', 'comment']: - if param in kwargs: - params[param] = kwargs[param] - else: - params[param] = getattr(self, param) - - search_value = params['value'] - search_record = params['record'] + def ensure_dns_record(self): + search_value = self.value + search_record = self.record new_record = None - if (params['type'] is None) or (params['record'] is None): - self.module.fail_json(msg="You must provide a type and a record to create a new record") - if (params['type'] in ['A', 'AAAA', 'CNAME', 'TXT', 'MX', 'NS']): - if not params['value']: + if self.type in ['A', 'AAAA', 'CNAME', 'TXT', 'MX', 'NS', 'PTR']: + if not self.value: self.module.fail_json(msg="You must provide a non-empty value to create this record type") # there can only be one CNAME per record # ignoring the value when searching for existing # CNAME records allows us to update the value if it # changes - if params['type'] == 'CNAME': + if self.type == 'CNAME': search_value = None new_record = { - "type": params['type'], - "name": params['record'], - "content": params['value'], - "ttl": params['ttl'] + "type": self.type, + "name": self.record, + "content": self.value, + "ttl": self.ttl } - if (params['type'] in ['A', 'AAAA', 'CNAME']): - new_record["proxied"] = params["proxied"] + if self.type in ['A', 'AAAA', 'CNAME']: + new_record["proxied"] = self.proxied - if 
params['type'] == 'MX': - for attr in [params['priority'], params['value']]: + if self.type == 'MX': + for attr in [self.priority, self.value]: if (attr is None) or (attr == ''): self.module.fail_json(msg="You must provide priority and a value to create this record type") new_record = { - "type": params['type'], - "name": params['record'], - "content": params['value'], - "priority": params['priority'], - "ttl": params['ttl'] + "type": self.type, + "name": self.record, + "content": self.value, + "priority": self.priority, + "ttl": self.ttl } - if params['type'] == 'SRV': - for attr in [params['port'], params['priority'], params['proto'], params['service'], params['weight'], params['value']]: + if self.type == 'SRV': + for attr in [self.port, self.priority, self.proto, self.service, self.weight, self.value]: if (attr is None) or (attr == ''): self.module.fail_json(msg="You must provide port, priority, proto, service, weight and a value to create this record type") srv_data = { - "target": params['value'], - "port": params['port'], - "weight": params['weight'], - "priority": params['priority'], + "target": self.value, + "port": self.port, + "weight": self.weight, + "priority": self.priority, } new_record = { - "type": params['type'], - "name": params['service'] + '.' + params['proto'] + '.' + params['record'], - "ttl": params['ttl'], + "type": self.type, + "name": join_str('.', self.service, self.proto, self.record), + "ttl": self.ttl, 'data': srv_data, } - search_value = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value'] - search_record = params['service'] + '.' + params['proto'] + '.' + params['record'] + search_value = join_str('\t', self.weight, self.port, self.value) + search_record = join_str('.', self.service, self.proto, self.record) - if params['type'] == 'DS': - for attr in [params['key_tag'], params['algorithm'], params['hash_type'], params['value']]: + if self.type == 'DS': + for attr in [self.key_tag, self.algorithm, self.hash_type, self.value]: if (attr is None) or (attr == ''): self.module.fail_json(msg="You must provide key_tag, algorithm, hash_type and a value to create this record type") ds_data = { - "key_tag": params['key_tag'], - "algorithm": params['algorithm'], - "digest_type": params['hash_type'], - "digest": params['value'], + "key_tag": self.key_tag, + "algorithm": self.algorithm, + "digest_type": self.hash_type, + "digest": self.value, } new_record = { - "type": params['type'], - "name": params['record'], + "type": self.type, + "name": self.record, 'data': ds_data, - "ttl": params['ttl'], + "ttl": self.ttl, } - search_value = str(params['key_tag']) + '\t' + str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value'] + search_value = join_str('\t', self.key_tag, self.algorithm, self.hash_type, self.value) - if params['type'] == 'SSHFP': - for attr in [params['algorithm'], params['hash_type'], params['value']]: + if self.type == 'SSHFP': + for attr in [self.algorithm, self.hash_type, self.value]: if (attr is None) or (attr == ''): self.module.fail_json(msg="You must provide algorithm, hash_type and a value to create this record type") sshfp_data = { - "fingerprint": params['value'].upper(), - "type": params['hash_type'], - "algorithm": params['algorithm'], + "fingerprint": self.value.upper(), + "type": self.hash_type, + "algorithm": self.algorithm, } new_record = { - "type": params['type'], - "name": params['record'], + "type": self.type, + "name": self.record, 'data': sshfp_data, - "ttl": params['ttl'], + "ttl": 
self.ttl, } - search_value = str(params['algorithm']) + ' ' + str(params['hash_type']) + ' ' + params['value'] + search_value = join_str(' ', self.algorithm, self.hash_type, self.value) - if params['type'] == 'TLSA': - for attr in [params['port'], params['proto'], params['cert_usage'], params['selector'], params['hash_type'], params['value']]: + if self.type == 'TLSA': + for attr in [self.port, self.proto, self.cert_usage, self.selector, self.hash_type, self.value]: if (attr is None) or (attr == ''): self.module.fail_json(msg="You must provide port, proto, cert_usage, selector, hash_type and a value to create this record type") - search_record = params['port'] + '.' + params['proto'] + '.' + params['record'] + search_record = join_str('.', self.port, self.proto, self.record) tlsa_data = { - "usage": params['cert_usage'], - "selector": params['selector'], - "matching_type": params['hash_type'], - "certificate": params['value'], + "usage": self.cert_usage, + "selector": self.selector, + "matching_type": self.hash_type, + "certificate": self.value, } new_record = { - "type": params['type'], + "type": self.type, "name": search_record, 'data': tlsa_data, - "ttl": params['ttl'], + "ttl": self.ttl, } - search_value = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value'] + search_value = join_str('\t', self.cert_usage, self.selector, self.hash_type, self.value) - if params['type'] == 'CAA': - for attr in [params['flag'], params['tag'], params['value']]: - if (attr is None) or (attr == ''): + if self.type == 'CAA': + for attr in [self.flag, self.tag, self.value]: + if attr == '': self.module.fail_json(msg="You must provide flag, tag and a value to create this record type") caa_data = { - "flags": params['flag'], - "tag": params['tag'], - "value": params['value'], + "flags": self.flag, + "tag": self.tag, + "value": self.value, } new_record = { - "type": params['type'], - "name": params['record'], + "type": self.type, + "name": self.record, 'data': caa_data, - "ttl": params['ttl'], + "ttl": self.ttl, } search_value = None - new_record['comment'] = params['comment'] or None - new_record['tags'] = params['tags'] or [] + new_record['comment'] = self.comment or None + new_record['tags'] = self.tags or [] - zone_id = self._get_zone_id(params['zone']) - records = self.get_dns_records(params['zone'], params['type'], search_record, search_value) + zone_id = self._get_zone_id(self.zone) + records = self.get_dns_records(self.zone, self.type, search_record, search_value) # in theory this should be impossible as cloudflare does not allow # the creation of duplicate records but lets cover it anyways if len(records) > 1: # As Cloudflare API cannot filter record containing quotes # CAA records must be compared locally - if params['type'] == 'CAA': + if self.type == 'CAA': for rr in records: if rr['data']['flags'] == caa_data['flags'] and rr['data']['tag'] == caa_data['tag'] and rr['data']['value'] == caa_data['value']: return rr, self.changed @@ -858,16 +863,16 @@ class CloudflareAPI(object): if len(records) == 1: cur_record = records[0] do_update = False - if (params['ttl'] is not None) and (cur_record['ttl'] != params['ttl']): + if (self.ttl is not None) and (cur_record['ttl'] != self.ttl): do_update = True - if (params['priority'] is not None) and ('priority' in cur_record) and (cur_record['priority'] != params['priority']): + if (self.priority is not None) and ('priority' in cur_record) and (cur_record['priority'] != self.priority): do_update = 
True
-        if ('proxied' in new_record) and ('proxied' in cur_record) and (cur_record['proxied'] != params['proxied']):
+        if ('proxied' in new_record) and ('proxied' in cur_record) and (cur_record['proxied'] != self.proxied):
             do_update = True
         if ('data' in new_record) and ('data' in cur_record):
-            if (cur_record['data'] != new_record['data']):
+            if cur_record['data'] != new_record['data']:
                 do_update = True
-        if (params['type'] == 'CNAME') and (cur_record['content'] != new_record['content']):
+        if (self.type == 'CNAME') and (cur_record['content'] != new_record['content']):
             do_update = True
         if cur_record['comment'] != new_record['comment']:
             do_update = True
@@ -893,14 +898,9 @@ class CloudflareAPI(object):
 def main():
     module = AnsibleModule(
         argument_spec=dict(
-            api_token=dict(
-                type="str",
-                required=False,
-                no_log=True,
-                fallback=(env_fallback, ["CLOUDFLARE_TOKEN"]),
-            ),
-            account_api_key=dict(type='str', required=False, no_log=True, aliases=['account_api_token']),
-            account_email=dict(type='str', required=False),
+            api_token=dict(type="str", no_log=True, fallback=(env_fallback, ["CLOUDFLARE_TOKEN"])),
+            account_api_key=dict(type='str', no_log=True, aliases=['account_api_token']),
+            account_email=dict(type='str'),
             algorithm=dict(type='int'),
             cert_usage=dict(type='int', choices=[0, 1, 2, 3]),
             comment=dict(type='str'),
@@ -920,7 +920,7 @@ def main():
             state=dict(type='str', default='present', choices=['absent', 'present']),
             timeout=dict(type='int', default=30),
             ttl=dict(type='int', default=1),
-            type=dict(type='str', choices=['A', 'AAAA', 'CNAME', 'DS', 'MX', 'NS', 'SRV', 'SSHFP', 'TLSA', 'CAA', 'TXT']),
+            type=dict(type='str', choices=['A', 'AAAA', 'CNAME', 'DS', 'MX', 'NS', 'SRV', 'SSHFP', 'TLSA', 'CAA', 'TXT', 'PTR']),
             value=dict(type='str', aliases=['content']),
             weight=dict(type='int', default=1),
             zone=dict(type='str', required=True, aliases=['domain']),
@@ -929,20 +929,21 @@ def main():
         required_if=[
             ('state', 'present', ['record', 'type', 'value']),
             ('state', 'absent', ['record']),
-            ('type', 'SRV', ['proto', 'service']),
+            ('type', 'SRV', ['proto', 'service', 'value']),
             ('type', 'TLSA', ['proto', 'port']),
-            ('type', 'CAA', ['flag', 'tag']),
+            ('type', 'CAA', ['flag', 'tag', 'value']),
+        ],
+        required_together=[
+            ('account_api_key', 'account_email'),
+        ],
+        required_one_of=[
+            ['api_token', 'account_api_key'],
         ],
     )
-    if not module.params['api_token'] and not (module.params['account_api_key'] and module.params['account_email']):
-        module.fail_json(msg="Either api_token or account_api_key and account_email params are required.")

     if module.params['type'] == 'SRV':
-        if not ((module.params['weight'] is not None and module.params['port'] is not None
-                 and not (module.params['value'] is None or module.params['value'] == ''))
-                or (module.params['weight'] is None and module.params['port'] is None
-                    and (module.params['value'] is None or module.params['value'] == ''))):
-            module.fail_json(msg="For SRV records the params weight, port and value all need to be defined, or not at all.")
+        if module.params['value'] == '':
+            module.fail_json(msg="For SRV records the params weight, port and value all need to be defined.")

     if module.params['type'] == 'SSHFP':
         if not ((module.params['algorithm'] is not None and module.params['hash_type'] is not None
@@ -959,11 +960,8 @@ def main():
             module.fail_json(msg="For TLSA records the params cert_usage, selector, hash_type and value all need to be defined, or not at all.")

     if module.params['type'] == 'CAA':
-        if not ((module.params['flag'] is not None and module.params['tag'] is not None
-                 and not (module.params['value'] is None or module.params['value'] == ''))
-                or (module.params['flag'] is None and module.params['tag'] is None
-                    and (module.params['value'] is None or module.params['value'] == ''))):
-            module.fail_json(msg="For CAA records the params flag, tag and value all need to be defined, or not at all.")
+        if module.params['value'] == '':
+            module.fail_json(msg="For CAA records the params flag, tag and value all need to be defined.")

     if module.params['type'] == 'DS':
         if not ((module.params['key_tag'] is not None and module.params['algorithm'] is not None and module.params['hash_type'] is not None
diff --git a/plugins/modules/cobbler_sync.py b/plugins/modules/cobbler_sync.py
index 95a3241b98..b1c92a1690 100644
--- a/plugins/modules/cobbler_sync.py
+++ b/plugins/modules/cobbler_sync.py
@@ -42,12 +42,12 @@ options:
     type: str
   use_ssl:
     description:
-      - If V(false), an HTTP connection will be used instead of the default HTTPS connection.
+      - If V(false), an HTTP connection is used instead of the default HTTPS connection.
     type: bool
     default: true
   validate_certs:
     description:
-      - If V(false), SSL certificates will not be validated.
+      - If V(false), SSL certificates are not validated.
       - This should only set to V(false) when used on personally controlled sites using self-signed certificates.
     type: bool
     default: true
diff --git a/plugins/modules/cobbler_system.py b/plugins/modules/cobbler_system.py
index fd1db6bf3e..a1a400928e 100644
--- a/plugins/modules/cobbler_system.py
+++ b/plugins/modules/cobbler_system.py
@@ -42,12 +42,12 @@ options:
     type: str
   use_ssl:
     description:
-      - If V(false), an HTTP connection will be used instead of the default HTTPS connection.
+      - If V(false), an HTTP connection is used instead of the default HTTPS connection.
     type: bool
     default: true
   validate_certs:
     description:
-      - If V(false), SSL certificates will not be validated.
+      - If V(false), SSL certificates are not validated.
       - This should only set to V(false) when used on personally controlled sites using self-signed certificates.
     type: bool
     default: true
@@ -161,6 +161,7 @@
 from ansible.module_utils.common.text.converters import to_text
 from ansible_collections.community.general.plugins.module_utils.datetime import (
     now,
 )
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion

 IFPROPS_MAPPING = dict(
     bondingopts='bonding_opts',
@@ -278,7 +279,11 @@ def main():

     if system:
         # Update existing entry
-        system_id = conn.get_system_handle(name, token)
+        system_id = ''
+        if LooseVersion(str(conn.version())) >= LooseVersion('3.4'):
+            system_id = conn.get_system_handle(name)
+        else:
+            system_id = conn.get_system_handle(name, token)

         for key, value in iteritems(module.params['properties']):
             if key not in system:
diff --git a/plugins/modules/composer.py b/plugins/modules/composer.py
index 59f3a15168..735b4d2d36 100644
--- a/plugins/modules/composer.py
+++ b/plugins/modules/composer.py
@@ -17,7 +17,7 @@ author:
 short_description: Dependency Manager for PHP
 description:
   - Composer is a tool for dependency management in PHP. It allows you to declare the dependent libraries your project needs
-    and it will install them in your project for you.
+    and it installs them in your project for you.
 extends_documentation_fragment:
   - community.general.attributes
 attributes:
@@ -45,7 +45,7 @@ options:
     type: path
     description:
       - Directory of your project (see C(--working-dir)). This is required when the command is not run globally.
-      - Will be ignored if O(global_command=true).
+      - This is ignored if O(global_command=true).
   global_command:
     description:
       - Runs the specified command globally.
@@ -110,7 +110,8 @@ requirements:
   - php
   - composer installed in bin path (recommended C(/usr/local/bin)) or specified in O(composer_executable)
 notes:
-  - Default options that are always appended in each execution are C(--no-ansi), C(--no-interaction) and C(--no-progress) if available.
+  - Default options that are always appended in each execution are C(--no-ansi), C(--no-interaction) and C(--no-progress)
+    if available.
   - We received reports about issues on macOS if composer was installed by Homebrew. Please use the official install method
     to avoid issues.
 """
diff --git a/plugins/modules/consul.py b/plugins/modules/consul.py
index 84624e3489..9c36ba65f2 100644
--- a/plugins/modules/consul.py
+++ b/plugins/modules/consul.py
@@ -16,13 +16,13 @@ description:
   - Registers services and checks for an agent with a Consul cluster. A service is some process running on the agent node
     that should be advertised by Consul's discovery mechanism. It may optionally supply a check definition, a periodic service
     test to notify the Consul cluster of service's health.
-  - Checks may also be registered per node, for example disk usage, or cpu usage and notify the health of the entire node to the cluster.
-    Service level checks do not require a check name or id as these are derived by Consul from the Service name and id respectively
-    by appending V(service:) Node level checks require a O(check_name) and optionally a O(check_id).
+  - Checks may also be registered per node, for example disk usage or CPU usage, and notify the health of the entire node
+    to the cluster. Service level checks do not require a check name or ID as these are derived by Consul from the Service
+    name and ID respectively by appending V(service:). Node level checks require a O(check_name) and optionally a O(check_id).
   - Currently, there is no complete way to retrieve the script, interval or TTL metadata for a registered check. Without this
     metadata it is not possible to tell if the data supplied with ansible represents a change to a check. As a result this
-    does not attempt to determine changes and will always report a changed occurred. An API method is planned to supply this
-    metadata so at that stage change management will be added.
+    does not attempt to determine changes and it always reports that a change occurred. An API method is planned to supply this
+    metadata so at that stage change management is to be added.
   - See U(http://consul.io) for more details.
 requirements:
   - python-consul
@@ -83,25 +83,25 @@ options:
   service_address:
     type: str
     description:
-      - The address to advertise that the service will be listening on. This value will be passed as the C(address) parameter
-        to Consul's C(/v1/agent/service/register) API method, so refer to the Consul API documentation for further details.
+      - The address to advertise that the service is listening on. This value is passed as the C(address) parameter to Consul's
+        C(/v1/agent/service/register) API method, so refer to the Consul API documentation for further details.
   tags:
     type: list
     elements: str
     description:
-      - Tags that will be attached to the service registration.
+      - Tags that are attached to the service registration.
   script:
     type: str
     description:
-      - The script/command that will be run periodically to check the health of the service.
+      - The script/command that is run periodically to check the health of the service.
      - Requires O(interval) to be provided.
      - Mutually exclusive with O(ttl), O(tcp) and O(http).
   interval:
     type: str
     description:
-      - The interval at which the service check will be run. This is a number with a V(s) or V(m) suffix to signify the units
-        of seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s) will be used by default, for example
-        V(10) will be V(10s).
+      - The interval at which the service check is run. This is a number with a V(s) or V(m) suffix to signify the units of
+        seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s) is used by default, for example V(10)
+        is equivalent to V(10s).
       - Required if one of the parameters O(script), O(http), or O(tcp) is specified.
   check_id:
     type: str
@@ -122,25 +122,25 @@ options:
   ttl:
     type: str
     description:
-      - Checks can be registered with a TTL instead of a O(script) and O(interval) this means that the service will check
-        in with the agent before the TTL expires. If it does not the check will be considered failed. Required if registering
-        a check and the script an interval are missing Similar to the interval this is a number with a V(s) or V(m) suffix
-        to signify the units of seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s) will be used
-        by default, for example V(10) will be V(10s).
+      - Checks can be registered with a TTL instead of a O(script) and O(interval). This means that the service checks in with
+        the agent before the TTL expires. If it does not, the check is considered failed. Required if registering a check and
+        the script and interval are missing. Similar to the interval, this is a number with a V(s) or V(m) suffix to signify
+        the units of seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s) is used by default, for
+        example V(10) is equivalent to V(10s).
       - Mutually exclusive with O(script), O(tcp) and O(http).
   tcp:
     type: str
     description:
-      - Checks can be registered with a TCP port. This means that Consul will check if the connection attempt to that port
-        is successful (that is, the port is currently accepting connections). The format is V(host:port), for example V(localhost:80).
+      - Checks can be registered with a TCP port. This means that Consul checks if the connection attempt to that port is
+        successful (that is, the port is currently accepting connections). The format is V(host:port), for example V(localhost:80).
       - Requires O(interval) to be provided.
       - Mutually exclusive with O(script), O(ttl) and O(http).
     version_added: '1.3.0'
   http:
     type: str
     description:
-      - Checks can be registered with an HTTP endpoint. This means that Consul will check that the http endpoint returns a
-        successful HTTP status.
+      - Checks can be registered with an HTTP endpoint. This means that Consul checks that the http endpoint returns a successful
+        HTTP status.
       - Requires O(interval) to be provided.
       - Mutually exclusive with O(script), O(ttl) and O(tcp).
   timeout:
     type: str
     description:
       - A custom HTTP check timeout. The Consul default is 10 seconds. Similar to the interval this is a number with a V(s)
         or V(m) suffix to signify the units of seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s)
-        will be used by default, for example V(10) will be V(10s).
+        is used by default, for example V(10) is equivalent to V(10s).
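# Aside (an illustrative sketch, not module code): the duration convention documented
# above, where bare numbers default to seconds, can be expressed as a tiny normalizer.
# The helper name normalize_duration is hypothetical:
#
#   def normalize_duration(value):
#       # '10' -> '10s'; '15s' and '1m' already carry a unit and pass through
#       value = str(value)
#       return value if value.endswith(('s', 'm')) else value + 's'
#
#   assert normalize_duration(10) == '10s'
#   assert normalize_duration('15s') == '15s'
#   assert normalize_duration('1m') == '1m'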
   token:
     type: str
     description:
diff --git a/plugins/modules/consul_acl_bootstrap.py b/plugins/modules/consul_acl_bootstrap.py
index 7002c3d549..ba6adf2dd3 100644
--- a/plugins/modules/consul_acl_bootstrap.py
+++ b/plugins/modules/consul_acl_bootstrap.py
@@ -50,7 +50,7 @@ RETURN = r"""
 result:
   description:
     - The bootstrap result as returned by the Consul HTTP API.
-    - B(Note:) If O(bootstrap_secret) has been specified the C(SecretID) and C(ID) will not contain the secret but C(VALUE_SPECIFIED_IN_NO_LOG_PARAMETER).
+    - B(Note:) If O(bootstrap_secret) has been specified the C(SecretID) and C(ID) do not contain the secret but C(VALUE_SPECIFIED_IN_NO_LOG_PARAMETER).
       If you pass O(bootstrap_secret), make sure your playbook/role does not depend on this return value!
   returned: changed
   type: dict
diff --git a/plugins/modules/consul_agent_check.py b/plugins/modules/consul_agent_check.py
index ca1639063c..51d9715e88 100644
--- a/plugins/modules/consul_agent_check.py
+++ b/plugins/modules/consul_agent_check.py
@@ -17,9 +17,9 @@ description:
   - Allows the addition, modification and deletion of checks in a Consul cluster using the agent. For more details on using
     and configuring Checks, see U(https://developer.hashicorp.com/consul/api-docs/agent/check).
   - Currently, there is no complete way to retrieve the script, interval or TTL metadata for a registered check. Without this
-    metadata it is not possible to tell if the data supplied with ansible represents a change to a check. As a result this
-    does not attempt to determine changes and will always report a changed occurred. An API method is planned to supply this
-    metadata so at that stage change management will be added.
+    metadata it is not possible to tell if the data supplied with ansible represents a change to a check. As a result, the
+    module does not attempt to determine changes and it always reports that a change occurred. An API method is planned to supply
+    this metadata so at that stage change management is to be added.
 author:
   - Michael Ilg (@Ilgmi)
 extends_documentation_fragment:
@@ -36,8 +36,8 @@ attributes:
   diff_mode:
     support: partial
     details:
-      - In check mode the diff will show the object as it is defined in the module options and not the object structure of
-        the Consul API.
+      - In check mode the diff shows the object as it is defined in the module options and not the object structure of the
+        Consul API.
 options:
   state:
     description:
@@ -52,13 +52,13 @@ options:
   id:
     description:
       - Specifies a unique ID for this check on the node. This defaults to the O(name) parameter, but it may be necessary
-        to provide an ID for uniqueness. This value will return in the response as "CheckId".
+        to provide an ID for uniqueness. This value is returned in the response as V(CheckId).
     type: str
   interval:
     description:
-      - The interval at which the service check will be run. This is a number with a V(s) or V(m) suffix to signify the units
-        of seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s) will be used by default, for example
-        V(10) will be V(10s).
+      - The interval at which the service check is run. This is a number with a V(s) or V(m) suffix to signify the units of
+        seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s) is used by default, for example V(10)
+        is equivalent to V(10s).
       - Required if one of the parameters O(args), O(http), or O(tcp) is specified.
     type: str
 notes:
@@ -74,11 +74,11 @@ options:
     elements: str
   ttl:
     description:
-      - Checks can be registered with a TTL instead of a O(args) and O(interval) this means that the service will check in
-        with the agent before the TTL expires. If it does not the check will be considered failed. Required if registering
-        a check and the script an interval are missing Similar to the interval this is a number with a V(s) or V(m) suffix
-        to signify the units of seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s) will be used
-        by default, for example V(10) will be V(10s).
+      - Checks can be registered with a TTL instead of a O(args) and O(interval). This means that the service checks in with
+        the agent before the TTL expires. If it does not, the check is considered failed. Required if registering a check and
+        the script and interval are missing. Similar to the interval, this is a number with a V(s) or V(m) suffix to signify
+        the units of seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s) is used by default, for
+        example V(10) is equivalent to V(10s).
       - Mutually exclusive with O(args), O(tcp) and O(http).
     type: str
   tcp:
@@ -91,8 +91,8 @@
     version_added: '1.3.0'
   http:
     description:
-      - Checks can be registered with an HTTP endpoint. This means that Consul will check that the http endpoint returns a
-        successful HTTP status.
+      - Checks can be registered with an HTTP endpoint. This means that Consul checks that the HTTP endpoint returns a successful
+        HTTP status.
       - Requires O(interval) to be provided.
       - Mutually exclusive with O(args), O(ttl) and O(tcp).
     type: str
@@ -100,7 +100,7 @@
     description:
       - A custom HTTP check timeout. The Consul default is 10 seconds. Similar to the interval this is a number with a V(s)
        or V(m) suffix to signify the units of seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s)
-        will be used by default, for example V(10) will be V(10s).
+        is used by default, for example V(10) is equivalent to V(10s).
     type: str
   service_id:
     description:
diff --git a/plugins/modules/consul_agent_service.py b/plugins/modules/consul_agent_service.py
index 36be3f0d8f..882e45dceb 100644
--- a/plugins/modules/consul_agent_service.py
+++ b/plugins/modules/consul_agent_service.py
@@ -31,7 +31,7 @@ attributes:
   diff_mode:
     support: partial
     details:
-      - In check mode the diff will miss operational attributes.
+      - In check mode the diff misses operational attributes.
 options:
   state:
     description:
@@ -50,13 +50,13 @@ options:
     type: str
   tags:
     description:
-      - Tags that will be attached to the service registration.
+      - Tags that are attached to the service registration.
     type: list
     elements: str
   address:
     description:
-      - The address to advertise that the service will be listening on. This value will be passed as the C(address) parameter
-        to Consul's C(/v1/agent/service/register) API method, so refer to the Consul API documentation for further details.
+      - The address to advertise that the service listens on. This value is passed as the C(address) parameter to Consul's
+        C(/v1/agent/service/register) API method, so refer to the Consul API documentation for further details.
     type: str
   meta:
     description:
@@ -70,8 +70,8 @@
     type: int
   enable_tag_override:
     description:
-      - Specifies to disable the anti-entropy feature for this service's tags. If C(EnableTagOverride) is set to true then external
-        agents can update this service in the catalog and modify the tags.
+ - Specifies to disable the anti-entropy feature for this service's tags. If C(EnableTagOverride) is set to true then + external agents can update this service in the catalog and modify the tags. type: bool default: false weights: diff --git a/plugins/modules/consul_auth_method.py b/plugins/modules/consul_auth_method.py index a5cfd3b305..4658f906e3 100644 --- a/plugins/modules/consul_auth_method.py +++ b/plugins/modules/consul_auth_method.py @@ -29,7 +29,7 @@ attributes: diff_mode: support: partial details: - - In check mode the diff will miss operational attributes. + - In check mode the diff misses operational attributes. options: state: description: @@ -71,7 +71,7 @@ options: config: description: - The raw configuration to use for the chosen auth method. - - Contents will vary depending upon the type chosen. + - Contents vary depending upon the O(type) chosen. - Required when the auth method is created. type: dict """ diff --git a/plugins/modules/consul_binding_rule.py b/plugins/modules/consul_binding_rule.py index bce48323c7..0a4531fdf7 100644 --- a/plugins/modules/consul_binding_rule.py +++ b/plugins/modules/consul_binding_rule.py @@ -14,8 +14,8 @@ module: consul_binding_rule short_description: Manipulate Consul binding rules version_added: 8.3.0 description: - - Allows the addition, modification and deletion of binding rules in a Consul cluster using the agent. For more details on - using and configuring binding rules, see U(https://developer.hashicorp.com/consul/api-docs/acl/binding-rules). + - Allows the addition, modification and deletion of binding rules in a Consul cluster using the agent. For more details + on using and configuring binding rules, see U(https://developer.hashicorp.com/consul/api-docs/acl/binding-rules). author: - Florian Apolloner (@apollo13) extends_documentation_fragment: @@ -29,7 +29,7 @@ attributes: diff_mode: support: partial details: - - In check mode the diff will miss operational attributes. + - In check mode the diff misses operational attributes. options: state: description: diff --git a/plugins/modules/consul_kv.py b/plugins/modules/consul_kv.py index 61831c47fe..2987e71a86 100644 --- a/plugins/modules/consul_kv.py +++ b/plugins/modules/consul_kv.py @@ -36,12 +36,12 @@ options: state: description: - The action to take with the supplied key and value. If the state is V(present) and O(value) is set, the key contents - will be set to the value supplied and C(changed) will be set to V(true) only if the value was different to the current - contents. If the state is V(present) and O(value) is not set, the existing value associated to the key will be returned. - The state V(absent) will remove the key/value pair, again C(changed) will be set to V(true) only if the key actually - existed prior to the removal. An attempt can be made to obtain or free the lock associated with a key/value pair with - the states V(acquire) or V(release) respectively. A valid session must be supplied to make the attempt C(changed) will - be V(true) if the attempt is successful, V(false) otherwise. + is set to the value supplied and C(changed) is set to V(true) only if the value was different to the current contents. + If the state is V(present) and O(value) is not set, the existing value associated to the key is returned. The state + V(absent) is used to remove the key/value pair, again C(changed) is set to V(true) only if the key actually existed + prior to the removal. 
An attempt can be made to obtain or free the lock associated with a key/value pair with the
+        states V(acquire) or V(release) respectively. A valid session must be supplied to make the attempt. C(changed) is V(true)
+        if the attempt is successful, V(false) otherwise.
     type: str
     choices: [absent, acquire, present, release]
     default: present
@@ -73,9 +73,8 @@ options:
     type: str
   cas:
     description:
-      - Used when acquiring a lock with a session. If the O(cas) is V(0), then Consul will only put the key if it does not
-        already exist. If the O(cas) value is non-zero, then the key is only set if the index matches the ModifyIndex of that
-        key.
+      - Used when acquiring a lock with a session. If the O(cas) is V(0), then Consul only puts the key if it does not already
+        exist. If the O(cas) value is non-zero, then the key is only set if the index matches the ModifyIndex of that key.
     type: str
   flags:
     description:
@@ -103,8 +102,7 @@ options:
     default: true
   datacenter:
     description:
-      - The name of the datacenter to query. If unspecified, the query will default to the datacenter of the Consul agent
-        on O(host).
+      - The name of the datacenter to query. If unspecified, the query defaults to the datacenter of the Consul agent on O(host).
     type: str
     version_added: 10.0.0
 """
@@ -302,7 +300,7 @@ def main():
     module = AnsibleModule(
         argument_spec=dict(
             cas=dict(type='str'),
-            datacenter=dict(type='str', default=None),
+            datacenter=dict(type='str'),
             flags=dict(type='str'),
             key=dict(type='str', required=True, no_log=False),
             host=dict(type='str', default='localhost'),
diff --git a/plugins/modules/consul_policy.py b/plugins/modules/consul_policy.py
index c9758780b2..e009e44434 100644
--- a/plugins/modules/consul_policy.py
+++ b/plugins/modules/consul_policy.py
@@ -31,7 +31,7 @@ attributes:
     support: partial
     version_added: 8.3.0
     details:
-      - In check mode the diff will miss operational attributes.
+      - In check mode the diff misses operational attributes.
   action_group:
     version_added: 8.3.0
 options:
@@ -132,7 +132,7 @@ from ansible_collections.community.general.plugins.module_utils.consul import (
 _ARGUMENT_SPEC = {
     "name": dict(required=True),
-    "description": dict(required=False, type="str"),
+    "description": dict(type="str"),
     "rules": dict(type="str"),
     "valid_datacenters": dict(type="list", elements="str"),
     "state": dict(default="present", choices=["present", "absent"]),
diff --git a/plugins/modules/consul_role.py b/plugins/modules/consul_role.py
index f8b8d604ad..4efbef699a 100644
--- a/plugins/modules/consul_role.py
+++ b/plugins/modules/consul_role.py
@@ -14,8 +14,8 @@ module: consul_role
 short_description: Manipulate Consul roles
 version_added: 7.5.0
 description:
-  - Allows the addition, modification and deletion of roles in a Consul cluster using the agent. For more details on using and
-    configuring ACLs, see U(https://www.consul.io/docs/guides/acl.html).
+  - Allows the addition, modification and deletion of roles in a Consul cluster using the agent. For more details on using
+    and configuring ACLs, see U(https://www.consul.io/docs/guides/acl.html).
 author:
   - Håkon Lerring (@Hakon)
 extends_documentation_fragment:
@@ -29,7 +29,7 @@ attributes:
   diff_mode:
     support: partial
     details:
-      - In check mode the diff will miss operational attributes.
+      - In check mode the diff misses operational attributes.
     version_added: 8.3.0
   action_group:
     version_added: 8.3.0
@@ -48,15 +48,15 @@ options:
   description:
     description:
       - Description of the role.
-      - If not specified, the assigned description will not be changed.
+      - If not specified, the assigned description is not changed.
     type: str
   policies:
     type: list
     elements: dict
     description:
       - List of policies to attach to the role. Each policy is a dict.
-      - If the parameter is left blank, any policies currently assigned will not be changed.
-      - Any empty array (V([])) will clear any policies previously set.
+      - If the parameter is left blank, any policies currently assigned are not changed.
+      - Any empty array (V([])) clears any policies previously set.
     suboptions:
       name:
         description:
@@ -90,8 +90,8 @@ options:
     elements: dict
     description:
       - List of service identities to attach to the role.
-      - If not specified, any service identities currently assigned will not be changed.
-      - If the parameter is an empty array (V([])), any node identities assigned will be unassigned.
+      - If not specified, any service identities currently assigned are not changed.
+      - If the parameter is an empty array (V([])), any service identities assigned are unassigned.
     suboptions:
       service_name:
         description:
@@ -106,9 +106,9 @@
           - name
       datacenters:
         description:
-          - The datacenters the policies will be effective.
-          - This will result in effective policy only being valid in this datacenter.
-          - If an empty array (V([])) is specified, the policies will valid in all datacenters.
+          - The datacenters where the policies are effective.
+          - This results in effective policy only being valid in this datacenter.
+          - If an empty array (V([])) is specified, the policies are valid in all datacenters.
           - Including those which do not yet exist but may in the future.
         type: list
         elements: str
@@ -117,8 +117,8 @@
     elements: dict
     description:
       - List of node identities to attach to the role.
-      - If not specified, any node identities currently assigned will not be changed.
-      - If the parameter is an empty array (V([])), any node identities assigned will be unassigned.
+      - If not specified, any node identities currently assigned are not changed.
+      - If the parameter is an empty array (V([])), any node identities assigned are unassigned.
     suboptions:
       node_name:
         description:
@@ -134,7 +134,7 @@
       datacenter:
         description:
           - The nodes datacenter.
-          - This will result in effective policy only being valid in this datacenter.
+          - This results in effective policy only being valid in this datacenter.
         type: str
         required: true
 """
@@ -182,17 +182,20 @@ role:
   returned: success
   type: dict
   sample:
-    {
-      "CreateIndex": 39,
-      "Description": "",
-      "Hash": "Trt0QJtxVEfvTTIcdTUbIJRr6Dsi6E4EcwSFxx9tCYM=",
-      "ID": "9a300b8d-48db-b720-8544-a37c0f5dafb5",
-      "ModifyIndex": 39,
-      "Name": "foo-role",
-      "Policies": [
-        {"ID": "b1a00172-d7a1-0e66-a12e-7a4045c4b774", "Name": "foo-access"}
-      ]
-    }
+    {
+      "CreateIndex": 39,
+      "Description": "",
+      "Hash": "Trt0QJtxVEfvTTIcdTUbIJRr6Dsi6E4EcwSFxx9tCYM=",
+      "ID": "9a300b8d-48db-b720-8544-a37c0f5dafb5",
+      "ModifyIndex": 39,
+      "Name": "foo-role",
+      "Policies": [
+        {
+          "ID": "b1a00172-d7a1-0e66-a12e-7a4045c4b774",
+          "Name": "foo-access"
+        }
+      ]
+    }
 operation:
   description: The operation performed on the role.
   returned: changed
diff --git a/plugins/modules/consul_session.py b/plugins/modules/consul_session.py
index ac9b126eeb..637b09aff2 100644
--- a/plugins/modules/consul_session.py
+++ b/plugins/modules/consul_session.py
@@ -37,10 +37,11 @@ options:
     type: str
   state:
     description:
-      - Whether the session should be present, in other words it should be created if it does not exist, or absent, removed if present. If created,
-       a single session, all the sessions for a node or all available sessions can be retrieved by specifying V(info), V(node)
-       or V(list) for the O(state); for V(node) or V(info), the node O(name) or session O(id) is required as parameter.
+     - Whether the session should be present, in other words it should be created if it does not exist, or absent, removed
+       if present. If created, the O(id) for the session is returned in the output. If V(absent), O(id) is required to remove
+       the session. Info for a single session, all the sessions for a node or all available sessions can be retrieved by
+       specifying V(info), V(node) or V(list) for the O(state); for V(node) or V(info), the node O(name) or session O(id)
+       is required as parameter.
    choices: [absent, info, list, node, present]
    type: str
    default: present
@@ -56,7 +57,7 @@ options:
    default: 15
  node:
    description:
-     - The name of the node that with which the session will be associated. By default this is the name of the agent.
+     - The name of the node with which the session is associated. By default this is the name of the agent.
    type: str
  datacenter:
    description:
@@ -64,8 +65,8 @@ options:
    type: str
  checks:
    description:
-     - Checks that will be used to verify the session health. If all the checks fail, the session will be invalidated and
-       any locks associated with the session will be release and can be acquired once the associated lock delay has expired.
+     - Checks that are used to verify the session health. If all the checks fail, the session is invalidated and any locks
+       associated with the session are released and can be acquired once the associated lock delay has expired.
    type: list
    elements: str
  behavior:
diff --git a/plugins/modules/consul_token.py b/plugins/modules/consul_token.py
index dccfa2f7a3..1e5aa19f4c 100644
--- a/plugins/modules/consul_token.py
+++ b/plugins/modules/consul_token.py
@@ -29,7 +29,7 @@ attributes:
  diff_mode:
    support: partial
    details:
-     - In check mode the diff will miss operational attributes.
+     - In check mode the diff misses operational attributes.
  action_group:
    version_added: 8.3.0
options:
@@ -41,11 +41,11 @@ options:
    type: str
  accessor_id:
    description:
-     - Specifies a UUID to use as the token's Accessor ID. If not specified a UUID will be generated for this field.
+     - Specifies a UUID to use as the token's Accessor ID. If not specified, a UUID is generated for this field.
    type: str
  secret_id:
    description:
-     - Specifies a UUID to use as the token's Secret ID. If not specified a UUID will be generated for this field.
+     - Specifies a UUID to use as the token's Secret ID. If not specified, a UUID is generated for this field.
    type: str
  description:
    description:
@@ -56,8 +56,8 @@ options:
    elements: dict
    description:
      - List of policies to attach to the token. Each policy is a dict.
-     - If the parameter is left blank, any policies currently assigned will not be changed.
-     - Any empty array (V([])) will clear any policies previously set.
+     - If the parameter is left blank, any policies currently assigned are not changed.
+     - An empty array (V([])) clears any policies previously set.
    suboptions:
      name:
        description:
@@ -74,8 +74,8 @@ options:
    elements: dict
    description:
      - List of roles to attach to the token. Each role is a dict.
-     - If the parameter is left blank, any roles currently assigned will not be changed.
-     - Any empty array (V([])) will clear any roles previously set.
+     - If the parameter is left blank, any roles currently assigned are not changed.
+     - An empty array (V([])) clears any roles previously set.
    suboptions:
      name:
        description:
@@ -108,8 +108,8 @@ options:
    elements: dict
    description:
      - List of service identities to attach to the token.
-     - If not specified, any service identities currently assigned will not be changed.
-     - If the parameter is an empty array (V([])), any node identities assigned will be unassigned.
+     - If not specified, any service identities currently assigned are not changed.
+     - If the parameter is an empty array (V([])), any node identities assigned are unassigned.
    suboptions:
      service_name:
        description:
@@ -120,8 +120,8 @@ options:
        required: true
      datacenters:
        description:
-         - The datacenters the token will be effective.
-         - If an empty array (V([])) is specified, the token will valid in all datacenters.
+         - The datacenters where the token is effective.
+         - If an empty array (V([])) is specified, the token is valid in all datacenters.
          - Including those which do not yet exist but may in the future.
        type: list
        elements: str
@@ -130,8 +130,8 @@ options:
    elements: dict
    description:
      - List of node identities to attach to the token.
-     - If not specified, any node identities currently assigned will not be changed.
-     - If the parameter is an empty array (V([])), any node identities assigned will be unassigned.
+     - If not specified, any node identities currently assigned are not changed.
+     - If the parameter is an empty array (V([])), any node identities assigned are unassigned.
    suboptions:
      node_name:
        description:
@@ -143,7 +143,7 @@ options:
      datacenter:
        description:
          - The nodes datacenter.
-         - This will result in effective token only being valid in this datacenter.
+         - This results in effective token only being valid in this datacenter.
        type: str
        required: true
  local:
@@ -152,7 +152,7 @@ options:
    type: bool
  expiration_ttl:
    description:
-     - This is a convenience field and if set will initialize the C(expiration_time). Can be specified in the form of V(60s)
+     - This is a convenience field and if set it initializes the C(expiration_time). Can be specified in the form of V(60s)
        or V(5m) (that is, 60 seconds or 5 minutes, respectively). Ignored when the token is updated!
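# Illustrative sketch (not from the PR): creating a short-lived token with one
# attached policy, per the options above. The policy name and management token
# are placeholders.
- name: Create a short-lived ACL token with one policy
  community.general.consul_token:
    state: present
    description: Deploy token
    policies:
      - name: foo-access
    expiration_ttl: 5m
    token: "{{ consul_management_token }}"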
    type: str
"""
@@ -220,7 +220,7 @@ def normalize_link_obj(api_obj, module_obj, key):
    for obj in module_objs:
        identifier = obj.get("ID")
-       name = obj.get("Name)")
+       name = obj.get("Name")
        if identifier and not name and identifier in id_to_name:
            obj["Name"] = id_to_name[identifier]
        if not identifier and name and name in name_to_id:
diff --git a/plugins/modules/copr.py b/plugins/modules/copr.py
index 90cb931210..940fc0eedd 100644
--- a/plugins/modules/copr.py
+++ b/plugins/modules/copr.py
@@ -77,6 +77,13 @@ EXAMPLES = r"""
  community.general.copr:
    state: absent
    name: '@copr/integration_tests'
+
+- name: Install Caddy
+  community.general.copr:
+    name: '@caddy/caddy'
+    chroot: fedora-rawhide-{{ ansible_facts.architecture }}
+    includepkgs:
+      - caddy
"""

RETURN = r"""
@@ -487,8 +494,8 @@ def run_module():
        name=dict(type="str", required=True),
        state=dict(type="str", choices=["enabled", "disabled", "absent"], default="enabled"),
        chroot=dict(type="str"),
-       includepkgs=dict(type='list', elements="str", required=False),
-       excludepkgs=dict(type='list', elements="str", required=False),
+       includepkgs=dict(type='list', elements="str"),
+       excludepkgs=dict(type='list', elements="str"),
    )
    module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
    params = module.params
diff --git a/plugins/modules/cpanm.py b/plugins/modules/cpanm.py
index 3f708581ac..482183c0e0 100644
--- a/plugins/modules/cpanm.py
+++ b/plugins/modules/cpanm.py
@@ -56,6 +56,24 @@ options:
      - Only install dependencies.
    type: bool
    default: false
+  install_recommendations:
+    description:
+      - If V(true), installs dependencies declared as recommends per META spec.
+      - If V(false), it ensures the dependencies declared as recommends are not installed, overriding any decision made earlier
+        in E(PERL_CPANM_OPT).
+      - If the parameter is not set, C(cpanm) uses its existing defaults.
+      - When these dependencies fail to install, cpanm continues the installation, since they are just recommendations.
+    type: bool
+    version_added: 10.3.0
+  install_suggestions:
+    description:
+      - If V(true), installs dependencies declared as suggests per META spec.
+      - If V(false), it ensures the dependencies declared as suggests are not installed, overriding any decision made earlier
+        in E(PERL_CPANM_OPT).
+      - If the parameter is not set, C(cpanm) uses its existing defaults.
+      - When these dependencies fail to install, cpanm continues the installation, since they are just suggestions.
+    type: bool
+    version_added: 10.3.0
  version:
    description:
      - Version specification for the perl module. When O(mode) is V(new), C(cpanm) version operators are accepted.
@@ -81,14 +99,13 @@ options:
notes:
  - Please note that U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm, cpanm) must be installed on the remote host.
  - 'This module now comes with a choice of execution O(mode): V(compatibility) or V(new).'
- - 'O(mode=compatibility): When using V(compatibility) mode, the module will keep backward compatibility. This was the default
+ - 'O(mode=compatibility): When using V(compatibility) mode, the module keeps backward compatibility. This was the default
    mode before community.general 9.0.0. O(name) must be either a module name or a distribution file. If the perl module given
-   by O(name) is installed (at the exact O(version) when specified), then nothing happens. Otherwise, it will be installed
-   using the C(cpanm) executable. O(name) cannot be an URL, or a git URL. C(cpanm) version specifiers do not work in this
-   mode.'
- - 'O(mode=new): When using V(new) mode, the module will behave differently. The O(name) parameter may refer to a module
-   name, a distribution file, a HTTP URL or a git repository URL as described in C(cpanminus) documentation. C(cpanm) version
-   specifiers are recognized. This is the default mode from community.general 9.0.0 onwards.'
+   by O(name) is installed (at the exact O(version) when specified), then nothing happens. Otherwise, it is installed using
+   the C(cpanm) executable. O(name) cannot be a URL, or a git URL. C(cpanm) version specifiers do not work in this mode.'
+ - 'O(mode=new): When using V(new) mode, the module behaves differently. The O(name) parameter may refer to a module name,
+   a distribution file, an HTTP URL or a git repository URL as described in C(cpanminus) documentation. C(cpanm) version specifiers
+   are recognized. This is the default mode from community.general 9.0.0 onwards.'
seealso:
  - name: C(cpanm) command manual page
    description: Manual page for the command.
@@ -167,6 +184,8 @@ class CPANMinus(ModuleHelper):
        mirror=dict(type='str'),
        mirror_only=dict(type='bool', default=False),
        installdeps=dict(type='bool', default=False),
+       install_recommendations=dict(type='bool'),
+       install_suggestions=dict(type='bool'),
        executable=dict(type='path'),
        mode=dict(type='str', default='new', choices=['compatibility', 'new']),
        name_check=dict(type='str')
@@ -181,10 +200,11 @@ class CPANMinus(ModuleHelper):
        mirror=cmd_runner_fmt.as_opt_val('--mirror'),
        mirror_only=cmd_runner_fmt.as_bool("--mirror-only"),
        installdeps=cmd_runner_fmt.as_bool("--installdeps"),
+       install_recommendations=cmd_runner_fmt.as_bool("--with-recommends", "--without-recommends", ignore_none=True),
+       install_suggestions=cmd_runner_fmt.as_bool("--with-suggests", "--without-suggests", ignore_none=True),
        pkg_spec=cmd_runner_fmt.as_list(),
        cpanm_version=cmd_runner_fmt.as_fixed("--version"),
    )
-   use_old_vardict = False

    def __init_module__(self):
        v = self.vars
@@ -254,7 +274,16 @@ class CPANMinus(ModuleHelper):
            return
        pkg_spec = self.sanitize_pkg_spec_version(v[pkg_param], v.version)

-       with self.runner(['notest', 'locallib', 'mirror', 'mirror_only', 'installdeps', 'pkg_spec'], output_process=process) as ctx:
+       with self.runner([
+           'notest',
+           'locallib',
+           'mirror',
+           'mirror_only',
+           'installdeps',
+           'install_recommendations',
+           'install_suggestions',
+           'pkg_spec'
+       ], output_process=process) as ctx:
            self.changed = ctx.run(pkg_spec=pkg_spec)
diff --git a/plugins/modules/cronvar.py b/plugins/modules/cronvar.py
index 488e739704..5f7d02bfc3 100644
--- a/plugins/modules/cronvar.py
+++ b/plugins/modules/cronvar.py
@@ -43,12 +43,12 @@ options:
    type: str
  insertafter:
    description:
-     - If specified, the variable will be inserted after the variable specified.
+     - If specified, the variable is inserted after the variable specified.
      - Used with O(state=present).
    type: str
  insertbefore:
    description:
-     - Used with O(state=present). If specified, the variable will be inserted just before the variable specified.
+     - Used with O(state=present). If specified, the variable is inserted just before the variable specified.
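# Illustrative sketch (not from the PR): the new cpanm options described above;
# the module name is a placeholder.
- name: Install Dancer2 without the dependencies it merely recommends
  community.general.cpanm:
    name: Dancer2
    mode: new
    install_recommendations: false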
    type: str
  state:
    description:
@@ -135,6 +135,9 @@ class CronVar(object):
                self.cron_file = cron_file
            else:
                self.cron_file = os.path.join('/etc/cron.d', cron_file)
+           parent_dir = os.path.dirname(self.cron_file)
+           if parent_dir and not os.path.isdir(parent_dir):
+               module.fail_json(msg="Parent directory '{}' does not exist for cron_file: '{}'".format(parent_dir, cron_file))
        else:
            self.cron_file = None
@@ -146,9 +149,8 @@ class CronVar(object):
        if self.cron_file:
            # read the cronfile
            try:
-               f = open(self.cron_file, 'r')
-               self.lines = f.read().splitlines()
-               f.close()
+               with open(self.cron_file, 'r') as f:
+                   self.lines = f.read().splitlines()
            except IOError:
                # cron file does not exist
                return
@@ -394,6 +396,8 @@ def main():
    old_value = cronvar.find_variable(name)

    if ensure_present:
+       if value == "" and old_value != "":
+           value = '""'
        if old_value is None:
            cronvar.add_variable(name, value, insertbefore, insertafter)
            changed = True
diff --git a/plugins/modules/crypttab.py b/plugins/modules/crypttab.py
index b6a0e52cc3..5749d75cec 100644
--- a/plugins/modules/crypttab.py
+++ b/plugins/modules/crypttab.py
@@ -24,14 +24,14 @@ options:
  name:
    description:
      - Name of the encrypted block device as it appears in the C(/etc/crypttab) file, or optionally prefixed with V(/dev/mapper/),
-       as it appears in the filesystem. V(/dev/mapper/) will be stripped from O(name).
+       as it appears in the filesystem. V(/dev/mapper/) is stripped from O(name).
    type: str
    required: true
  state:
    description:
      - Use V(present) to add a line to C(/etc/crypttab) or update its definition if already present.
      - Use V(absent) to remove a line with matching O(name).
-     - Use V(opts_present) to add options to those already present; options with different values will be updated.
+     - Use V(opts_present) to add options to those already present; options with different values are updated.
      - Use V(opts_absent) to remove options from the existing set.
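# Illustrative sketch (not from the PR): the opts_present behaviour described
# above; the device name is a placeholder.
- name: Ensure discard is among the options for luks-data
  community.general.crypttab:
    name: luks-data
    state: opts_present
    opts: discard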
    type: str
    required: true
@@ -72,7 +72,15 @@ EXAMPLES = r"""
    state: opts_present
    opts: discard
  loop: '{{ ansible_mounts }}'
- when: "'/dev/mapper/luks-' in {{ item.device }}"
+ when: "'/dev/mapper/luks-' in item.device"
+
+- name: Add entry to /etc/crypttab for luks-home with password file
+  community.general.crypttab:
+    name: luks-home
+    backing_device: UUID=123e4567-e89b-12d3-a456-426614174000
+    password: /root/keys/luks-home.key
+    opts: discard,cipher=aes-cbc-essiv:sha256
+    state: present
"""

import os
@@ -116,7 +124,7 @@ def main():
                          ('backing_device', backing_device),
                          ('password', password),
                          ('opts', opts)):
-       if (arg is not None and (' ' in arg or '\t' in arg or arg == '')):
+       if arg is not None and (' ' in arg or '\t' in arg or arg == ''):
            module.fail_json(msg="invalid '%s': contains white space or is empty" % arg_name,
                             **module.params)
@@ -154,11 +162,8 @@ def main():
        changed, reason = existing_line.opts.remove(opts)

    if changed and not module.check_mode:
-       try:
-           f = open(path, 'wb')
+       with open(path, 'wb') as f:
            f.write(to_bytes(crypttab, errors='surrogate_or_strict'))
-       finally:
-           f.close()

    module.exit_json(changed=changed, msg=reason, **module.params)
@@ -173,12 +178,9 @@ class Crypttab(object):
                os.makedirs(os.path.dirname(path))
            open(path, 'a').close()

-       try:
-           f = open(path, 'r')
+       with open(path, 'r') as f:
            for line in f.readlines():
                self._lines.append(Line(line))
-       finally:
-           f.close()

    def add(self, line):
        self._lines.append(line)
diff --git a/plugins/modules/datadog_downtime.py b/plugins/modules/datadog_downtime.py
index f693ba3c2d..9e48410014 100644
--- a/plugins/modules/datadog_downtime.py
+++ b/plugins/modules/datadog_downtime.py
@@ -126,30 +126,31 @@ RETURN = r"""
# Returns the downtime JSON dictionary from the API response under the C(downtime) key.
# See https://docs.datadoghq.com/api/v1/downtimes/#schedule-a-downtime for more details.
downtime:
- description: The downtime returned by the API.
- type: dict
- returned: always
- sample: {
-     "active": true,
-     "canceled": null,
-     "creator_id": 1445416,
-     "disabled": false,
-     "downtime_type": 2,
-     "end": null,
-     "id": 1055751000,
-     "message": "Downtime for foo:bar",
-     "monitor_id": null,
-     "monitor_tags": [
-       "foo:bar"
-     ],
-     "parent_id": null,
-     "recurrence": null,
-     "scope": [
-       "test"
-     ],
-     "start": 1607015009,
-     "timezone": "UTC",
-     "updater_id": null
+ description: The downtime returned by the API.
+ type: dict
+ returned: always
+ sample:
+   {
+     "active": true,
+     "canceled": null,
+     "creator_id": 1445416,
+     "disabled": false,
+     "downtime_type": 2,
+     "end": null,
+     "id": 1055751000,
+     "message": "Downtime for foo:bar",
+     "monitor_id": null,
+     "monitor_tags": [
+       "foo:bar"
+     ],
+     "parent_id": null,
+     "recurrence": null,
+     "scope": [
+       "test"
+     ],
+     "start": 1607015009,
+     "timezone": "UTC",
+     "updater_id": null
    }
"""
@@ -174,18 +175,18 @@ def main():
    module = AnsibleModule(
        argument_spec=dict(
            api_key=dict(required=True, no_log=True),
-           api_host=dict(required=False, default="https://api.datadoghq.com"),
+           api_host=dict(default="https://api.datadoghq.com"),
            app_key=dict(required=True, no_log=True),
-           state=dict(required=False, choices=["present", "absent"], default="present"),
-           monitor_tags=dict(required=False, type="list", elements="str"),
-           scope=dict(required=False, type="list", elements="str"),
-           monitor_id=dict(required=False, type="int"),
-           downtime_message=dict(required=False, no_log=True),
-           start=dict(required=False, type="int"),
-           end=dict(required=False, type="int"),
-           timezone=dict(required=False, type="str"),
-           rrule=dict(required=False, type="str"),
-           id=dict(required=False, type="int"),
+           state=dict(choices=["present", "absent"], default="present"),
+           monitor_tags=dict(type="list", elements="str"),
+           scope=dict(type="list", elements="str"),
+           monitor_id=dict(type="int"),
+           downtime_message=dict(no_log=True),
+           start=dict(type="int"),
+           end=dict(type="int"),
+           timezone=dict(type="str"),
+           rrule=dict(type="str"),
+           id=dict(type="int"),
        )
    )
diff --git a/plugins/modules/datadog_event.py b/plugins/modules/datadog_event.py
index 97be0c9b16..fd75ea81de 100644
--- a/plugins/modules/datadog_event.py
+++ b/plugins/modules/datadog_event.py
@@ -16,7 +16,7 @@ __metaclass__ = type

DOCUMENTATION = r"""
module: datadog_event
-short_description: Posts events to Datadog service
+short_description: Posts events to Datadog service
description:
  - Allows to post events to Datadog (www.datadoghq.com) service.
  - Uses http://docs.datadoghq.com/api/#events API.
@@ -89,8 +89,8 @@ options:
      - An arbitrary string to use for aggregation.
  validate_certs:
    description:
-     - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using
-       self-signed certificates.
+     - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed
+       certificates.
    type: bool
    default: true
"""
diff --git a/plugins/modules/datadog_monitor.py b/plugins/modules/datadog_monitor.py
index eec0db0d32..f778d2444d 100644
--- a/plugins/modules/datadog_monitor.py
+++ b/plugins/modules/datadog_monitor.py
@@ -92,26 +92,26 @@ options:
    type: dict
    description:
      - Dictionary of scopes to silence, with timestamps or None.
-     - Each scope will be muted until the given POSIX timestamp or forever if the value is None.
+     - Each scope is muted until the given POSIX timestamp or forever if the value is V(None).
  notify_no_data:
    description:
-     - Whether this monitor will notify when data stops reporting.
+     - Whether this monitor notifies when data stops reporting.
    type: bool
    default: false
  no_data_timeframe:
    description:
-     - The number of minutes before a monitor will notify when data stops reporting.
+     - The number of minutes before a monitor notifies when data stops reporting.
      - Must be at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks.
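# Illustrative sketch (not from the PR): scheduling a downtime matching the
# sample above; the timestamp and the API/app keys are placeholders.
- name: Schedule a downtime for the test scope
  community.general.datadog_downtime:
    state: present
    scope:
      - test
    monitor_tags:
      - "foo:bar"
    start: 1607015009
    api_key: "{{ datadog_api_key }}"
    app_key: "{{ datadog_app_key }}"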
      - If not specified, it defaults to 2x timeframe for metric, 2 minutes for service.
    type: str
  timeout_h:
    description:
-     - The number of hours of the monitor not reporting data before it will automatically resolve from a triggered state.
+     - The number of hours of the monitor not reporting data before it automatically resolves from a triggered state.
    type: str
  renotify_interval:
    description:
-     - The number of minutes after the last notification before a monitor will re-notify on the current status.
-     - It will only re-notify if it is not resolved.
+     - The number of minutes after the last notification before a monitor re-notifies on the current status.
+     - It only re-notifies if it is not resolved.
    type: str
  escalation_message:
    description:
@@ -120,7 +120,7 @@ options:
    type: str
  notify_audit:
    description:
-     - Whether tagged users will be notified on changes to this monitor.
+     - Whether tagged users are notified on changes to this monitor.
    type: bool
    default: false
  thresholds:
@@ -138,7 +138,7 @@ options:
  require_full_window:
    description:
      - Whether this monitor needs a full window of data before it gets evaluated.
-     - We highly recommend you set this to False for sparse metrics, otherwise some evaluations will be skipped.
+     - We highly recommend you set this to V(false) for sparse metrics, otherwise some evaluations are skipped.
    type: bool
  new_host_delay:
    description:
@@ -153,7 +153,7 @@ options:
  id:
    description:
      - The ID of the alert.
-     - If set, will be used instead of the name to locate the alert.
+     - If set, it is used instead of O(name) to locate the alert.
    type: str
  include_tags:
    description:
@@ -275,14 +275,14 @@ def main():
        renotify_interval=dict(),
        escalation_message=dict(),
        notify_audit=dict(default=False, type='bool'),
-       thresholds=dict(type='dict', default=None),
-       tags=dict(type='list', elements='str', default=None),
+       thresholds=dict(type='dict'),
+       tags=dict(type='list', elements='str'),
        locked=dict(default=False, type='bool'),
        require_full_window=dict(type='bool'),
        new_host_delay=dict(),
        evaluation_delay=dict(),
        id=dict(),
-       include_tags=dict(required=False, default=True, type='bool'),
+       include_tags=dict(default=True, type='bool'),
        priority=dict(type='int'),
        notification_preset_name=dict(choices=['show_all', 'hide_query', 'hide_handles', 'hide_all']),
        renotify_occurrences=dict(type='int'),
@@ -435,7 +435,7 @@ def mute_monitor(module):
        module.fail_json(msg="Monitor %s not found!" % module.params['name'])
    elif monitor['options']['silenced']:
        module.fail_json(msg="Monitor is already muted. Datadog does not allow to modify muted alerts, consider unmuting it first.")
-   elif (module.params['silenced'] is not None and len(set(monitor['options']['silenced']) ^ set(module.params['silenced'])) == 0):
+   elif module.params['silenced'] is not None and len(set(monitor['options']['silenced']) ^ set(module.params['silenced'])) == 0:
        module.exit_json(changed=False)
    try:
        if module.params['silenced'] is None or module.params['silenced'] == "":
diff --git a/plugins/modules/dconf.py b/plugins/modules/dconf.py
index ccde7191f9..762c443130 100644
--- a/plugins/modules/dconf.py
+++ b/plugins/modules/dconf.py
@@ -17,10 +17,10 @@ short_description: Modify and read dconf database
description:
  - This module allows modifications and reading of C(dconf) database. The module is implemented as a wrapper around C(dconf)
    tool. Please see the dconf(1) man page for more details.
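# Illustrative sketch (not from the PR): the GVariant quoting convention used
# by dconf; note the single quotes kept inside the double-quoted value.
- name: Set 24-hour clock format in GNOME
  community.general.dconf:
    key: /org/gnome/desktop/interface/clock-format
    value: "'24h'"
    state: present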
- - Since C(dconf) requires a running D-Bus session to change values, the module will try to detect an existing session and
-   reuse it, or run the tool using C(dbus-run-session).
+ - Since C(dconf) requires a running D-Bus session to change values, the module tries to detect an existing session and reuse
+   it, or run the tool using C(dbus-run-session).
requirements:
- - Optionally the C(gi.repository) Python library (usually included in the OS on hosts which have C(dconf)); this will become
+ - Optionally the C(gi.repository) Python library (usually included in the OS on hosts which have C(dconf)); this is to become
    a non-optional requirement in a future major release of community.general.
notes:
  - This module depends on C(psutil) Python library (version 4.0.0 and upwards), C(dconf), C(dbus-send), and C(dbus-run-session)
@@ -28,11 +28,11 @@ notes:
  - This module uses the C(gi.repository) Python library when available for accurate comparison of values in C(dconf) to values
    specified in Ansible code. C(gi.repository) is likely to be present on most systems which have C(dconf) but may not be
    present everywhere. When it is missing, a simple string comparison between values is used, and there may be false positives,
-   that is, Ansible may think that a value is being changed when it is not. This fallback will be removed in a future version
+   that is, Ansible may think that a value is being changed when it is not. This fallback is to be removed in a future version
    of this module, at which point the module will stop working on hosts without C(gi.repository).
- - Detection of existing, running D-Bus session, required to change settings using C(dconf), is not 100% reliable due to implementation
-   details of D-Bus daemon itself. This might lead to running applications not picking-up changes on-the-fly if options are
-   changed using Ansible and C(dbus-run-session).
+ - Detection of existing, running D-Bus session, required to change settings using C(dconf), is not 100% reliable due to
+   implementation details of D-Bus daemon itself. This might lead to running applications not picking-up changes on-the-fly
+   if options are changed using Ansible and C(dbus-run-session).
  - Keep in mind that the C(dconf) CLI tool, which this module wraps around, utilises an unusual syntax for the values (GVariant).
    For example, if you wanted to provide a string value, the correct syntax would be O(value="'myvalue'") - with single quotes
    as part of the Ansible parameter value.
@@ -398,7 +398,7 @@ def main():
            state=dict(default='present', choices=['present', 'absent', 'read']),
            key=dict(required=True, type='str', no_log=False),
            # Converted to str below after special handling of bool.
-           value=dict(required=False, default=None, type='raw'),
+           value=dict(type='raw'),
        ),
        supports_check_mode=True,
        required_if=[
diff --git a/plugins/modules/decompress.py b/plugins/modules/decompress.py
index aa7a14aefb..03be61a8e6 100644
--- a/plugins/modules/decompress.py
+++ b/plugins/modules/decompress.py
@@ -33,13 +33,12 @@ options:
    required: true
  dest:
    description:
-     - The file name of the destination file where the compressed file will be decompressed.
-     - If the destination file exists, it will be truncated and overwritten.
-     - If not specified, the destination filename will be derived from O(src) by removing the compression format extension.
-       For example, if O(src) is V(/path/to/file.txt.gz) and O(format) is V(gz), O(dest) will be V(/path/to/file.txt). If
-       the O(src) file does not have an extension for the current O(format), the O(dest) filename will be made by appending
-       C(_decompressed) to the O(src) filename. For instance, if O(src) is V(/path/to/file.myextension), the (dest) filename
-       will be V(/path/to/file.myextension_decompressed).
+     - The file name of the destination file where the compressed file is decompressed.
+     - If the destination file exists, it is truncated and overwritten.
+     - If not specified, the destination filename is derived from O(src) by removing the compression format extension. For
+       example, when O(src) is V(/path/to/file.txt.gz) and O(format) is V(gz), O(dest) is V(/path/to/file.txt). If the O(src)
+       file does not have an extension for the current O(format), the O(dest) filename is made by appending C(_decompressed)
+       to the O(src) filename. For instance, when O(src) is V(/path/to/file.myextension), the O(dest) filename is V(/path/to/file.myextension_decompressed).
    type: path
  format:
    description:
@@ -132,7 +131,6 @@ def decompress(b_src, b_dest, handler):

class Decompress(ModuleHelper):
    destination_filename_template = "%s_decompressed"
-   use_old_vardict = False
    output_params = 'dest'

    module = dict(
diff --git a/plugins/modules/deploy_helper.py b/plugins/modules/deploy_helper.py
index 14a7d4f8c7..b25e68392b 100644
--- a/plugins/modules/deploy_helper.py
+++ b/plugins/modules/deploy_helper.py
@@ -18,8 +18,8 @@ short_description: Manages some of the steps common in deploying projects
description:
  - The Deploy Helper manages some of the steps common in deploying software. It creates a folder structure, manages a symlink
    for the current release and cleans up old releases.
- - Running it with the O(state=query) or O(state=present) will return the C(deploy_helper) fact. C(project_path), whatever
-   you set in the O(path) parameter, C(current_path), the path to the symlink that points to the active release, C(releases_path),
+ - Running it with the O(state=query) or O(state=present) returns the C(deploy_helper) fact. C(project_path), whatever you
+   set in the O(path) parameter, C(current_path), the path to the symlink that points to the active release, C(releases_path),
    the path to the folder to keep releases in, C(shared_path), the path to the folder to keep shared resources in, C(unfinished_filename),
    the file to check for to recognize unfinished builds, C(previous_release), the release the 'current' symlink is pointing
    to, C(previous_release_path), the full path to the 'current' symlink target, C(new_release), either the O(release) parameter
@@ -41,12 +41,12 @@ options:
    type: str
    description:
      - The state of the project.
-     - V(query) will only gather facts.
-     - V(present) will create the project C(root) folder, and in it the C(releases) and C(shared) folders.
-     - V(finalize) will remove the unfinished_filename file, create a symlink to the newly deployed release and optionally
-       clean old releases.
-     - V(clean) will remove failed & old releases.
-     - V(absent) will remove the project folder (synonymous to the M(ansible.builtin.file) module with O(state=absent)).
+     - V(query) gathers facts.
+     - V(present) creates the project C(root) folder, and in it the C(releases) and C(shared) folders.
+     - V(finalize) removes the unfinished_filename file, creates a symlink to the newly deployed release and optionally cleans
+       old releases.
+     - V(clean) removes failed & old releases.
+     - V(absent) removes the project folder (synonymous to the M(ansible.builtin.file) module with O(state=absent)).
    choices: [present, finalize, absent, clean, query]
    default: present
@@ -59,15 +59,15 @@ options:
  releases_path:
    type: str
    description:
-     - The name of the folder that will hold the releases. This can be relative to O(path) or absolute. Returned in the C(deploy_helper.releases_path)
+     - The name of the folder that holds the releases. This can be relative to O(path) or absolute. Returned in the C(deploy_helper.releases_path)
        fact.
    default: releases

  shared_path:
    type: path
    description:
-     - The name of the folder that will hold the shared resources. This can be relative to O(path) or absolute. If this is
-       set to an empty string, no shared folder will be created. Returned in the C(deploy_helper.shared_path) fact.
+     - The name of the folder that holds the shared resources. This can be relative to O(path) or absolute. If this is set
+       to an empty string, no shared folder is created. Returned in the C(deploy_helper.shared_path) fact.
    default: shared

  current_path:
    type: path
    description:
@@ -81,8 +81,8 @@ options:
    type: str
    description:
      - The name of the file that indicates a deploy has not finished. All folders in the O(releases_path) that contain this
-       file will be deleted on O(state=finalize) with O(clean=true), or O(state=clean). This file is automatically deleted
-       from the C(new_release_path) during O(state=finalize).
+       file are deleted on O(state=finalize) with O(clean=true), or O(state=clean). This file is automatically deleted from
+       the C(new_release_path) during O(state=finalize).
    default: DEPLOY_UNFINISHED

  clean:
@@ -95,16 +95,16 @@ options:
    type: int
    description:
      - The number of old releases to keep when cleaning. Used in O(state=finalize) and O(state=clean). Any unfinished builds
-       will be deleted first, so only correct releases will count. The current version will not count.
+       are deleted first, so only correct releases count. The current version does not count.
    default: 5

notes:
  - Facts are only returned for O(state=query) and O(state=present). If you use both, you should pass any overridden parameters
-   to both calls, otherwise the second call will overwrite the facts of the first one.
+   to both calls, otherwise the second call overwrites the facts of the first one.
  - When using O(state=clean), the releases are ordered by I(creation date). You should be able to switch to a new naming
    strategy without problems.
- - Because of the default behaviour of generating the C(new_release) fact, this module will not be idempotent unless you
-   pass your own release name with O(release). Due to the nature of deploying software, this should not be much of a problem.
+ - Because of the default behaviour of generating the C(new_release) fact, this module is not idempotent unless you pass
+   your own release name with O(release). Due to the nature of deploying software, this should not be much of a problem.
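# Illustrative sketch (not from the PR): the query/finalize flow described
# above; the project path is a placeholder.
- name: Initialize the project root and gather the deploy_helper facts
  community.general.deploy_helper:
    path: /srv/myapp

- name: Finalize the release and clean up old releases
  community.general.deploy_helper:
    path: /srv/myapp
    release: "{{ deploy_helper.new_release }}"
    state: finalize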
extends_documentation_fragment:
  - ansible.builtin.files
  - community.general.attributes
diff --git a/plugins/modules/dimensiondata_network.py b/plugins/modules/dimensiondata_network.py
index 6617d6aef1..04fff21e58 100644
--- a/plugins/modules/dimensiondata_network.py
+++ b/plugins/modules/dimensiondata_network.py
@@ -140,7 +140,7 @@ class DimensionDataNetworkModule(DimensionDataModule):
        module=AnsibleModule(
            argument_spec=DimensionDataModule.argument_spec_with_wait(
                name=dict(type='str', required=True),
-               description=dict(type='str', required=False),
+               description=dict(type='str'),
                service_plan=dict(default='ESSENTIALS', choices=['ADVANCED', 'ESSENTIALS']),
                state=dict(default='present', choices=['present', 'absent'])
            ),
diff --git a/plugins/modules/dimensiondata_vlan.py b/plugins/modules/dimensiondata_vlan.py
index d7aa72fcf2..b28b12d998 100644
--- a/plugins/modules/dimensiondata_vlan.py
+++ b/plugins/modules/dimensiondata_vlan.py
@@ -39,7 +39,7 @@ options:
    default: ''
  network_domain:
    description:
-     - The Id or name of the target network domain.
+     - The ID or name of the target network domain.
    required: true
    type: str
  private_ipv4_base_address:
    description:
@@ -56,8 +56,7 @@ options:
  state:
    description:
      - The desired state for the target VLAN.
-     - V(readonly) ensures that the state is only ever read, not modified (the module will fail if the resource does not
-       exist).
+     - V(readonly) ensures that the state is only ever read, not modified (the module fails if the resource does not exist).
    choices: [present, absent, readonly]
    default: present
    type: str
@@ -65,7 +64,7 @@ options:
    description:
      - Permit expansion of the target VLAN's network if the module parameters specify a larger network than the VLAN currently
        possesses.
-     - If V(false), the module will fail under these conditions.
+     - If V(false), the module fails under these conditions.
      - This is intended to prevent accidental expansion of a VLAN's network (since this operation is not reversible).
    type: bool
    default: false
@@ -187,7 +186,7 @@ class DimensionDataVlanModule(DimensionDataModule):
                network_domain=dict(required=True, type='str'),
                private_ipv4_base_address=dict(default='', type='str'),
                private_ipv4_prefix_size=dict(default=0, type='int'),
-               allow_expand=dict(required=False, default=False, type='bool'),
+               allow_expand=dict(default=False, type='bool'),
                state=dict(default='present', choices=['present', 'absent', 'readonly'])
            ),
            required_together=DimensionDataModule.required_together()
diff --git a/plugins/modules/django_check.py b/plugins/modules/django_check.py
index 9699428b9c..e6e03c8276 100644
--- a/plugins/modules/django_check.py
+++ b/plugins/modules/django_check.py
@@ -22,7 +22,7 @@ options:
  database:
    description:
      - Specify databases to run checks against.
-     - If not specified, Django will not run database tests.
+     - If not specified, Django does not run database tests.
    type: list
    elements: str
  deploy:
    description:
@@ -32,7 +32,7 @@ options:
    default: false
  fail_level:
    description:
-     - Message level that will trigger failure.
+     - Message level that triggers failure.
      - Default is the Django default value. Check the documentation for the version being used.
    type: str
    choices: [CRITICAL, ERROR, WARNING, INFO, DEBUG]
@@ -49,7 +49,7 @@ options:
    elements: str
notes:
  - The outcome of the module is found in the common return values RV(ignore:stdout), RV(ignore:stderr), RV(ignore:rc).
- - The module will fail if RV(ignore:rc) is not zero.
+ - The module fails if RV(ignore:rc) is not zero.
attributes:
  check_mode:
    support: full
diff --git a/plugins/modules/django_manage.py b/plugins/modules/django_manage.py
index dab544a29d..0fe07890f8 100644
--- a/plugins/modules/django_manage.py
+++ b/plugins/modules/django_manage.py
@@ -15,7 +15,7 @@ module: django_manage
short_description: Manages a Django application
description:
  - Manages a Django application using the C(manage.py) application frontend to C(django-admin). With the O(virtualenv) parameter,
-   all management commands will be executed by the given C(virtualenv) installation.
+   all management commands are executed by the given C(virtualenv) installation.
extends_documentation_fragment:
  - community.general.attributes
attributes:
@@ -34,8 +34,8 @@ options:
      - V(loaddata) - Searches for and loads the contents of the named O(fixtures) into the database.
      - V(migrate) - Synchronizes the database state with models and migrations.
      - V(test) - Runs tests for all installed apps.
-     - Other commands can be entered, but will fail if they are unknown to Django. Other commands that may prompt for user
-       input should be run with the C(--noinput) flag.
+     - Custom commands can be entered, but they fail unless they are known to Django. Custom commands that may prompt for
+       user input should be run with the C(--noinput) flag.
      - Support for the values V(cleanup), V(syncdb), V(validate) was removed in community.general 9.0.0. See note about supported
        versions of Django.
    type: str
@@ -62,7 +62,7 @@ options:
  virtualenv:
    description:
      - An optional path to a C(virtualenv) installation to use while running the manage application.
-     - The virtual environment must exist, otherwise the module will fail.
+     - The virtual environment must exist, otherwise the module fails.
    type: path
    aliases: [virtual_env]
  apps:
    description:
@@ -78,7 +78,7 @@ options:
  clear:
    description:
      - Clear the existing files before trying to copy or link the original file.
-     - Used only with the V(collectstatic) command. The C(--noinput) argument will be added automatically.
+     - Used only with the V(collectstatic) command. The C(--noinput) argument is added automatically.
    required: false
    default: false
    type: bool
@@ -101,18 +101,18 @@ options:
    required: false
  skip:
    description:
-     - Will skip over out-of-order missing migrations, you can only use this parameter with V(migrate) command.
+     - Skips over out-of-order missing migrations, you can only use this parameter with V(migrate) command.
    required: false
    type: bool
  merge:
    description:
-     - Will run out-of-order or missing migrations as they are not rollback migrations, you can only use this parameter with
+     - Runs out-of-order or missing migrations as they are not rollback migrations, you can only use this parameter with
        V(migrate) command.
    required: false
    type: bool
  link:
    description:
-     - Will create links to the files instead of copying them, you can only use this parameter with V(collectstatic) command.
+     - Creates links to the files instead of copying them, you can only use this parameter with V(collectstatic) command.
    required: false
    type: bool
  testrunner:
    description:
@@ -122,19 +122,13 @@ options:
    type: str
    required: false
    aliases: [test_runner]
- ack_venv_creation_deprecation:
-   description:
-     - This option no longer has any effect since community.general 9.0.0.
-     - It will be removed from community.general 11.0.0.
-   type: bool
-   version_added: 5.8.0

notes:
  - 'B(ATTENTION): Support for Django releases older than 4.1 has been removed in community.general version 9.0.0.
    While the module allows for free-form commands, not verifying the version of Django being used, it is B(strongly recommended)
    to use a more recent version of the framework.'
  - Please notice that Django 4.1 requires Python 3.8 or greater.
- - This module will not create a virtualenv if the O(virtualenv) parameter is specified and a virtual environment does not
+ - This module does not create a virtualenv if the O(virtualenv) parameter is specified and a virtual environment does not
    already exist at the given location. This behavior changed in community.general version 9.0.0.
  - The recommended way to create a virtual environment in Ansible is by using M(ansible.builtin.pip).
  - This module assumes English error messages for the V(createcachetable) command to detect table existence, unfortunately.
@@ -291,7 +285,6 @@ def main():
            skip=dict(type='bool'),
            merge=dict(type='bool'),
            link=dict(type='bool'),
-           ack_venv_creation_deprecation=dict(type='bool', removed_in_version='11.0.0', removed_from_collection='community.general'),
        ),
    )
diff --git a/plugins/modules/dnf_config_manager.py b/plugins/modules/dnf_config_manager.py
index 2d896f3742..eb64bee864 100644
--- a/plugins/modules/dnf_config_manager.py
+++ b/plugins/modules/dnf_config_manager.py
@@ -39,6 +39,8 @@ options:
    required: false
    type: str
    choices: [enabled, disabled]
+notes:
+ - Does not work with C(dnf5).
seealso:
  - module: ansible.builtin.dnf
  - module: ansible.builtin.yum_repository
@@ -118,7 +120,7 @@ changed_repos:
  returned: success
  type: list
  elements: str
- sample: ['crb']
+ sample: ["crb"]
"""

from ansible.module_utils.basic import AnsibleModule
@@ -173,8 +175,8 @@ def pack_repo_states_for_return(states):

def main():
    module_args = dict(
-       name=dict(type='list', elements='str', required=False, default=[]),
-       state=dict(type='str', required=False, choices=['enabled', 'disabled'], default='enabled')
+       name=dict(type='list', elements='str', default=[]),
+       state=dict(type='str', choices=['enabled', 'disabled'], default='enabled')
    )

    result = dict(
diff --git a/plugins/modules/dnf_versionlock.py b/plugins/modules/dnf_versionlock.py
index 07a85c11c2..b3e2e2bcc9 100644
--- a/plugins/modules/dnf_versionlock.py
+++ b/plugins/modules/dnf_versionlock.py
@@ -22,9 +22,9 @@ attributes:
    support: partial
    details:
      - The logic of the C(versionlock) plugin for corner cases could be confusing, so please take into account that this module
-       will do its best to give a C(check_mode) prediction on what is going to happen. In case of doubt, check the documentation
+       does its best to give a C(check_mode) prediction on what is going to happen. In case of doubt, check the documentation
        of the plugin.
-     - Sometimes the module could predict changes in C(check_mode) that will not be such because C(versionlock) concludes
+     - Sometimes the module could predict changes in C(check_mode) that are not fulfilled because C(versionlock) concludes
        that there is already an entry in C(locklist) that already matches.
  diff_mode:
    support: none
@@ -47,12 +47,12 @@ options:
  state:
    description:
      - Whether to add (V(present) or V(excluded)) to or remove (V(absent) or V(clean)) from the C(locklist).
-     - V(present) will add a package name spec to the C(locklist). If there is a installed package that matches, then only
-       that version will be added. Otherwise, all available package versions will be added.
-     - V(excluded) will add a package name spec as excluded to the C(locklist). It means that packages represented by the
-       package name spec will be excluded from transaction operations. All available package versions will be added.
-     - V(absent) will delete entries in the C(locklist) that match the package name spec.
-     - V(clean) will delete all entries in the C(locklist). This option is mutually exclusive with O(name).
+     - V(present) adds a package name spec to the C(locklist). If there is an installed package that matches, then only that
+       version is added. Otherwise, all available package versions are added.
+     - V(excluded) adds a package name spec as excluded to the C(locklist). It means that packages represented by the package
+       name spec are excluded from transaction operations. All available package versions are added.
+     - V(absent) deletes entries in the C(locklist) that match the package name spec.
+     - V(clean) deletes all entries in the C(locklist). This option is mutually exclusive with O(name).
    choices: ['absent', 'clean', 'excluded', 'present']
    type: str
    default: present
@@ -60,6 +60,7 @@ notes:
  - In an ideal world, the C(versionlock) plugin would have a dry-run option to know for sure what is going to happen. So
    far we have to work with a best guess as close as possible to the behaviour inferred from its code.
  - For most of cases where you want to lock and unlock specific versions of a package, this works fairly well.
+ - Does not work with C(dnf5).
requirements:
  - dnf
  - dnf-plugin-versionlock
@@ -82,12 +83,12 @@ EXAMPLES = r"""

- name: Remove lock from nginx to be updated again
  community.general.dnf_versionlock:
-   package: nginx
+   name: nginx
    state: absent

- name: Exclude bind 32:9.11 from installs or updates
  community.general.dnf_versionlock:
-   package: bind-32:9.11*
+   name: bind-32:9.11*
    state: excluded

- name: Keep bash package in major version 4
@@ -107,25 +108,25 @@ locklist_pre:
  returned: success
  type: list
  elements: str
- sample: ['bash-0:4.4.20-1.el8_4.*', '!bind-32:9.11.26-4.el8_4.*']
+ sample: ["bash-0:4.4.20-1.el8_4.*", "!bind-32:9.11.26-4.el8_4.*"]
locklist_post:
  description: Locklist after module execution.
  returned: success and (not check mode or state is clean)
  type: list
  elements: str
- sample: ['bash-0:4.4.20-1.el8_4.*']
+ sample: ["bash-0:4.4.20-1.el8_4.*"]
specs_toadd:
  description: Package name specs meant to be added by versionlock.
  returned: success
  type: list
  elements: str
- sample: ['bash']
+ sample: ["bash"]
specs_todelete:
  description: Package name specs meant to be deleted by versionlock.
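# Illustrative sketch (not from the PR): locking the installed version of a
# package, per the state semantics described above.
- name: Lock the currently installed version of nginx
  community.general.dnf_versionlock:
    name: nginx
    state: present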
  returned: success
  type: list
  elements: str
- sample: ['bind']
+ sample: ["bind"]
"""

from ansible.module_utils.basic import AnsibleModule
@@ -221,6 +222,43 @@ def get_packages(module, patterns, only_installed=False):
    return packages_available_map_name_evrs


+def get_package_mgr():
+    for bin_path in (DNF_BIN,):
+        if os.path.exists(bin_path):
+            return "dnf5" if os.path.realpath(bin_path) == "/usr/bin/dnf5" else "dnf"
+    # fallback to dnf
+    return "dnf"
+
+
+def get_package_list(module, package_mgr="dnf"):
+    if package_mgr == "dnf":
+        return do_versionlock(module, "list").split()
+
+    package_list = []
+    if package_mgr == "dnf5":
+        stanza_start = False
+        package_name = None
+        for line in do_versionlock(module, "list").splitlines():
+            if line.startswith(("#", " ")):
+                continue
+            if line.startswith("Package name:"):
+                stanza_start = True
+                dummy, name = line.split(":", 1)
+                name = name.strip()
+                pkg_name = get_packages(module, patterns=[name])
+                package_name = "%s-%s.*" % (name, pkg_name[name].pop())
+                if package_name and package_name not in package_list:
+                    package_list.append(package_name)
+            if line.startswith("evr"):
+                dummy, package_version = line.split("=", 1)
+                package_version = package_version.strip()
+                if stanza_start:
+                    if package_name and package_name not in package_list:
+                        package_list.append(package_name)
+                    stanza_start = False
+    return package_list
+
+
def main():
    module = AnsibleModule(
        argument_spec=dict(
@@ -239,9 +277,10 @@ def main():
    msg = ""

    # Check module pre-requisites.
-   if not os.path.exists(DNF_BIN):
-       module.fail_json(msg="%s was not found" % DNF_BIN)
-   if not os.path.exists(VERSIONLOCK_CONF):
+   global DNF_BIN
+   DNF_BIN = module.get_bin_path('dnf', True)
+   package_mgr = get_package_mgr()
+   if package_mgr == "dnf" and not os.path.exists(VERSIONLOCK_CONF):
        module.fail_json(msg="plugin versionlock is required")

    # Check incompatible options.
@@ -250,7 +289,7 @@ def main():
    if state != "clean" and not patterns:
        module.fail_json(msg="name list is required for %s state" % state)

-   locklist_pre = do_versionlock(module, "list").split()
+   locklist_pre = get_package_list(module, package_mgr=package_mgr)

    specs_toadd = []
    specs_todelete = []
@@ -329,7 +368,7 @@ def main():
        "specs_todelete": specs_todelete
    }
    if not module.check_mode:
-       response["locklist_post"] = do_versionlock(module, "list").split()
+       response["locklist_post"] = get_package_list(module, package_mgr=package_mgr)
    else:
        if state == "clean":
            response["locklist_post"] = []
diff --git a/plugins/modules/dnsimple.py b/plugins/modules/dnsimple.py
index c72208f5c5..729c876841 100644
--- a/plugins/modules/dnsimple.py
+++ b/plugins/modules/dnsimple.py
@@ -25,8 +25,8 @@ attributes:
options:
  account_email:
    description:
-     - Account email. If omitted, the environment variables E(DNSIMPLE_EMAIL) and E(DNSIMPLE_API_TOKEN) will be looked for.
-     - "If those variables are not found, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)."
+     - Account email. If omitted, the environment variables E(DNSIMPLE_EMAIL) and E(DNSIMPLE_API_TOKEN) are looked for.
+     - 'If those variables are not found, a C(.dnsimple) file is looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started).'
      - C(.dnsimple) config files are only supported in dnsimple-python<2.0.0.
    type: str
  account_api_token:
@@ -36,12 +36,12 @@ options:
  domain:
    description:
      - Domain to work with. Can be the domain name (for example V(mydomain.com)) or the numeric ID of the domain in DNSimple.
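# Illustrative sketch (not from the PR): creating a record with dnsimple; the
# domain and address are placeholders, and credentials are assumed to come from
# the DNSIMPLE_EMAIL and DNSIMPLE_API_TOKEN environment variables.
- name: Create an A record for www in mydomain.com
  community.general.dnsimple:
    domain: mydomain.com
    record: www
    type: A
    value: 192.0.2.10
    state: present
  delegate_to: localhost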
-     - If omitted, a list of domains will be returned.
-     - If domain is present but the domain does not exist, it will be created.
+     - If omitted, a list of domains is returned.
+     - If domain is present but the domain does not exist, it is created.
    type: str
  record:
    description:
-     - Record to add, if blank a record for the domain will be created, supports the wildcard (*).
+     - Record to add, if blank a record for the domain is created, supports the wildcard (*).
    type: str
  record_ids:
    description:
@@ -51,8 +51,23 @@ options:
  type:
    description:
      - The type of DNS record to create.
-   choices: ['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL',
-     'CAA']
+   choices:
+     - A
+     - ALIAS
+     - CNAME
+     - MX
+     - SPF
+     - URL
+     - TXT
+     - NS
+     - SRV
+     - NAPTR
+     - PTR
+     - AAAA
+     - SSHFP
+     - HINFO
+     - POOL
+     - CAA
    type: str
  ttl:
    description:
@@ -151,7 +166,7 @@ EXAMPLES = r"""
  delegate_to: localhost
"""

-RETURN = r"""# """
+RETURN = r"""#"""

import traceback
import re
diff --git a/plugins/modules/dnsimple_info.py b/plugins/modules/dnsimple_info.py
index c508525fac..78b4ceae25 100644
--- a/plugins/modules/dnsimple_info.py
+++ b/plugins/modules/dnsimple_info.py
@@ -26,8 +26,8 @@ options:
  name:
    description:
      - The domain name to retrieve info from.
-     - Will return all associated records for this domain if specified.
-     - If not specified, will return all domains associated with the account ID.
+     - Returns all associated records for this domain if specified.
+     - If not specified, returns all domains associated with the account ID.
    type: str

  account_id:
    description:
@@ -43,7 +43,7 @@ options:
  record:
    description:
      - The record to find.
-     - If specified, only this record will be returned instead of all records.
+     - If specified, only this record is returned instead of all records.
    required: false
    type: str
diff --git a/plugins/modules/dnsmadeeasy.py b/plugins/modules/dnsmadeeasy.py
index b04b58787e..ec17880af7 100644
--- a/plugins/modules/dnsmadeeasy.py
+++ b/plugins/modules/dnsmadeeasy.py
@@ -37,8 +37,8 @@ options:
  domain:
    description:
-     - Domain to work with. Can be the domain name (for example V(mydomain.com)) or the numeric ID of the domain in DNS Made Easy
-       (for example V(839989)) for faster resolution.
+     - Domain to work with. Can be the domain name (for example V(mydomain.com)) or the numeric ID of the domain in DNS Made
+       Easy (for example V(839989)) for faster resolution.
    required: true
    type: str

@@ -50,7 +50,7 @@ options:
  record_name:
    description:
-     - Record name to get/create/delete/update. If record_name is not specified; all records for the domain will be returned
+     - Record name to get/create/delete/update. If O(record_name) is not specified; all records for the domain are returned
        in "result" regardless of the state argument.
    type: str

@@ -64,8 +64,8 @@ options:
    description:
      - 'Record value. HTTPRED: <redirection URL>, MX: <priority> <target name>, NS: <name server>, PTR: <target name>, SRV:
        <priority> <weight> <port> <target name>, TXT: <text value>".'
-     - If record_value is not specified; no changes will be made and the record will be returned in 'result' (in other words,
-       this module can be used to fetch a record's current id, type, and ttl).
+     - If O(record_value) is not specified; no changes are made and the record is returned in RV(ignore:result) (in other
+       words, this module can be used to fetch a record's current ID, type, and TTL).
    type: str

  record_ttl:
    description:
@@ -83,8 +83,8 @@ options:
  validate_certs:
    description:
-     - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using
-       self-signed certificates.
+     - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed
+       certificates.
    type: bool
    default: true

@@ -128,7 +128,7 @@ options:
  contactList:
    description:
-     - Name or id of the contact list that the monitor will notify.
+     - Name or ID of the contact list that the monitor notifies.
      - The default V('') means the Account Owner.
    type: str

@@ -195,7 +195,7 @@ notes:
  - Only A records can have a O(monitor) or O(failover).
  - To add failover, the O(failover), O(autoFailover), O(port), O(protocol), O(ip1), and O(ip2) options are required.
  - To add monitor, the O(monitor), O(port), O(protocol), O(maxEmails), O(systemDescription), and O(ip1) options are required.
- - The monitor and the failover will share O(port), O(protocol), and O(ip1) options.
+ - The O(monitor) and O(failover) options share O(port), O(protocol), and O(ip1).
requirements: [hashlib, hmac]
author: "Brice Burgess (@briceburg)"
"""
@@ -553,28 +553,28 @@ def main():
        domain=dict(required=True),
        sandbox=dict(default=False, type='bool'),
        state=dict(required=True, choices=['present', 'absent']),
-       record_name=dict(required=False),
-       record_type=dict(required=False, choices=[
+       record_name=dict(),
+       record_type=dict(choices=[
            'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT']),
-       record_value=dict(required=False),
-       record_ttl=dict(required=False, default=1800, type='int'),
+       record_value=dict(),
+       record_ttl=dict(default=1800, type='int'),
        monitor=dict(default=False, type='bool'),
        systemDescription=dict(default=''),
        maxEmails=dict(default=1, type='int'),
        protocol=dict(default='HTTP', choices=['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']),
        port=dict(default=80, type='int'),
        sensitivity=dict(default='Medium', choices=['Low', 'Medium', 'High']),
-       contactList=dict(default=None),
-       httpFqdn=dict(required=False),
-       httpFile=dict(required=False),
-       httpQueryString=dict(required=False),
+       contactList=dict(),
+       httpFqdn=dict(),
+       httpFile=dict(),
+       httpQueryString=dict(),
        failover=dict(default=False, type='bool'),
        autoFailover=dict(default=False, type='bool'),
-       ip1=dict(required=False),
-       ip2=dict(required=False),
-       ip3=dict(required=False),
-       ip4=dict(required=False),
-       ip5=dict(required=False),
+       ip1=dict(),
+       ip2=dict(),
+       ip3=dict(),
+       ip4=dict(),
+       ip5=dict(),
        validate_certs=dict(default=True, type='bool'),
    ),
    required_together=[
diff --git a/plugins/modules/dpkg_divert.py b/plugins/modules/dpkg_divert.py
index 5783e2b314..6ef1f394e4 100644
--- a/plugins/modules/dpkg_divert.py
+++ b/plugins/modules/dpkg_divert.py
@@ -17,8 +17,8 @@ author:
  - quidame (@quidame)
description:
  - A diversion is for C(dpkg) the knowledge that only a given package (or the local administrator) is allowed to install
-   a file at a given location. Other packages shipping their own version of this file will be forced to O(divert) it, that
-   is to install it at another location. It allows one to keep changes in a file provided by a debian package by preventing
+   a file at a given location. Other packages shipping their own version of this file are forced to O(divert) it, that is
+   to install it at another location. It allows one to keep changes in a file provided by a debian package by preventing
    it being overwritten on package upgrade.
  - This module manages diversions of debian packages files using the C(dpkg-divert) commandline tool.
    It can either create or remove a diversion for a given file, but also update an existing diversion to modify its O(holder) and/or its O(divert)
@@ -33,8 +33,8 @@ options:
  path:
    description:
-     - The original and absolute path of the file to be diverted or undiverted. This path is unique, in other words it is not possible
-       to get two diversions for the same O(path).
+     - The original and absolute path of the file to be diverted or undiverted. This path is unique, in other words it is
+       not possible to get two diversions for the same O(path).
    required: true
    type: path
  state:
@@ -54,7 +54,7 @@ options:
    type: str
  divert:
    description:
-     - The location where the versions of file will be diverted.
+     - The location where the versions of file are diverted.
      - Default is to add suffix C(.distrib) to the file path.
      - This parameter is ignored when O(state=absent).
    type: path
@@ -69,8 +69,8 @@ options:
    default: false
  force:
    description:
-     - When O(rename=true) and O(force=true), renaming is performed even if the target of the renaming exists, in other words the existing
-       contents of the file at this location will be lost.
+     - When O(rename=true) and O(force=true), renaming is performed even if the target of the renaming exists, in other words
+       the existing contents of the file at this location are lost.
      - This parameter is ignored when O(rename=false).
    type: bool
    default: false
@@ -166,11 +166,11 @@ def main():
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(required=True, type='path'),
-           state=dict(required=False, type='str', default='present', choices=['absent', 'present']),
-           holder=dict(required=False, type='str'),
-           divert=dict(required=False, type='path'),
-           rename=dict(required=False, type='bool', default=False),
-           force=dict(required=False, type='bool', default=False),
+           state=dict(type='str', default='present', choices=['absent', 'present']),
+           holder=dict(type='str'),
+           divert=dict(type='path'),
+           rename=dict(type='bool', default=False),
+           force=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
    )
diff --git a/plugins/modules/easy_install.py b/plugins/modules/easy_install.py
index 734f0dc4df..8d0a39333e 100644
--- a/plugins/modules/easy_install.py
+++ b/plugins/modules/easy_install.py
@@ -33,8 +33,8 @@ options:
      - An optional O(virtualenv) directory path to install into. If the O(virtualenv) does not exist, it is created automatically.
  virtualenv_site_packages:
    description:
-     - Whether the virtual environment will inherit packages from the global site-packages directory. Note that if this setting
-       is changed on an already existing virtual environment it will not have any effect, the environment must be deleted
+     - Whether the virtual environment inherits packages from the global site-packages directory. Note that this setting
+       has no effect on an already existing virtual environment, so if you want to change it, the environment must be deleted
        and newly created.
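# Illustrative sketch (not from the PR): diverting a file so a package upgrade
# cannot overwrite it, per the dpkg_divert description above; the path is a
# placeholder.
- name: Divert /etc/foo.conf to /etc/foo.conf.distrib, renaming any existing file
  community.general.dpkg_divert:
    path: /etc/foo.conf
    rename: true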
type: bool default: false @@ -74,6 +74,12 @@ EXAMPLES = r""" community.general.easy_install: name: bottle virtualenv: /webapps/myapp/venv + +- name: Install a Python package using pyvenv as the virtualenv tool + community.general.easy_install: + name: package_name + virtualenv: /opt/myenv + virtualenv_command: pyvenv """ import os @@ -127,14 +133,13 @@ def _get_easy_install(module, env=None, executable=None): def main(): arg_spec = dict( name=dict(required=True), - state=dict(required=False, - default='present', + state=dict(default='present', choices=['present', 'latest'], type='str'), - virtualenv=dict(default=None, required=False), + virtualenv=dict(), virtualenv_site_packages=dict(default=False, type='bool'), - virtualenv_command=dict(default='virtualenv', required=False), - executable=dict(default='easy_install', required=False), + virtualenv_command=dict(default='virtualenv'), + executable=dict(default='easy_install'), ) module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) diff --git a/plugins/modules/elasticsearch_plugin.py b/plugins/modules/elasticsearch_plugin.py index f7b73b8323..8552b55ccd 100644 --- a/plugins/modules/elasticsearch_plugin.py +++ b/plugins/modules/elasticsearch_plugin.py @@ -66,7 +66,7 @@ options: type: bool plugin_bin: description: - - Location of the plugin binary. If this file is not found, the default plugin binaries will be used. + - Location of the plugin binary. If this file is not found, the default plugin binaries are used. type: path plugin_dir: description: @@ -83,7 +83,7 @@ options: type: str version: description: - - Version of the plugin to be installed. If plugin exists with previous version, it will NOT be updated. + - Version of the plugin to be installed. If the plugin exists with a previous version, it is NOT updated.
type: str """ @@ -163,33 +163,38 @@ def parse_error(string): def install_plugin(module, plugin_bin, plugin_name, version, src, url, proxy_host, proxy_port, timeout, force): - cmd_args = [plugin_bin, PACKAGE_STATE_MAP["present"]] + cmd = [plugin_bin, PACKAGE_STATE_MAP["present"]] is_old_command = (os.path.basename(plugin_bin) == 'plugin') # Timeout and version are only valid for plugin, not elasticsearch-plugin if is_old_command: if timeout: - cmd_args.append("--timeout %s" % timeout) + cmd.append("--timeout") + cmd.append(timeout) if version: plugin_name = plugin_name + '/' + version - cmd_args[2] = plugin_name + cmd[2] = plugin_name if proxy_host and proxy_port: - cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % (proxy_host, proxy_port)) + java_opts = ["-Dhttp.proxyHost=%s" % proxy_host, + "-Dhttp.proxyPort=%s" % proxy_port, + "-Dhttps.proxyHost=%s" % proxy_host, + "-Dhttps.proxyPort=%s" % proxy_port] + module.run_command_environ_update = dict(CLI_JAVA_OPTS=" ".join(java_opts), # Elasticsearch 8.x + ES_JAVA_OPTS=" ".join(java_opts)) # Older Elasticsearch versions # Legacy ES 1.x if url: - cmd_args.append("--url %s" % url) + cmd.append("--url") + cmd.append(url) if force: - cmd_args.append("--batch") + cmd.append("--batch") if src: - cmd_args.append(src) + cmd.append(src) else: - cmd_args.append(plugin_name) - - cmd = " ".join(cmd_args) + cmd.append(plugin_name) if module.check_mode: rc, out, err = 0, "check mode", "" @@ -204,9 +209,7 @@ def install_plugin(module, plugin_bin, plugin_name, version, src, url, proxy_hos def remove_plugin(module, plugin_bin, plugin_name): - cmd_args = [plugin_bin, PACKAGE_STATE_MAP["absent"], parse_plugin_repo(plugin_name)] - - cmd = " ".join(cmd_args) + cmd = [plugin_bin, PACKAGE_STATE_MAP["absent"], parse_plugin_repo(plugin_name)] if module.check_mode: rc, out, err = 0, "check mode", "" @@ -256,15 +259,15 @@ def main(): argument_spec=dict( name=dict(required=True), state=dict(default="present", choices=list(PACKAGE_STATE_MAP.keys())), - src=dict(default=None), - url=dict(default=None), + src=dict(), + url=dict(), timeout=dict(default="1m"), force=dict(type='bool', default=False), plugin_bin=dict(type="path"), plugin_dir=dict(default="/usr/share/elasticsearch/plugins/", type="path"), - proxy_host=dict(default=None), - proxy_port=dict(default=None), - version=dict(default=None) + proxy_host=dict(), + proxy_port=dict(), + version=dict() ), mutually_exclusive=[("src", "url")], supports_check_mode=True diff --git a/plugins/modules/emc_vnx_sg_member.py b/plugins/modules/emc_vnx_sg_member.py index cbdb0f4442..a0b1e920e2 100644 --- a/plugins/modules/emc_vnx_sg_member.py +++ b/plugins/modules/emc_vnx_sg_member.py @@ -38,7 +38,7 @@ options: type: str lunid: description: - - Lun id to be added. + - LUN ID to be added. required: true type: int state: @@ -77,7 +77,7 @@ EXAMPLES = r""" RETURN = r""" hluid: - description: LUNID that hosts attached to the storage group will see. + description: LUN ID visible to hosts attached to the storage group.
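For context on the elasticsearch_plugin proxy rework above (proxy settings now flow through the C(CLI_JAVA_OPTS)/C(ES_JAVA_OPTS) environment variables instead of bare C(-DproxyHost) arguments), here is a sketch of a task that would exercise that code path; the plugin name and proxy endpoint are placeholders.

```yaml
# Illustrative only: proxy.example.com is a placeholder endpoint.
- name: Install a plugin through an HTTP(S) proxy
  community.general.elasticsearch_plugin:
    name: analysis-icu
    state: present
    proxy_host: proxy.example.com
    proxy_port: "8080"
```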
type: int returned: success """ diff --git a/plugins/modules/facter.py b/plugins/modules/facter.py index ce9320282d..20be3d4a4d 100644 --- a/plugins/modules/facter.py +++ b/plugins/modules/facter.py @@ -62,7 +62,7 @@ from ansible.module_utils.basic import AnsibleModule def main(): module = AnsibleModule( argument_spec=dict( - arguments=dict(required=False, type='list', elements='str') + arguments=dict(type='list', elements='str') ) ) diff --git a/plugins/modules/filesystem.py b/plugins/modules/filesystem.py index 2edc8be5ab..f14458c337 100644 --- a/plugins/modules/filesystem.py +++ b/plugins/modules/filesystem.py @@ -64,10 +64,10 @@ options: description: - If V(true), if the block device and filesystem size differ, grow the filesystem into the space. - Supported for C(bcachefs), C(btrfs), C(ext2), C(ext3), C(ext4), C(ext4dev), C(f2fs), C(lvm), C(xfs), C(ufs) and C(vfat) - filesystems. Attempts to resize other filesystem types will fail. + filesystems. Attempts to resize other filesystem types fail. - XFS Will only grow if mounted. Currently, the module is based on commands from C(util-linux) package to perform operations, + - XFS only grows if mounted. Currently, the module is based on commands from the C(util-linux) package to perform operations, so resizing of XFS is not supported on FreeBSD systems. - - VFAT will likely fail if C(fatresize < 1.04). + - VFAT is likely to fail if C(fatresize < 1.04). - Mutually exclusive with O(uuid). type: bool default: false @@ -82,7 +82,7 @@ options: - See xfs_admin(8) (C(xfs)), tune2fs(8) (C(ext2), C(ext3), C(ext4), C(ext4dev)) for possible values. - For O(fstype=lvm) the value is ignored, it resets the PV UUID if set. - Supported for O(fstype) being one of C(bcachefs), C(ext2), C(ext3), C(ext4), C(ext4dev), C(lvm), or C(xfs). - - This is B(not idempotent). Specifying this option will always result in a change. + - This is B(not idempotent). Specifying this option always results in a change. - Mutually exclusive with O(resizefs). type: str version_added: 7.1.0 @@ -633,7 +633,7 @@ def main(): opts=dict(type='str'), force=dict(type='bool', default=False), resizefs=dict(type='bool', default=False), - uuid=dict(type='str', required=False), + uuid=dict(type='str'), ), required_if=[ ('state', 'present', ['fstype']) diff --git a/plugins/modules/flatpak.py b/plugins/modules/flatpak.py index 13898c3349..98de9de3ed 100644 --- a/plugins/modules/flatpak.py +++ b/plugins/modules/flatpak.py @@ -27,7 +27,7 @@ attributes: check_mode: support: partial details: - - If O(state=latest), the module will always return C(changed=true). + - If O(state=latest), the module always returns RV(ignore:changed=true). diff_mode: support: none options: @@ -53,7 +53,7 @@ options: - When supplying a reverse DNS name, you can use the O(remote) option to specify on what remote to look for the flatpak. An example for a reverse DNS name is C(org.gnome.gedit). - When used with O(state=absent) or O(state=latest), it is recommended to specify the name in the reverse DNS format. - - When supplying a URL with O(state=absent) or O(state=latest), the module will try to match the installed flatpak based + - When supplying a URL with O(state=absent) or O(state=latest), the module tries to match the installed flatpak based on the name of the flatpakref to remove or update it. However, there is no guarantee that the names of the flatpakref file and the reverse DNS name of the installed flatpak do match.
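A short task illustrating the resizefs semantics documented above. This is a sketch, not part of the patch: the device path is hypothetical, and for XFS the filesystem would additionally have to be mounted.

```yaml
# Illustrative only: /dev/vdb1 is a placeholder device.
- name: Grow an ext4 filesystem to fill its (already enlarged) device
  community.general.filesystem:
    fstype: ext4
    dev: /dev/vdb1
    resizefs: true
```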
type: list @@ -107,6 +107,12 @@ EXAMPLES = r""" state: present remote: gnome +- name: Install GIMP using custom flatpak binary path + community.general.flatpak: + name: org.gimp.GIMP + state: present + executable: /usr/local/bin/flatpak-dev + - name: Install multiple packages community.general.flatpak: name: @@ -165,26 +165,6 @@ command: returned: When a flatpak command has been executed type: str sample: "/usr/bin/flatpak install --user --noninteractive flathub org.gnome.Calculator" -msg: - description: Module error message. - returned: failure - type: str - sample: "Executable '/usr/local/bin/flatpak' was not found on the system." -rc: - description: Return code from flatpak binary. - returned: When a flatpak command has been executed - type: int - sample: 0 -stderr: - description: Error output from flatpak binary. - returned: When a flatpak command has been executed - type: str - sample: "error: Error searching remote flathub: Can't find ref org.gnome.KDE" -stdout: - description: Output from flatpak binary. - returned: When a flatpak command has been executed - type: str - sample: "org.gnome.Calendar/x86_64/stable\tcurrent\norg.gnome.gitg/x86_64/stable\tcurrent\n" """ from ansible.module_utils.six.moves.urllib.parse import urlparse diff --git a/plugins/modules/flatpak_remote.py b/plugins/modules/flatpak_remote.py index ba202d3033..641ce930d0 100644 --- a/plugins/modules/flatpak_remote.py +++ b/plugins/modules/flatpak_remote.py @@ -17,7 +17,7 @@ description: - Allows users to add or remove flatpak remotes. - The flatpak remotes concept is comparable to what is called repositories in other packaging formats. - Currently, remote addition is only supported using C(flatpakrepo) file URLs. - - Existing remotes will not be updated. + - Existing remotes are not updated. - See the M(community.general.flatpak) module for managing flatpaks. author: - John Kwiatkoski (@JayKayy) @@ -56,8 +56,8 @@ options: name: description: - The desired name for the flatpak remote to be registered under on the managed host. - - When used with O(state=present), the remote will be added to the managed host under the specified O(name). - - When used with O(state=absent) the remote with that name will be removed. + - When used with O(state=present), the remote is added to the managed host under the specified O(name). + - When used with O(state=absent), the remote with that name is removed. type: str required: true state: @@ -112,26 +112,6 @@ command: returned: When a flatpak command has been executed type: str sample: "/usr/bin/flatpak remote-add --system flatpak-test https://dl.flathub.org/repo/flathub.flatpakrepo" -msg: - description: Module error message. - returned: failure - type: str - sample: "Executable '/usr/local/bin/flatpak' was not found on the system." -rc: - description: Return code from flatpak binary. - returned: When a flatpak command has been executed - type: int - sample: 0 -stderr: - description: Error output from flatpak binary. - returned: When a flatpak command has been executed - type: str - sample: "error: GPG verification enabled, but no summary found (check that the configured URL in remote config is correct)\n" -stdout: - description: Output from flatpak binary.
- returned: When a flatpak command has been executed - type: str - sample: "flathub\tFlathub\thttps://dl.flathub.org/repo/\t1\t\n" """ from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/gconftool2.py b/plugins/modules/gconftool2.py index 55c624420d..ef5b4381c2 100644 --- a/plugins/modules/gconftool2.py +++ b/plugins/modules/gconftool2.py @@ -15,8 +15,8 @@ author: - Kenneth D. Evensen (@kevensen) short_description: Edit GNOME Configurations description: - - This module allows for the manipulation of GNOME 2 Configuration using C(gconftool-2). Please see the gconftool-2(1) man pages - for more details. + - This module allows for the manipulation of GNOME 2 Configuration using C(gconftool-2). Please see the gconftool-2(1) man + pages for more details. seealso: - name: C(gconftool-2) command manual page description: Manual page for the command. @@ -128,7 +128,6 @@ class GConftool(StateModuleHelper): ], supports_check_mode=True, ) - use_old_vardict = False def __init_module__(self): self.runner = gconftool2_runner(self.module, check_rc=True) diff --git a/plugins/modules/gconftool2_info.py b/plugins/modules/gconftool2_info.py index 29965be46b..3f6aa7b2e3 100644 --- a/plugins/modules/gconftool2_info.py +++ b/plugins/modules/gconftool2_info.py @@ -67,7 +67,6 @@ class GConftoolInfo(ModuleHelper): ), supports_check_mode=True, ) - use_old_vardict = False def __init_module__(self): self.runner = gconftool2_runner(self.module, check_rc=True) diff --git a/plugins/modules/gem.py b/plugins/modules/gem.py index c01433cb90..1ea9c68a94 100644 --- a/plugins/modules/gem.py +++ b/plugins/modules/gem.py @@ -48,7 +48,7 @@ options: repository: type: str description: - - The repository from which the gem will be installed. + - The repository from which the gem is installed. required: false aliases: [source] user_install: @@ -65,7 +65,7 @@ options: install_dir: type: path description: - - Install the gems into a specific directory. These gems will be independent from the global installed ones. Specifying + - Install the gems into a specific directory. These gems are independent of the globally installed ones. Specifying this requires user_install to be false.
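To illustrate the install_dir/user_install coupling described above, a hypothetical task (the target directory is an example value):

```yaml
# Illustrative only: install_dir requires user_install to be false.
- name: Install rake into a self-contained gem directory
  community.general.gem:
    name: rake
    state: present
    user_install: false
    install_dir: /opt/gems
```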
required: false bindir: @@ -295,22 +295,22 @@ def main(): module = AnsibleModule( argument_spec=dict( - executable=dict(required=False, type='path'), - gem_source=dict(required=False, type='path'), - include_dependencies=dict(required=False, default=True, type='bool'), + executable=dict(type='path'), + gem_source=dict(type='path'), + include_dependencies=dict(default=True, type='bool'), name=dict(required=True, type='str'), - repository=dict(required=False, aliases=['source'], type='str'), - state=dict(required=False, default='present', choices=['present', 'absent', 'latest'], type='str'), - user_install=dict(required=False, default=True, type='bool'), - install_dir=dict(required=False, type='path'), + repository=dict(aliases=['source'], type='str'), + state=dict(default='present', choices=['present', 'absent', 'latest'], type='str'), + user_install=dict(default=True, type='bool'), + install_dir=dict(type='path'), bindir=dict(type='path'), norc=dict(type='bool', default=True), - pre_release=dict(required=False, default=False, type='bool'), - include_doc=dict(required=False, default=False, type='bool'), - env_shebang=dict(required=False, default=False, type='bool'), - version=dict(required=False, type='str'), - build_flags=dict(required=False, type='str'), - force=dict(required=False, default=False, type='bool'), + pre_release=dict(default=False, type='bool'), + include_doc=dict(default=False, type='bool'), + env_shebang=dict(default=False, type='bool'), + version=dict(type='str'), + build_flags=dict(type='str'), + force=dict(default=False, type='bool'), ), supports_check_mode=True, mutually_exclusive=[['gem_source', 'repository'], ['gem_source', 'version']], diff --git a/plugins/modules/gio_mime.py b/plugins/modules/gio_mime.py index 216b7faae0..b8864ea3e3 100644 --- a/plugins/modules/gio_mime.py +++ b/plugins/modules/gio_mime.py @@ -26,12 +26,12 @@ options: mime_type: description: - - MIME type for which a default handler will be set. + - MIME type for which a default handler is set. type: str required: true handler: description: - - Default handler will be set for the MIME type. + - The default handler to be set for the MIME type. type: str required: true notes: @@ -61,18 +61,6 @@ handler: returned: success type: str sample: google-chrome.desktop -stdout: - description: - - The output of the C(gio) command. - returned: success - type: str - sample: Set google-chrome.desktop as the default for x-scheme-handler/https -stderr: - description: - - The error output of the C(gio) command. - returned: failure - type: str - sample: 'gio: Failed to load info for handler "never-existed.desktop"' version: description: Version of gio. type: str @@ -94,7 +82,6 @@ class GioMime(ModuleHelper): ), supports_check_mode=True, ) - use_old_vardict = False def __init_module__(self): self.runner = gio_mime_runner(self.module, check_rc=True) diff --git a/plugins/modules/git_config.py b/plugins/modules/git_config.py index 6a6eff0be2..93ca6265b9 100644 --- a/plugins/modules/git_config.py +++ b/plugins/modules/git_config.py @@ -31,17 +31,11 @@ attributes: diff_mode: support: none options: - list_all: - description: - - List all settings (optionally limited to a given O(scope)). - - This option is B(deprecated) and will be removed from community.general 11.0.0. Please use M(community.general.git_config_info) - instead. - type: bool - default: false name: description: - - The name of the setting. If no value is supplied, the value will be read from the config if it has been set. + - The name of the setting.
type: str + required: true repo: description: - Path to a git repository for reading and writing values from a specific repo. @@ -57,7 +51,7 @@ options: - This is required when setting config values. - If this is set to V(local), you must also specify the O(repo) parameter. - If this is set to V(file), you must also specify the O(file) parameter. - - It defaults to system only when not using O(list_all=true). + - It defaults to system. choices: ["file", "local", "global", "system"] type: str state: @@ -70,7 +64,7 @@ options: value: description: - When specifying the name of a single setting, supply a value to set that setting to the given value. - - From community.general 11.0.0 on, O(value) will be required if O(state=present). To read values, use the M(community.general.git_config_info) + - From community.general 11.0.0 on, O(value) is required if O(state=present). To read values, use the M(community.general.git_config_info) module instead. type: str add_mode: @@ -144,21 +138,6 @@ EXAMPLES = r""" """ RETURN = r""" -config_value: - description: When O(list_all=false) and value is not set, a string containing the value of the setting in name. - returned: success - type: str - sample: "vim" - -config_values: - description: When O(list_all=true), a dict containing key/value pairs of multiple configuration settings. - returned: success - type: dict - sample: - core.editor: "vim" - color.ui: "auto" - alias.diffc: "diff --cached" - alias.remotev: "remote -v" """ from ansible.module_utils.basic import AnsibleModule @@ -167,21 +146,19 @@ from ansible.module_utils.basic import AnsibleModule def main(): module = AnsibleModule( argument_spec=dict( - list_all=dict(required=False, type='bool', default=False, removed_in_version='11.0.0', removed_from_collection='community.general'), - name=dict(type='str'), + name=dict(type='str', required=True), repo=dict(type='path'), file=dict(type='path'), - add_mode=dict(required=False, type='str', default='replace-all', choices=['add', 'replace-all']), - scope=dict(required=False, type='str', choices=['file', 'local', 'global', 'system']), - state=dict(required=False, type='str', default='present', choices=['present', 'absent']), - value=dict(required=False), + add_mode=dict(type='str', default='replace-all', choices=['add', 'replace-all']), + scope=dict(type='str', choices=['file', 'local', 'global', 'system']), + state=dict(type='str', default='present', choices=['present', 'absent']), + value=dict(), ), - mutually_exclusive=[['list_all', 'name'], ['list_all', 'value'], ['list_all', 'state']], required_if=[ ('scope', 'local', ['repo']), - ('scope', 'file', ['file']) + ('scope', 'file', ['file']), + ('state', 'present', ['value']), ], - required_one_of=[['list_all', 'name']], supports_check_mode=True, ) git_path = module.get_bin_path('git', True) @@ -196,13 +173,8 @@ def main(): new_value = params['value'] or '' add_mode = params['add_mode'] - if not unset and not new_value and not params['list_all']: - module.deprecate( - 'If state=present, a value must be specified from community.general 11.0.0 on.' - ' To read a config value, use the community.general.git_config_info module instead.', - version='11.0.0', - collection_name='community.general', - ) + if not unset and not new_value: + module.fail_json(msg="If state=present, a value must be specified. 
Use the community.general.git_config_info module to read a config value.") scope = determine_scope(params) cwd = determine_cwd(scope, params) @@ -217,33 +189,18 @@ def main(): list_args = list(base_args) - if params['list_all']: - list_args.append('-l') - - if name: - list_args.append("--get-all") - list_args.append(name) + list_args.append("--get-all") + list_args.append(name) (rc, out, err) = module.run_command(list_args, cwd=cwd, expand_user_and_vars=False) - if params['list_all'] and scope and rc == 128 and 'unable to read config file' in err: - # This just means nothing has been set at the given scope - module.exit_json(changed=False, msg='', config_values={}) - elif rc >= 2: + if rc >= 2: # If the return code is 1, it just means the option hasn't been set yet, which is fine. module.fail_json(rc=rc, msg=err, cmd=' '.join(list_args)) old_values = out.rstrip().splitlines() - if params['list_all']: - config_values = {} - for value in old_values: - k, v = value.split('=', 1) - config_values[k] = v - module.exit_json(changed=False, msg='', config_values=config_values) - elif not new_value and not unset: - module.exit_json(changed=False, msg='', config_value=old_values[0] if old_values else '') - elif unset and not out: + if unset and not out: module.exit_json(changed=False, msg='no setting to unset') elif new_value in old_values and (len(old_values) == 1 or add_mode == "add") and not unset: module.exit_json(changed=False, msg="") @@ -286,30 +243,22 @@ def main(): def determine_scope(params): if params['scope']: return params['scope'] - elif params['list_all']: - return "" - else: - return 'system' + return 'system' def build_diff_value(value): if not value: return "\n" - elif len(value) == 1: + if len(value) == 1: return value[0] + "\n" - else: - return value + return value def determine_cwd(scope, params): if scope == 'local': return params['repo'] - elif params['list_all'] and params['repo']: - # Include local settings from a specific repo when listing all available settings - return params['repo'] - else: - # Run from root directory to avoid accidentally picking up any local config settings - return "/" + # Run from root directory to avoid accidentally picking up any local config settings + return "/" if __name__ == '__main__': diff --git a/plugins/modules/git_config_info.py b/plugins/modules/git_config_info.py index c8152cfa42..29922382de 100644 --- a/plugins/modules/git_config_info.py +++ b/plugins/modules/git_config_info.py @@ -26,7 +26,7 @@ options: name: description: - The name of the setting to read. - - If not provided, all settings will be returned as RV(config_values). + - If not provided, all settings are returned as RV(config_values). type: str path: description: @@ -94,8 +94,8 @@ config_values: description: - This is a dictionary mapping a git configuration setting to a list of its values. - When O(name) is not set, all configuration settings are returned here. - - When O(name) is set, only the setting specified in O(name) is returned here. If that setting is not set, the key will - still be present, and its value will be an empty list. + - When O(name) is set, only the setting specified in O(name) is returned here. If that setting is not set, the key is + still present, and its value is an empty list. 
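The git_config rework above turns the old deprecation into a hard requirement: O(state=present) now needs O(value), and reads move to git_config_info. A sketch of the resulting usage (setting name and value are examples):

```yaml
# Illustrative only: value is now mandatory with state=present.
- name: Write a setting
  community.general.git_config:
    name: core.editor
    scope: global
    value: vim

# Reads now go through the info module instead.
- name: Read a setting back
  community.general.git_config_info:
    name: core.editor
    scope: global
```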
returned: success type: dict sample: @@ -113,7 +113,7 @@ def main(): argument_spec=dict( name=dict(type="str"), path=dict(type="path"), - scope=dict(required=False, type="str", default="system", choices=["global", "system", "local", "file"]), + scope=dict(type="str", default="system", choices=["global", "system", "local", "file"]), ), required_if=[ ("scope", "local", ["path"]), diff --git a/plugins/modules/github_deploy_key.py b/plugins/modules/github_deploy_key.py index 509a67c491..4ec7fbb769 100644 --- a/plugins/modules/github_deploy_key.py +++ b/plugins/modules/github_deploy_key.py @@ -57,8 +57,8 @@ options: type: str read_only: description: - - If V(true), the deploy key will only be able to read repository contents. Otherwise, the deploy key will be able to - read and write. + - If V(true), the deploy key is only able to read repository contents. Otherwise, the deploy key is able to read and + write. type: bool default: true state: @@ -259,7 +259,12 @@ class GithubDeployKey(object): key_id = response_body["id"] self.module.exit_json(changed=True, msg="Deploy key successfully added", id=key_id) elif status_code == 422: - self.module.exit_json(changed=False, msg="Deploy key already exists") + # there might be multiple reasons for a 422 + # so we must check if the reason is that the key already exists + if self.get_existing_key(): + self.module.exit_json(changed=False, msg="Deploy key already exists") + else: + self.handle_error(method="POST", info=info) else: self.handle_error(method="POST", info=info) @@ -296,18 +301,18 @@ class GithubDeployKey(object): def main(): module = AnsibleModule( argument_spec=dict( - github_url=dict(required=False, type='str', default="https://api.github.com"), + github_url=dict(type='str', default="https://api.github.com"), owner=dict(required=True, type='str', aliases=['account', 'organization']), repo=dict(required=True, type='str', aliases=['repository']), name=dict(required=True, type='str', aliases=['title', 'label']), key=dict(required=True, type='str', no_log=False), - read_only=dict(required=False, type='bool', default=True), + read_only=dict(type='bool', default=True), state=dict(default='present', choices=['present', 'absent']), - force=dict(required=False, type='bool', default=False), - username=dict(required=False, type='str'), - password=dict(required=False, type='str', no_log=True), - otp=dict(required=False, type='int', no_log=True), - token=dict(required=False, type='str', no_log=True) + force=dict(type='bool', default=False), + username=dict(type='str'), + password=dict(type='str', no_log=True), + otp=dict(type='int', no_log=True), + token=dict(type='str', no_log=True) ), mutually_exclusive=[ ['password', 'token'] diff --git a/plugins/modules/github_key.py b/plugins/modules/github_key.py index f3d5863d54..80b0a6bf70 100644 --- a/plugins/modules/github_key.py +++ b/plugins/modules/github_key.py @@ -14,6 +14,7 @@ module: github_key short_description: Manage GitHub access keys description: - Creates, removes, or updates GitHub access keys. + - Works with both GitHub.com and GitHub Enterprise Server installations. extends_documentation_fragment: - community.general.attributes attributes: @@ -44,10 +45,16 @@ options: type: str force: description: - - The default is V(true), which will replace the existing remote key if it is different than O(pubkey). If V(false), - the key will only be set if no key with the given O(name) exists. + - The default is V(true), which replaces the existing remote key if it is different than O(pubkey). 
If V(false), the + key is only set if no key with the given O(name) exists. type: bool default: true + api_url: + description: + - URL to the GitHub API if not using github.com but your own GitHub Enterprise instance. + type: str + default: 'https://api.github.com' + version_added: "11.0.0" author: Robert Estelle (@erydo) """ @@ -57,20 +64,42 @@ deleted_keys: description: An array of key objects that were deleted. Only present on state=absent. type: list returned: When state=absent - sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', - 'read_only': false}] + sample: + [ + { + "id": 0, + "key": "BASE64 encoded key", + "url": "http://example.com/github key", + "created_at": "YYYY-MM-DDTHH:MM:SZ", + "read_only": false + } + ] matching_keys: description: An array of keys matching the specified name. Only present on state=present. type: list returned: When state=present - sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', - 'read_only': false}] + sample: + [ + { + "id": 0, + "key": "BASE64 encoded key", + "url": "http://example.com/github key", + "created_at": "YYYY-MM-DDTHH:MM:SZ", + "read_only": false + } + ] key: description: Metadata about the key just created. Only present on state=present. type: dict returned: success - sample: {'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', - 'read_only': false} + sample: + { + "id": 0, + "key": "BASE64 encoded key", + "url": "http://example.com/github key", + "created_at": "YYYY-MM-DDTHH:MM:SZ", + "read_only": false + } """ EXAMPLES = r""" @@ -91,6 +120,14 @@ EXAMPLES = r""" name: Access Key for Some Machine token: '{{ github_access_token }}' pubkey: "{{ lookup('ansible.builtin.file', '/home/foo/.ssh/id_rsa.pub') }}" + +# GitHub Enterprise Server usage +- name: Authorize key with GitHub Enterprise + community.general.github_key: + name: Access Key for Some Machine + token: '{{ github_enterprise_token }}' + pubkey: "{{ lookup('ansible.builtin.file', '/home/foo/.ssh/id_rsa.pub') }}" + api_url: 'https://github.company.com/api/v3' """ import datetime @@ -105,9 +142,6 @@ from ansible_collections.community.general.plugins.module_utils.datetime import ) -API_BASE = 'https://api.github.com' - - class GitHubResponse(object): def __init__(self, response, info): self.content = response.read() @@ -127,9 +161,10 @@ class GitHubResponse(object): class GitHubSession(object): - def __init__(self, module, token): + def __init__(self, module, token, api_url): self.module = module self.token = token + self.api_url = api_url.rstrip('/') def request(self, method, url, data=None): headers = { @@ -147,7 +182,7 @@ class GitHubSession(object): def get_all_keys(session): - url = API_BASE + '/user/keys' + url = session.api_url + '/user/keys' result = [] while url: r = session.request('GET', url) @@ -171,7 +206,7 @@ def create_key(session, name, pubkey, check_mode): else: return session.request( 'POST', - API_BASE + '/user/keys', + session.api_url + '/user/keys', data=json.dumps({'title': name, 'key': pubkey})).json() @@ -180,7 +215,7 @@ def delete_keys(session, to_delete, check_mode): return for key in to_delete: - session.request('DELETE', API_BASE + '/user/keys/%s' % key["id"]) + session.request('DELETE', session.api_url + '/user/keys/%s' % key["id"]) def ensure_key_absent(session, name, check_mode): @@ -228,6 +263,7 @@ def main(): 'pubkey': {}, 'state': {'choices': 
['present', 'absent'], 'default': 'present'}, 'force': {'default': True, 'type': 'bool'}, + 'api_url': {'default': 'https://api.github.com', 'type': 'str'}, } module = AnsibleModule( argument_spec=argument_spec, @@ -239,6 +275,7 @@ def main(): state = module.params['state'] force = module.params['force'] pubkey = module.params.get('pubkey') + api_url = module.params.get('api_url') if pubkey: pubkey_parts = pubkey.split(' ') @@ -248,7 +285,7 @@ def main(): elif state == 'present': module.fail_json(msg='"pubkey" is required when state=present') - session = GitHubSession(module, token) + session = GitHubSession(module, token, api_url) if state == 'present': result = ensure_key_present(module, session, name, pubkey, force=force, check_mode=module.check_mode) diff --git a/plugins/modules/github_release.py b/plugins/modules/github_release.py index 1376bf4f3d..eae2081701 100644 --- a/plugins/modules/github_release.py +++ b/plugins/modules/github_release.py @@ -182,13 +182,29 @@ def main(): else: gh_obj = github3.GitHub() - # test if we're actually logged in - if password or login_token: + # GitHub's token formats: + # - ghp_ - Personal access token (classic) + # - github_pat_ - Fine-grained personal access token + # - gho_ - OAuth access token + # - ghu_ - User access token for a GitHub App + # - ghs_ - Installation access token for a GitHub App + # - ghr_ - Refresh token for a GitHub App + # + # References: + # https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/about-authentication-to-github#githubs-token-formats + # + # Test if we're actually logged in, but skip this check for some token prefixes + SKIPPED_TOKEN_PREFIXES = ['ghs_'] + if password or (login_token and not any(login_token.startswith(prefix) for prefix in SKIPPED_TOKEN_PREFIXES)): gh_obj.me() except github3.exceptions.AuthenticationFailed as e: module.fail_json(msg='Failed to connect to GitHub: %s' % to_native(e), details="Please check username and password or token " "for repository %s" % repo) + except github3.exceptions.GitHubError as e: + module.fail_json(msg='GitHub API error: %s' % to_native(e), + details="Please check username and password or token " + "for repository %s" % repo) repository = gh_obj.repository(user, repo) diff --git a/plugins/modules/github_repo.py b/plugins/modules/github_repo.py index 2d2c6f8588..abaddb3c28 100644 --- a/plugins/modules/github_repo.py +++ b/plugins/modules/github_repo.py @@ -72,7 +72,7 @@ options: organization: description: - Organization for the repository. - - When O(state=present), the repository will be created in the current user profile. + - When O(state=present), the repository is created in the current user profile. 
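The github_release change above skips the C(gh_obj.me()) login probe for C(ghs_) tokens, because GitHub App installation tokens cannot call that endpoint. A hypothetical task that would rely on this, assuming the module's usual O(user)/O(repo)/O(action)/O(token) arguments:

```yaml
# Illustrative only: the token variable is a placeholder.
- name: Query the latest release using a GitHub App installation token
  community.general.github_release:
    user: ansible-collections
    repo: community.general
    action: latest_release
    token: "{{ github_app_installation_token }}"
```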
type: str required: false @@ -246,12 +246,12 @@ def main(): password=dict(type='str', no_log=True), access_token=dict(type='str', no_log=True), name=dict(type='str', required=True), - state=dict(type='str', required=False, default="present", + state=dict(type='str', default="present", choices=["present", "absent"]), - organization=dict(type='str', required=False, default=None), + organization=dict(type='str'), private=dict(type='bool'), description=dict(type='str'), - api_url=dict(type='str', required=False, default='https://api.github.com'), + api_url=dict(type='str', default='https://api.github.com'), force_defaults=dict(type='bool', default=True), ) module = AnsibleModule( diff --git a/plugins/modules/github_webhook.py b/plugins/modules/github_webhook.py index 8608c90bc9..1ae2e71aaa 100644 --- a/plugins/modules/github_webhook.py +++ b/plugins/modules/github_webhook.py @@ -32,7 +32,7 @@ options: - repo url: description: - - URL to which payloads will be delivered. + - URL to which payloads are delivered. type: str required: true content_type: @@ -208,25 +208,16 @@ def main(): argument_spec=dict( repository=dict(type='str', required=True, aliases=['repo']), url=dict(type='str', required=True), - content_type=dict( - type='str', - choices=('json', 'form'), - required=False, - default='form'), - secret=dict(type='str', required=False, no_log=True), - insecure_ssl=dict(type='bool', required=False, default=False), - events=dict(type='list', elements='str', required=False), - active=dict(type='bool', required=False, default=True), - state=dict( - type='str', - required=False, - choices=('absent', 'present'), - default='present'), + content_type=dict(type='str', choices=('json', 'form'), default='form'), + secret=dict(type='str', no_log=True), + insecure_ssl=dict(type='bool', default=False), + events=dict(type='list', elements='str'), + active=dict(type='bool', default=True), + state=dict(type='str', choices=('absent', 'present'), default='present'), user=dict(type='str', required=True), - password=dict(type='str', required=False, no_log=True),
- token=dict(type='str', required=False, no_log=True), + password=dict(type='str', no_log=True), + token=dict(type='str', no_log=True), github_url=dict( - type='str', required=False, default="https://api.github.com")), + type='str', default="https://api.github.com")), mutually_exclusive=(('password', 'token'),), required_one_of=(("password", "token"),), required_if=(("state", "present", ("events",)),), diff --git a/plugins/modules/github_webhook_info.py b/plugins/modules/github_webhook_info.py index 440a373f1d..75315c77aa 100644 --- a/plugins/modules/github_webhook_info.py +++ b/plugins/modules/github_webhook_info.py @@ -76,16 +76,17 @@ hooks: type: list elements: dict sample: - - { "has_shared_secret": true, "url": "https://jenkins.example.com/ghprbhook/", "events": ["issue_comment", "pull_request"], "insecure_ssl": "1", "content_type": "json", "active": true, "id": 6206, "last_response": {"status": "active", "message": "OK", "code": 200} } + - has_shared_secret: true + url: https://jenkins.example.com/ghprbhook/ + events: [issue_comment, pull_request] + insecure_ssl: "1" + content_type: json + active: true + id: 6206 + last_response: + status: active + message: OK + code: 200 """ import traceback @@ -123,10 +124,10 @@ def main(): argument_spec=dict( repository=dict(type='str', required=True, aliases=["repo"]), user=dict(type='str', required=True), - password=dict(type='str', required=False, no_log=True), - token=dict(type='str', required=False, no_log=True), + password=dict(type='str', no_log=True), + token=dict(type='str', no_log=True), github_url=dict( - type='str', required=False, default="https://api.github.com")), + type='str', default="https://api.github.com")), mutually_exclusive=(('password', 'token'), ), required_one_of=(("password", "token"), ), supports_check_mode=True) diff --git a/plugins/modules/gitlab_branch.py b/plugins/modules/gitlab_branch.py index b32169ef5a..6ed6e6a0c5 100644 --- a/plugins/modules/gitlab_branch.py +++ b/plugins/modules/gitlab_branch.py @@ -118,7 +118,7 @@ def main(): argument_spec.update( project=dict(type='str', required=True), branch=dict(type='str', required=True), - ref_branch=dict(type='str', required=False), + ref_branch=dict(type='str'), state=dict(type='str', default="present", choices=["absent", "present"]), ) diff --git a/plugins/modules/gitlab_deploy_key.py b/plugins/modules/gitlab_deploy_key.py index 5a2f582357..d116df0714 100644 --- a/plugins/modules/gitlab_deploy_key.py +++ b/plugins/modules/gitlab_deploy_key.py @@ -35,7 +35,7 @@ attributes: options: project: description: - - Id or Full path of project in the form of group/name. + - ID or full path of the project in the form of group/name. required: true type: str title: @@ -55,8 +55,8 @@ options: default: false state: description: - - When V(present) the deploy key added to the project if it does not exist. - - When V(absent) it will be removed from the project if it exists. + - When V(present), the deploy key is added to the project if it does not exist. + - When V(absent), it is removed from the project if it exists. default: present type: str choices: ["present", "absent"] @@ -208,7 +208,7 @@ class GitLabDeployKey(object): ''' def find_deploy_key(self, project, key_title): for deploy_key in project.keys.list(**list_all_kwargs): - if (deploy_key.title == key_title): + if deploy_key.title == key_title: return deploy_key ''' diff --git a/plugins/modules/gitlab_group.py b/plugins/modules/gitlab_group.py index 711318c6d4..d6105642b8 100644 --- a/plugins/modules/gitlab_group.py +++ b/plugins/modules/gitlab_group.py @@ -13,8 +13,8 @@ DOCUMENTATION = r""" module: gitlab_group short_description: Creates/updates/deletes GitLab Groups description: - - When the group does not exist in GitLab, it will be created. - - When the group does exist and state=absent, the group will be deleted. + - When the group does not exist in GitLab, it is created. + - When the group does exist and O(state=absent), the group is deleted. author: - Werner Dijkerman (@dj-wasabi) - Guillaume Martinez (@Lunik) @@ -97,12 +97,12 @@ options: parent: description: - Allow to create subgroups. - - Id or Full path of parent group in the form of group/name. + - ID or full path of the parent group in the form of group/name. type: str path: description: - - The path of the group you want to create, this will be api_url/group_path. - - If not supplied, the group_name will be used. + - The path of the group you want to create, this is O(api_url)/O(path). + - If not supplied, O(name) is used. type: str prevent_forking_outside_group: description: @@ -129,7 +129,7 @@ options: service_access_tokens_expiration_enforced: description: - Service account token expiration. - - Changes will not affect existing token expiration dates. + - Changes do not affect existing token expiration dates. - Only available for top level groups.
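A minimal sketch of the gitlab_deploy_key usage documented above; the server URL, token variable, and key material are placeholders, and C(api_url)/C(api_token) come from the collection's standard authentication options:

```yaml
# Illustrative only: all values below are placeholders.
- name: Add a read-only deploy key, addressing the project by full path
  community.general.gitlab_deploy_key:
    api_url: https://gitlab.example.com
    api_token: "{{ gitlab_admin_token }}"
    project: group/name
    title: ci-readonly
    key: "ssh-ed25519 AAAAC3... ci@example.com"
    state: present
```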
type: bool version_added: 9.5.0 diff --git a/plugins/modules/gitlab_group_access_token.py b/plugins/modules/gitlab_group_access_token.py index bcf75e056b..0fe6c14af2 100644 --- a/plugins/modules/gitlab_group_access_token.py +++ b/plugins/modules/gitlab_group_access_token.py @@ -28,7 +28,7 @@ extends_documentation_fragment: - community.general.attributes notes: - Access tokens can not be changed. If a parameter needs to be changed, an access token has to be recreated. Whether tokens - will be recreated is controlled by the O(recreate) option, which defaults to V(never). + are recreated or not is controlled by the O(recreate) option, which defaults to V(never). - Token string is contained in the result only when access token is created or recreated. It can not be fetched afterwards. - Token matching is done by comparing O(name) option. attributes: @@ -55,8 +55,16 @@ options: type: list elements: str aliases: ["scope"] - choices: ["api", "read_api", "read_registry", "write_registry", "read_repository", "write_repository", "create_runner", - "ai_features", "k8s_proxy"] + choices: + - api + - read_api + - read_registry + - write_registry + - read_repository + - write_repository + - create_runner + - ai_features + - k8s_proxy access_level: description: - Access level of the access token. @@ -71,17 +79,17 @@ options: required: true recreate: description: - - Whether the access token will be recreated if it already exists. - - When V(never) the token will never be recreated. - - When V(always) the token will always be recreated. - - When V(state_change) the token will be recreated if there is a difference between desired state and actual state. + - Whether the access token is recreated if it already exists. + - When V(never), the token is never recreated. + - When V(always), the token is always recreated. + - When V(state_change), the token is recreated if there is a difference between the desired state and the actual state. type: str choices: ["never", "always", "state_change"] default: never state: description: - - When V(present) the access token will be added to the group if it does not exist. - - When V(absent) it will be removed from the group if it exists. + - When V(present), the access token is added to the group if it does not exist. + - When V(absent), it is removed from the group if it exists.
default: present type: str choices: ["present", "absent"] @@ -185,9 +193,9 @@ class GitLabGroupAccessToken(object): @param name of the access token ''' def find_access_token(self, group, name): - access_tokens = group.access_tokens.list(all=True) + access_tokens = [x for x in group.access_tokens.list(all=True) if not getattr(x, 'revoked', False)] for access_token in access_tokens: - if (access_token.name == name): + if access_token.name == name: self.access_token_object = access_token return False return False @@ -237,7 +245,7 @@ def main(): 'create_runner', 'ai_features', 'k8s_proxy']), - access_level=dict(type='str', required=False, default='maintainer', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']), + access_level=dict(type='str', default='maintainer', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']), expires_at=dict(type='str', required=True), recreate=dict(type='str', default='never', choices=['never', 'always', 'state_change']) )) diff --git a/plugins/modules/gitlab_group_variable.py b/plugins/modules/gitlab_group_variable.py index 926f4fe20a..10ca467bcd 100644 --- a/plugins/modules/gitlab_group_variable.py +++ b/plugins/modules/gitlab_group_variable.py @@ -15,9 +15,9 @@ short_description: Creates, updates, or deletes GitLab groups variables version_added: 1.2.0 description: - Creates a group variable if it does not exist. - - When a group variable does exist, its value will be updated when the values are different. + - When a group variable does exist, its value is updated when the values are different. - Variables which are untouched in the playbook, but present in the GitLab group, either stay untouched (O(purge=false)) - or will be deleted (O(purge=true)). + or are deleted (O(purge=true)). author: - Florent Madiot (@scodeman) requirements: @@ -52,7 +52,7 @@ options: type: bool vars: description: - - When the list element is a simple key-value pair, masked, raw and protected will be set to false. + - When the list element is a simple key-value pair, masked, raw and protected are set to V(false). - When the list element is a dict with the keys C(value), C(masked), C(raw) and C(protected), the user can have full control about whether a value should be masked, raw, protected or both. - Support for group variables requires GitLab >= 9.5. @@ -185,22 +185,22 @@ group_variable: description: A list of variables which were created. returned: always type: list - sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY'] + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] untouched: description: A list of variables which exist. returned: always type: list - sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY'] + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] removed: description: A list of variables which were deleted. returned: always type: list - sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY'] + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] updated: description: A list of variables whose values were changed.
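Tying the gitlab_group_access_token changes above together: revoked tokens are now ignored when matching by O(name), and V(state_change) only recreates the token when its definition actually differs. An illustrative task (all values are placeholders):

```yaml
# Illustrative only: server URL, token variable and names are placeholders.
- name: Ensure a group access token, recreating it only when its definition changes
  community.general.gitlab_group_access_token:
    api_url: https://gitlab.example.com
    api_token: "{{ gitlab_admin_token }}"
    group: my-group
    name: ci-read-token
    scopes:
      - read_repository
    access_level: guest
    expires_at: "2026-01-01"
    recreate: state_change
```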
returned: always type: list - sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY'] + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] """ from ansible.module_utils.basic import AnsibleModule @@ -372,11 +372,11 @@ def main(): argument_spec.update(auth_argument_spec()) argument_spec.update( group=dict(type='str', required=True), - purge=dict(type='bool', required=False, default=False), - vars=dict(type='dict', required=False, default=dict(), no_log=True), + purge=dict(type='bool', default=False), + vars=dict(type='dict', default=dict(), no_log=True), # please mind whenever changing the variables dict to also change module_utils/gitlab.py's # KNOWN dict in filter_returned_variables or bad evil will happen - variables=dict(type='list', elements='dict', required=False, default=list(), options=dict( + variables=dict(type='list', elements='dict', default=list(), options=dict( name=dict(type='str', required=True), value=dict(type='str', no_log=True), masked=dict(type='bool', default=False), diff --git a/plugins/modules/gitlab_hook.py b/plugins/modules/gitlab_hook.py index 9502982296..87c8aa635a 100644 --- a/plugins/modules/gitlab_hook.py +++ b/plugins/modules/gitlab_hook.py @@ -35,18 +35,18 @@ options: project: description: - - Id or Full path of the project in the form of group/name. + - ID or full path of the project in the form of group/name. required: true type: str hook_url: description: - - The url that you want GitLab to post to, this is used as the primary key for updates and deletion. + - The URL that you want GitLab to post to; this is used as the primary key for updates and deletion. required: true type: str state: description: - - When V(present) the hook will be updated to match the input or created if it does not exist. - - When V(absent) hook will be deleted if it exists. + - When V(present), the hook is updated to match the input or created if it does not exist. + - When V(absent), the hook is deleted if it exists. default: present type: str choices: ["present", "absent"] @@ -103,15 +103,15 @@ options: version_added: '8.4.0' hook_validate_certs: description: - - Whether GitLab will do SSL verification when triggering the hook. + - Whether GitLab performs SSL verification when triggering the hook. type: bool default: false aliases: [enable_ssl_verification] token: description: - Secret token to validate hook messages at the receiver. - - If this is present it will always result in a change as it cannot be retrieved from GitLab. - - Will show up in the X-GitLab-Token HTTP request header. + - If this is present, it always results in a change as it cannot be retrieved from GitLab. + - It shows up in the C(X-GitLab-Token) HTTP request header.
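Given the token semantics above (the secret cannot be read back from GitLab, so setting it always reports a change), a hypothetical gitlab_hook task:

```yaml
# Illustrative only: URLs and token variables are placeholders.
- name: Register a hook that authenticates its deliveries with a secret token
  community.general.gitlab_hook:
    api_url: https://gitlab.example.com
    api_token: "{{ gitlab_admin_token }}"
    project: group/name
    hook_url: https://ci.example.com/gitlab/hook
    token: "{{ hook_secret }}"
    state: present
```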
required: false type: str """ @@ -271,7 +271,7 @@ class GitLabHook(object): ''' def find_hook(self, project, hook_url): for hook in project.hooks.list(**list_all_kwargs): - if (hook.url == hook_url): + if hook.url == hook_url: return hook ''' @@ -307,7 +307,7 @@ def main(): job_events=dict(type='bool', default=False), pipeline_events=dict(type='bool', default=False), wiki_page_events=dict(type='bool', default=False), - releases_events=dict(type='bool', default=None), + releases_events=dict(type='bool'), hook_validate_certs=dict(type='bool', default=False, aliases=['enable_ssl_verification']), token=dict(type='str', no_log=True), )) diff --git a/plugins/modules/gitlab_instance_variable.py b/plugins/modules/gitlab_instance_variable.py index 2023b0ad7d..0f2c9b7752 100644 --- a/plugins/modules/gitlab_instance_variable.py +++ b/plugins/modules/gitlab_instance_variable.py @@ -16,10 +16,10 @@ short_description: Creates, updates, or deletes GitLab instance variables version_added: 7.1.0 description: - Creates an instance variable if it does not exist. - - When a instance variable does exist, its value will be updated if the values are different. + - When an instance variable does exist, its value is updated if the values are different. - Support for instance variables requires GitLab >= 13.0. - - Variables which are not mentioned in the modules options, but are present on the GitLab instance, will either stay (O(purge=false)) - or will be deleted (O(purge=true)). + - Variables which are not mentioned in the module's options, but are present on the GitLab instance, either stay (O(purge=false)) + or are deleted (O(purge=true)). author: - Benedikt Braunger (@benibr) requirements: @@ -124,22 +124,22 @@ instance_variable: description: A list of variables which were created. returned: always type: list - sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY'] + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] untouched: description: A list of variables which exist. returned: always type: list - sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY'] + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] removed: description: A list of variables which were deleted. returned: always type: list - sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY'] + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] updated: description: A list of pre-existing variables whose values have been set. returned: always type: list - sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY'] + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] """ from ansible.module_utils.basic import AnsibleModule @@ -301,8 +301,8 @@ def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(auth_argument_spec()) argument_spec.update( - purge=dict(type='bool', required=False, default=False), - variables=dict(type='list', elements='dict', required=False, default=list(), options=dict( + purge=dict(type='bool', default=False), + variables=dict(type='list', elements='dict', default=list(), options=dict( name=dict(type='str', required=True), value=dict(type='str', no_log=True), masked=dict(type='bool', default=False), diff --git a/plugins/modules/gitlab_issue.py b/plugins/modules/gitlab_issue.py index 47b6f072e8..c6bf6f8328 100644 --- a/plugins/modules/gitlab_issue.py +++ b/plugins/modules/gitlab_issue.py @@ -18,8 +18,8 @@ short_description: Create, update, or delete GitLab issues version_added: '8.1.0' description: - Creates an issue if it does not exist. - - When an issue does exist, it will be updated if the provided parameters are different.
- - When an issue does exist and O(state=absent), the issue will be deleted. + - When an issue does exist, it is updated if the provided parameters are different. + - When an issue does exist and O(state=absent), the issue is deleted. - When multiple issues are detected, the task fails. - Existing issues are matched based on O(title) and O(state_filter) filters. author: @@ -284,13 +284,13 @@ def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(auth_argument_spec()) argument_spec.update( - assignee_ids=dict(type='list', elements='str', required=False), - description=dict(type='str', required=False), - description_path=dict(type='path', required=False), - issue_type=dict(type='str', default='issue', choices=["issue", "incident", "test_case"], required=False), - labels=dict(type='list', elements='str', required=False), - milestone_search=dict(type='str', required=False), - milestone_group_id=dict(type='str', required=False), + assignee_ids=dict(type='list', elements='str'), + description=dict(type='str'), + description_path=dict(type='path'), + issue_type=dict(type='str', default='issue', choices=["issue", "incident", "test_case"]), + labels=dict(type='list', elements='str'), + milestone_search=dict(type='str'), + milestone_group_id=dict(type='str'), project=dict(type='str', required=True), state=dict(type='str', default="present", choices=["absent", "present"]), state_filter=dict(type='str', default="opened", choices=["opened", "closed"]), diff --git a/plugins/modules/gitlab_label.py b/plugins/modules/gitlab_label.py index 8b9503e325..a139d1fcbd 100644 --- a/plugins/modules/gitlab_label.py +++ b/plugins/modules/gitlab_label.py @@ -12,8 +12,8 @@ module: gitlab_label short_description: Creates/updates/deletes GitLab Labels belonging to project or group version_added: 8.3.0 description: - - When a label does not exist, it will be created. - - When a label does exist, its value will be updated when the values are different. + - When a label does not exist, it is created. + - When a label does exist, its value is updated when the values are different. - Labels can be purged. author: - "Gabriele Pongelli (@gpongelli)" @@ -197,22 +197,22 @@ labels: description: A list of labels which were created. returned: always type: list - sample: ['abcd', 'label-one'] + sample: ["abcd", "label-one"] untouched: description: A list of labels which exist. returned: always type: list - sample: ['defg', 'new-label'] + sample: ["defg", "new-label"] removed: description: A list of labels which were deleted. returned: always type: list - sample: ['defg', 'new-label'] + sample: ["defg", "new-label"] updated: description: A list pre-existing labels whose values have been set. returned: always type: list - sample: ['defg', 'new-label'] + sample: ["defg", "new-label"] labels_obj: description: API object. 
returned: success @@ -410,16 +410,16 @@ def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(auth_argument_spec()) argument_spec.update( - project=dict(type='str', required=False, default=None), - group=dict(type='str', required=False, default=None), - purge=dict(type='bool', required=False, default=False), - labels=dict(type='list', elements='dict', required=False, default=list(), + project=dict(type='str'), + group=dict(type='str'), + purge=dict(type='bool', default=False), + labels=dict(type='list', elements='dict', default=list(), options=dict( name=dict(type='str', required=True), - color=dict(type='str', required=False), - description=dict(type='str', required=False), - priority=dict(type='int', required=False), - new_name=dict(type='str', required=False),) + color=dict(type='str'), + description=dict(type='str'), + priority=dict(type='int'), + new_name=dict(type='str'),) ), state=dict(type='str', default="present", choices=["absent", "present"]), ) diff --git a/plugins/modules/gitlab_merge_request.py b/plugins/modules/gitlab_merge_request.py index fd6068980a..922b224c1f 100644 --- a/plugins/modules/gitlab_merge_request.py +++ b/plugins/modules/gitlab_merge_request.py @@ -18,8 +18,8 @@ short_description: Create, update, or delete GitLab merge requests version_added: 7.1.0 description: - Creates a merge request if it does not exist. - - When a single merge request does exist, it will be updated if the provided parameters are different. - - When a single merge request does exist and O(state=absent), the merge request will be deleted. + - When a single merge request does exist, it is updated if the provided parameters are different. + - When a single merge request does exist and O(state=absent), the merge request is deleted. - When multiple merge requests are detected, the task fails. - Existing merge requests are matched based on O(title), O(source_branch), O(target_branch), and O(state_filter) filters. author: @@ -287,13 +287,13 @@ def main(): source_branch=dict(type='str', required=True), target_branch=dict(type='str', required=True), title=dict(type='str', required=True), - description=dict(type='str', required=False), - labels=dict(type='str', default="", required=False), - description_path=dict(type='path', required=False), - remove_source_branch=dict(type='bool', default=False, required=False), + description=dict(type='str'), + labels=dict(type='str', default=""), + description_path=dict(type='path'), + remove_source_branch=dict(type='bool', default=False), state_filter=dict(type='str', default="opened", choices=["opened", "closed", "locked", "merged"]), - assignee_ids=dict(type='str', required=False), - reviewer_ids=dict(type='str', required=False), + assignee_ids=dict(type='str'), + reviewer_ids=dict(type='str'), state=dict(type='str', default="present", choices=["absent", "present"]), ) diff --git a/plugins/modules/gitlab_milestone.py b/plugins/modules/gitlab_milestone.py index 99b922c4dd..1406f96ffb 100644 --- a/plugins/modules/gitlab_milestone.py +++ b/plugins/modules/gitlab_milestone.py @@ -12,8 +12,8 @@ module: gitlab_milestone short_description: Creates/updates/deletes GitLab Milestones belonging to project or group version_added: 8.3.0 description: - - When a milestone does not exist, it will be created. - - When a milestone does exist, its value will be updated when the values are different. + - When a milestone does not exist, it is created. + - When a milestone does exist, its value is updated when the values are different. 
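Since existing merge requests are matched on O(title), O(source_branch), O(target_branch), and O(state_filter) as documented above, a sketch of an idempotent merge request task (project path and branch names are examples):

```yaml
# Illustrative only: project and branches are placeholders.
- name: Open or update a merge request
  community.general.gitlab_merge_request:
    api_url: https://gitlab.example.com
    api_token: "{{ gitlab_admin_token }}"
    project: group/name
    source_branch: feature/x
    target_branch: main
    title: Add feature X
    remove_source_branch: true
    state: present
```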
   - Milestones can be purged.
 author:
   - "Gabriele Pongelli (@gpongelli)"
@@ -181,22 +181,22 @@ milestones:
       description: A list of milestones which were created.
       returned: always
       type: list
-      sample: ['abcd', 'milestone-one']
+      sample: ["abcd", "milestone-one"]
     untouched:
       description: A list of milestones which exist.
       returned: always
       type: list
-      sample: ['defg', 'new-milestone']
+      sample: ["defg", "new-milestone"]
     removed:
       description: A list of milestones which were deleted.
       returned: always
       type: list
-      sample: ['defg', 'new-milestone']
+      sample: ["defg", "new-milestone"]
     updated:
       description: A list of pre-existing milestones whose values have been set.
       returned: always
       type: list
-      sample: ['defg', 'new-milestone']
+      sample: ["defg", "new-milestone"]
 milestones_obj:
   description: API object.
   returned: success
@@ -411,15 +411,15 @@ def main():
     argument_spec = basic_auth_argument_spec()
     argument_spec.update(auth_argument_spec())
     argument_spec.update(
-        project=dict(type='str', required=False, default=None),
-        group=dict(type='str', required=False, default=None),
-        purge=dict(type='bool', required=False, default=False),
-        milestones=dict(type='list', elements='dict', required=False, default=list(),
+        project=dict(type='str'),
+        group=dict(type='str'),
+        purge=dict(type='bool', default=False),
+        milestones=dict(type='list', elements='dict', default=list(),
                         options=dict(
                             title=dict(type='str', required=True),
-                            description=dict(type='str', required=False),
-                            due_date=dict(type='str', required=False),
-                            start_date=dict(type='str', required=False),)
+                            description=dict(type='str'),
+                            due_date=dict(type='str'),
+                            start_date=dict(type='str'),)
                         ),
         state=dict(type='str', default="present", choices=["absent", "present"]),
     )
diff --git a/plugins/modules/gitlab_project.py b/plugins/modules/gitlab_project.py
index 942e1d9816..1e2140e24a 100644
--- a/plugins/modules/gitlab_project.py
+++ b/plugins/modules/gitlab_project.py
@@ -13,9 +13,9 @@ DOCUMENTATION = r"""
 module: gitlab_project
 short_description: Creates/updates/deletes GitLab Projects
 description:
-  - When the project does not exist in GitLab, it will be created.
-  - When the project does exist and O(state=absent), the project will be deleted.
-  - When changes are made to the project, the project will be updated.
+  - When the project does not exist in GitLab, it is created.
+  - When the project does exist and O(state=absent), the project is deleted.
+  - When changes are made to the project, the project is updated.
 author:
   - Werner Dijkerman (@dj-wasabi)
   - Guillaume Martinez (@Lunik)
@@ -44,6 +44,12 @@ options:
       - This option is only used on creation, not for updates.
     type: path
     version_added: "4.2.0"
+  build_timeout:
+    description:
+      - Maximum number of seconds a CI job can run.
+      - If not specified on creation, GitLab imposes a default value.
+    type: int
+    version_added: "10.6.0"
   builds_access_level:
     description:
       - V(private) means that repository CI/CD is allowed only to project members.
@@ -138,11 +144,11 @@ options:
     version_added: "6.2.0"
   group:
     description:
-      - Id or the full path of the group of which this projects belongs to.
+      - ID or the full path of the group this project belongs to.
     type: str
   import_url:
     description:
-      - Git repository which will be imported into gitlab.
+      - Git repository which is imported into GitLab.
       - GitLab server needs read access to this git repository.
     required: false
     type: str
@@ -156,7 +162,7 @@ options:
     version_added: "6.4.0"
   initialize_with_readme:
     description:
-      - Will initialize the project with a default C(README.md).
+      - Initializes the project with a default C(README.md).
       - Is only used when the project is created, and ignored otherwise.
     type: bool
     default: false
@@ -242,8 +248,8 @@ options:
     version_added: "9.3.0"
   path:
     description:
-      - The path of the project you want to create, this will be server_url//path.
-      - If not supplied, name will be used.
+      - The path of the project you want to create; this is server_url/O(group)/O(path).
+      - If not supplied, O(name) is used.
     type: str
   releases_access_level:
     description:
@@ -430,6 +436,7 @@ class GitLabProject(object):
         project_options = {
             'allow_merge_on_skipped_pipeline': options['allow_merge_on_skipped_pipeline'],
             'builds_access_level': options['builds_access_level'],
+            'build_timeout': options['build_timeout'],
             'ci_config_path': options['ci_config_path'],
             'container_expiration_policy': options['container_expiration_policy'],
             'container_registry_access_level': options['container_registry_access_level'],
@@ -591,8 +598,9 @@ def main():
         allow_merge_on_skipped_pipeline=dict(type='bool'),
         avatar_path=dict(type='path'),
         builds_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']),
+        build_timeout=dict(type='int'),
         ci_config_path=dict(type='str'),
-        container_expiration_policy=dict(type='dict', default=None, options=dict(
+        container_expiration_policy=dict(type='dict', options=dict(
             cadence=dict(type='str', choices=["1d", "7d", "14d", "1month", "3month"]),
             enabled=dict(type='bool'),
             keep_n=dict(type='int', choices=[0, 1, 5, 10, 25, 50, 100]),
@@ -664,6 +672,7 @@ def main():
     allow_merge_on_skipped_pipeline = module.params['allow_merge_on_skipped_pipeline']
     avatar_path = module.params['avatar_path']
     builds_access_level = module.params['builds_access_level']
+    build_timeout = module.params['build_timeout']
     ci_config_path = module.params['ci_config_path']
     container_expiration_policy = module.params['container_expiration_policy']
     container_registry_access_level = module.params['container_registry_access_level']
@@ -748,6 +757,7 @@ def main():
         "allow_merge_on_skipped_pipeline": allow_merge_on_skipped_pipeline,
         "avatar_path": avatar_path,
         "builds_access_level": builds_access_level,
+        "build_timeout": build_timeout,
         "ci_config_path": ci_config_path,
         "container_expiration_policy": container_expiration_policy,
diff --git a/plugins/modules/gitlab_project_access_token.py b/plugins/modules/gitlab_project_access_token.py
index a93d5531bf..3747870d9a 100644
--- a/plugins/modules/gitlab_project_access_token.py
+++ b/plugins/modules/gitlab_project_access_token.py
@@ -28,7 +28,7 @@ extends_documentation_fragment:
  - community.general.attributes
 notes:
  - Access tokens cannot be changed. If a parameter needs to be changed, an access token has to be recreated. Whether tokens
-   will be recreated is controlled by the O(recreate) option, which defaults to V(never).
+   are recreated is controlled by the O(recreate) option, which defaults to V(never).
  - Token string is contained in the result only when access token is created or recreated. It cannot be fetched afterwards.
  - Token matching is done by comparing O(name) option.
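# Adding an option such as build_timeout above touches four places in a module:
# the DOCUMENTATION block, the argument_spec, the module.params read, and the
# payload handed to the API. A condensed, hypothetical sketch of that plumbing
# (not the module's actual code path):
argument_spec = dict(
    build_timeout=dict(type='int'),   # no default: GitLab applies its own
)

module_params = {'build_timeout': 3600}   # stand-in for module.params

project_options = {'build_timeout': module_params['build_timeout']}
# Only keys the user actually set should reach the API.
payload = {k: v for k, v in project_options.items() if v is not None}
print(payload)   # {'build_timeout': 3600}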
attributes: @@ -55,8 +55,16 @@ options: type: list elements: str aliases: ["scope"] - choices: ["api", "read_api", "read_registry", "write_registry", "read_repository", "write_repository", "create_runner", - "ai_features", "k8s_proxy"] + choices: + - api + - read_api + - read_registry + - write_registry + - read_repository + - write_repository + - create_runner + - ai_features + - k8s_proxy access_level: description: - Access level of the access token. @@ -71,17 +79,17 @@ options: required: true recreate: description: - - Whether the access token will be recreated if it already exists. - - When V(never) the token will never be recreated. - - When V(always) the token will always be recreated. - - When V(state_change) the token will be recreated if there is a difference between desired state and actual state. + - Whether the access token is recreated if it already exists. + - When V(never) the token is never recreated. + - When V(always) the token is always recreated. + - When V(state_change) the token is recreated if there is a difference between desired state and actual state. type: str choices: ["never", "always", "state_change"] default: never state: description: - - When V(present) the access token will be added to the project if it does not exist. - - When V(absent) it will be removed from the project if it exists. + - When V(present) the access token is added to the project if it does not exist. + - When V(absent) it is removed from the project if it exists. default: present type: str choices: ["present", "absent"] @@ -183,9 +191,9 @@ class GitLabProjectAccessToken(object): @param name of the access token ''' def find_access_token(self, project, name): - access_tokens = project.access_tokens.list(all=True) + access_tokens = [x for x in project.access_tokens.list(all=True) if not getattr(x, 'revoked', False)] for access_token in access_tokens: - if (access_token.name == name): + if access_token.name == name: self.access_token_object = access_token return False return False @@ -235,7 +243,7 @@ def main(): 'create_runner', 'ai_features', 'k8s_proxy']), - access_level=dict(type='str', required=False, default='maintainer', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']), + access_level=dict(type='str', default='maintainer', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']), expires_at=dict(type='str', required=True), recreate=dict(type='str', default='never', choices=['never', 'always', 'state_change']) )) diff --git a/plugins/modules/gitlab_project_members.py b/plugins/modules/gitlab_project_members.py index 228af9a062..b5d0f6e2d1 100644 --- a/plugins/modules/gitlab_project_members.py +++ b/plugins/modules/gitlab_project_members.py @@ -48,8 +48,9 @@ options: description: - The access level for the user. - Required if O(state=present), user state is set to present. + - V(owner) was added in community.general 10.6.0. type: str - choices: ['guest', 'reporter', 'developer', 'maintainer'] + choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner'] gitlab_users_access: description: - Provide a list of user to access level mappings. @@ -67,8 +68,9 @@ options: description: - The access level for the user. - Required if O(state=present), user state is set to present. + - V(owner) was added in community.general 10.6.0. 
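# The find_access_token change above drops revoked tokens before matching by
# name: GitLab keeps revoked tokens in the listing, so without the filter a
# revoked token could shadow the name and block recreation. A standalone
# re-run of that filter with stand-in objects:
class Token:
    def __init__(self, name, revoked=False):
        self.name = name
        self.revoked = revoked

listed = [Token('deploy', revoked=True), Token('deploy'), Token('ci')]
active = [t for t in listed if not getattr(t, 'revoked', False)]
match = next((t for t in active if t.name == 'deploy'), None)
print(match.name, match.revoked)   # deploy False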
type: str - choices: ['guest', 'reporter', 'developer', 'maintainer'] + choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner'] required: true version_added: 3.7.0 state: @@ -84,9 +86,10 @@ options: - Adds/remove users of the given access_level to match the given O(gitlab_user)/O(gitlab_users_access) list. If omitted do not purge orphaned members. - Is only used when O(state=present). + - V(owner) was added in community.general 10.6.0. type: list elements: str - choices: ['guest', 'reporter', 'developer', 'maintainer'] + choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner'] version_added: 3.7.0 """ @@ -239,16 +242,16 @@ def main(): project=dict(type='str', required=True), gitlab_user=dict(type='list', elements='str'), state=dict(type='str', default='present', choices=['present', 'absent']), - access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer']), + access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']), purge_users=dict(type='list', elements='str', choices=[ - 'guest', 'reporter', 'developer', 'maintainer']), + 'guest', 'reporter', 'developer', 'maintainer', 'owner']), gitlab_users_access=dict( type='list', elements='dict', options=dict( name=dict(type='str', required=True), access_level=dict(type='str', choices=[ - 'guest', 'reporter', 'developer', 'maintainer'], required=True), + 'guest', 'reporter', 'developer', 'maintainer', 'owner'], required=True), ) ), )) @@ -286,6 +289,7 @@ def main(): 'reporter': gitlab.const.REPORTER_ACCESS, 'developer': gitlab.const.DEVELOPER_ACCESS, 'maintainer': gitlab.const.MAINTAINER_ACCESS, + 'owner': gitlab.const.OWNER_ACCESS, } gitlab_project = module.params['project'] diff --git a/plugins/modules/gitlab_project_variable.py b/plugins/modules/gitlab_project_variable.py index 5903c9b5c4..4c261f5978 100644 --- a/plugins/modules/gitlab_project_variable.py +++ b/plugins/modules/gitlab_project_variable.py @@ -11,10 +11,10 @@ DOCUMENTATION = r""" module: gitlab_project_variable short_description: Creates/updates/deletes GitLab Projects Variables description: - - When a project variable does not exist, it will be created. - - When a project variable does exist, its value will be updated when the values are different. + - When a project variable does not exist, it is created. + - When a project variable does exist, its value is updated when the values are different. - Variables which are untouched in the playbook, but are not untouched in the GitLab project, they stay untouched (O(purge=false)) - or will be deleted (O(purge=true)). + or are deleted (O(purge=true)). author: - "Markus Bergholz (@markuman)" requirements: @@ -45,12 +45,12 @@ options: type: str purge: description: - - When set to true, all variables which are not untouched in the task will be deleted. + - When set to V(true), all variables which are not untouched in the task are deleted. default: false type: bool vars: description: - - When the list element is a simple key-value pair, masked, raw and protected will be set to false. + - When the list element is a simple key-value pair, masked, raw and protected are set to V(false). - When the list element is a dict with the keys C(value), C(masked), C(raw) and C(protected), the user can have full control about whether a value should be masked, raw, protected or both. - Support for protected values requires GitLab >= 9.3. @@ -202,22 +202,22 @@ project_variable: description: A list of variables which were created. 
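# The new 'owner' choice above maps to python-gitlab's gitlab.const.OWNER_ACCESS.
# Sketch of the lookup with plain integers standing in for the constants, so it
# runs without python-gitlab installed; 10/20/30/40/50 are GitLab's documented
# access-level values (stated here as background, not taken from the diff):
ACCESS_LEVELS = {
    'guest': 10,
    'reporter': 20,
    'developer': 30,
    'maintainer': 40,
    'owner': 50,   # newly allowed in community.general 10.6.0
}
print(ACCESS_LEVELS['owner'])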
returned: always type: list - sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY'] + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] untouched: description: A list of variables which exist. returned: always type: list - sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY'] + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] removed: description: A list of variables which were deleted. returned: always type: list - sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY'] + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] updated: description: A list of variables whose values were changed. returned: always type: list - sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY'] + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] """ from ansible.module_utils.basic import AnsibleModule @@ -394,11 +394,11 @@ def main(): argument_spec.update(auth_argument_spec()) argument_spec.update( project=dict(type='str', required=True), - purge=dict(type='bool', required=False, default=False), - vars=dict(type='dict', required=False, default=dict(), no_log=True), + purge=dict(type='bool', default=False), + vars=dict(type='dict', default=dict(), no_log=True), # please mind whenever changing the variables dict to also change module_utils/gitlab.py's # KNOWN dict in filter_returned_variables or bad evil will happen - variables=dict(type='list', elements='dict', required=False, default=list(), options=dict( + variables=dict(type='list', elements='dict', default=list(), options=dict( name=dict(type='str', required=True), value=dict(type='str', no_log=True), masked=dict(type='bool', default=False), diff --git a/plugins/modules/gitlab_runner.py b/plugins/modules/gitlab_runner.py index 62875c552a..87ba152ffa 100644 --- a/plugins/modules/gitlab_runner.py +++ b/plugins/modules/gitlab_runner.py @@ -81,8 +81,8 @@ options: description: - The registration token is used to register new runners before GitLab 16.0. - Required if O(state=present) for GitLab < 16.0. - - If set, the runner will be created using the old runner creation workflow. - - If not set, the runner will be created using the new runner creation workflow, introduced in GitLab 16.0. + - If set, the runner is created using the old runner creation workflow. + - If not set, the runner is created using the new runner creation workflow, introduced in GitLab 16.0. - If not set, requires python-gitlab >= 4.0.0. type: str owned: @@ -122,8 +122,8 @@ options: - If set to V(not_protected), runner can pick up jobs from both protected and unprotected branches. - If set to V(ref_protected), runner can pick up jobs only from protected branches. - Before community.general 8.0.0 the default was V(ref_protected). This was changed to no default in community.general - 8.0.0. If this option is not specified explicitly, GitLab will use V(not_protected) on creation, and the value set - will not be changed on any updates. + 8.0.0. If this option is not specified explicitly, GitLab uses V(not_protected) on creation, and the value set is + not changed on any updates. 
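# Background for the gitlab_runner registration_token notes above: which runner
# creation workflow is used depends only on whether the token is supplied. A
# one-function illustrative sketch (not the module's code):
def runner_creation_workflow(registration_token):
    if registration_token:
        return 'registration-token workflow (GitLab < 16.0)'
    return 'new authenticated-user workflow (needs python-gitlab >= 4.0.0)'

print(runner_creation_workflow(None))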
     required: false
     choices: ["not_protected", "ref_protected"]
     type: str
@@ -393,10 +393,10 @@ class GitLabRunner(object):
             # python-gitlab 2.2 through at least 2.5 returns a list of dicts for list() instead of a Runner
             # object, so we need to handle both
             if hasattr(runner, "description"):
-                if (runner.description == description):
+                if runner.description == description:
                     return self._gitlab.runners.get(runner.id)
             else:
-                if (runner['description'] == description):
+                if runner['description'] == description:
                     return self._gitlab.runners.get(runner['id'])
     '''
diff --git a/plugins/modules/gitlab_user.py b/plugins/modules/gitlab_user.py
index dd8685312d..4d7bd506f6 100644
--- a/plugins/modules/gitlab_user.py
+++ b/plugins/modules/gitlab_user.py
@@ -14,10 +14,10 @@ DOCUMENTATION = r"""
 module: gitlab_user
 short_description: Creates/updates/deletes/blocks/unblocks GitLab Users
 description:
-  - When the user does not exist in GitLab, it will be created.
-  - When the user exists and state=absent, the user will be deleted.
-  - When the user exists and state=blocked, the user will be blocked.
-  - When changes are made to user, the user will be updated.
+  - When the user does not exist in GitLab, it is created.
+  - When the user exists and state=absent, the user is deleted.
+  - When the user exists and state=blocked, the user is blocked.
+  - When changes are made to the user, the user is updated.
 notes:
   - From community.general 0.2.0 and onwards, name, email and password are optional while deleting the user.
 author:
@@ -82,7 +82,7 @@ options:
     version_added: 3.1.0
   group:
     description:
-      - Id or Full path of parent group in the form of group/name.
+      - ID or full path of the parent group in the form of group/name.
       - Add user as a member to this group.
     type: str
   access_level:
diff --git a/plugins/modules/grove.py b/plugins/modules/grove.py
index abdc303f90..81417657c8 100644
--- a/plugins/modules/grove.py
+++ b/plugins/modules/grove.py
@@ -51,8 +51,8 @@ options:
     required: false
   validate_certs:
     description:
-      - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using
-        self-signed certificates.
+      - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed
+        certificates.
     default: true
     type: bool
 author: "Jonas Pfenniger (@zimbatm)"
@@ -101,8 +101,8 @@ def main():
             channel_token=dict(type='str', required=True, no_log=True),
             message_content=dict(type='str', required=True),
             service=dict(type='str', default='ansible'),
-            url=dict(type='str', default=None),
-            icon_url=dict(type='str', default=None),
+            url=dict(type='str'),
+            icon_url=dict(type='str'),
             validate_certs=dict(default=True, type='bool'),
         )
     )
diff --git a/plugins/modules/gunicorn.py b/plugins/modules/gunicorn.py
index baf24c53b8..b524165c90 100644
--- a/plugins/modules/gunicorn.py
+++ b/plugins/modules/gunicorn.py
@@ -50,8 +50,8 @@ options:
   pid:
     type: path
    description:
-      - A filename to use for the PID file. If not set and not found on the configuration file a tmp pid file will be created
-        to check a successful run of gunicorn.
+      - A filename to use for the PID file. If not set and not found in the configuration file, a temporary PID file is created
+        to check for a successful run of gunicorn.
   worker:
     type: str
     choices: ['sync', 'eventlet', 'gevent', 'tornado ', 'gthread', 'gaiohttp']
@@ -62,8 +62,8 @@ options:
     description:
       - Switch worker processes to run as this user.
 notes:
-  - If not specified on config file, a temporary error log will be created on /tmp dir. Please make sure you have write access
-    in /tmp dir. Not needed but will help you to identify any problem with configuration.
+  - If not specified in the config file, a temporary error log is created in the C(/tmp) directory. Please make sure you have
+    write access to the C(/tmp) directory. It is not needed, but it helps to identify any problem with the configuration.
 """

 EXAMPLES = r"""
@@ -96,7 +96,7 @@ EXAMPLES = r"""

 RETURN = r"""
 gunicorn:
-  description: Process id of gunicorn.
+  description: Process ID of gunicorn.
   returned: changed
   type: str
   sample: "1234"
diff --git a/plugins/modules/haproxy.py b/plugins/modules/haproxy.py
index 9c60e59040..b0e56de061 100644
--- a/plugins/modules/haproxy.py
+++ b/plugins/modules/haproxy.py
@@ -32,7 +32,7 @@ options:
   backend:
     description:
       - Name of the HAProxy backend pool.
-      - If this parameter is unset, it will be auto-detected.
+      - If this parameter is unset, it is auto-detected.
     type: str
   drain:
     description:
@@ -62,8 +62,7 @@ options:
   state:
     description:
       - Desired state of the provided backend host.
-      - Note that V(drain) state is supported only by HAProxy version 1.5 or later. When used on versions < 1.5, it will be
-        ignored.
+      - Note that V(drain) state is supported only by HAProxy version 1.5 or later. When used on versions < 1.5, it is ignored.
     type: str
     required: true
     choices: [disabled, drain, enabled]
@@ -103,7 +102,7 @@ options:
   weight:
     description:
       - The value passed in argument.
-      - If the value ends with the V(%) sign, then the new weight will be relative to the initially configured weight.
+      - If the value ends with the V(%) sign, then the new weight is relative to the initially configured weight.
       - Relative weights are only permitted between 0 and 100% and absolute weights are permitted between 0 and 256.
     type: str
 """
diff --git a/plugins/modules/hg.py b/plugins/modules/hg.py
index 92e10f8725..982364504c 100644
--- a/plugins/modules/hg.py
+++ b/plugins/modules/hg.py
@@ -61,16 +61,16 @@ options:
     default: true
   executable:
     description:
-      - Path to hg executable to use. If not supplied, the normal mechanism for resolving binary paths will be used.
+      - Path to C(hg) executable to use. If not supplied, the normal mechanism for resolving binary paths is used.
     type: str
 notes:
   - This module does not support push capability. See U(https://github.com/ansible/ansible/issues/31156).
-  - 'If the task seems to be hanging, first verify remote host is in C(known_hosts). SSH will prompt user to authorize the
-    first contact with a remote host. To avoid this prompt, one solution is to add the remote host public key in C(/etc/ssh/ssh_known_hosts)
+  - 'If the task seems to be hanging, first verify the remote host is in C(known_hosts). SSH prompts the user to authorize the
+    first contact with a remote host. To avoid this prompt, one solution is to add the remote host public key in C(/etc/ssh/ssh_known_hosts)
     before calling the hg module, with the following command: C(ssh-keyscan remote_host.com >> /etc/ssh/ssh_known_hosts).'
   - As per 01 Dec 2018, Bitbucket has dropped support for TLSv1 and TLSv1.1 connections. As such, if the underlying system
-    still uses a Python version below 2.7.9, you will have issues checking out bitbucket repositories.
-    See U(https://bitbucket.org/blog/deprecating-tlsv1-tlsv1-1-2018-12-01).
+    still uses a Python version below 2.7.9, you are bound to have issues checking out Bitbucket repositories.
See + U(https://bitbucket.org/blog/deprecating-tlsv1-tlsv1-1-2018-12-01). """ EXAMPLES = r""" @@ -222,12 +222,12 @@ def main(): argument_spec=dict( repo=dict(type='str', required=True, aliases=['name']), dest=dict(type='path'), - revision=dict(type='str', default=None, aliases=['version']), + revision=dict(type='str', aliases=['version']), force=dict(type='bool', default=False), purge=dict(type='bool', default=False), update=dict(type='bool', default=True), clone=dict(type='bool', default=True), - executable=dict(type='str', default=None), + executable=dict(type='str'), ), ) repo = module.params['repo'] diff --git a/plugins/modules/hipchat.py b/plugins/modules/hipchat.py deleted file mode 100644 index e605278507..0000000000 --- a/plugins/modules/hipchat.py +++ /dev/null @@ -1,222 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r""" -module: hipchat -short_description: Send a message to Hipchat -description: - - Send a message to a Hipchat room, with options to control the formatting. -extends_documentation_fragment: - - community.general.attributes -deprecated: - removed_in: 11.0.0 - why: The hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020. - alternative: There is none. -attributes: - check_mode: - support: full - diff_mode: - support: none -options: - token: - type: str - description: - - API token. - required: true - room: - type: str - description: - - ID or name of the room. - required: true - msg_from: - type: str - description: - - Name the message will appear to be sent from. Max length is 15 characters - above this it will be truncated. - default: Ansible - aliases: [from] - msg: - type: str - description: - - The message body. - required: true - color: - type: str - description: - - Background color for the message. - default: yellow - choices: ["yellow", "red", "green", "purple", "gray", "random"] - msg_format: - type: str - description: - - Message format. - default: text - choices: ["text", "html"] - notify: - description: - - If true, a notification will be triggered for users in the room. - type: bool - default: true - validate_certs: - description: - - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using - self-signed certificates. - type: bool - default: true - api: - type: str - description: - - API url if using a self-hosted hipchat server. For Hipchat API version 2 use the default URI with C(/v2) instead of - C(/v1). - default: 'https://api.hipchat.com/v1' - -author: - - Shirou Wakayama (@shirou) - - Paul Bourdel (@pb8226) -""" - -EXAMPLES = r""" -- name: Send a message to a Hipchat room - community.general.hipchat: - room: notif - msg: Ansible task finished - -- name: Send a message to a Hipchat room using Hipchat API version 2 - community.general.hipchat: - api: https://api.hipchat.com/v2/ - token: OAUTH2_TOKEN - room: notify - msg: Ansible task finished -""" - -# =========================================== -# HipChat module specific support methods. 
-# - -import json -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.six.moves.urllib.request import pathname2url -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.urls import fetch_url - - -DEFAULT_URI = "https://api.hipchat.com/v1" - -MSG_URI_V1 = "/rooms/message" - -NOTIFY_URI_V2 = "/room/{id_or_name}/notification" - - -def send_msg_v1(module, token, room, msg_from, msg, msg_format='text', - color='yellow', notify=False, api=MSG_URI_V1): - '''sending message to hipchat v1 server''' - - params = {} - params['room_id'] = room - params['from'] = msg_from[:15] # max length is 15 - params['message'] = msg - params['message_format'] = msg_format - params['color'] = color - params['api'] = api - params['notify'] = int(notify) - - url = api + MSG_URI_V1 + "?auth_token=%s" % (token) - data = urlencode(params) - - if module.check_mode: - # In check mode, exit before actually sending the message - module.exit_json(changed=False) - - response, info = fetch_url(module, url, data=data) - if info['status'] == 200: - return response.read() - else: - module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) - - -def send_msg_v2(module, token, room, msg_from, msg, msg_format='text', - color='yellow', notify=False, api=NOTIFY_URI_V2): - '''sending message to hipchat v2 server''' - - headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'} - - body = dict() - body['message'] = msg - body['color'] = color - body['message_format'] = msg_format - body['notify'] = notify - - POST_URL = api + NOTIFY_URI_V2 - - url = POST_URL.replace('{id_or_name}', pathname2url(room)) - data = json.dumps(body) - - if module.check_mode: - # In check mode, exit before actually sending the message - module.exit_json(changed=False) - - response, info = fetch_url(module, url, data=data, headers=headers, method='POST') - - # https://www.hipchat.com/docs/apiv2/method/send_room_notification shows - # 204 to be the expected result code. - if info['status'] in [200, 204]: - return response.read() - else: - module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) - - -# =========================================== -# Module execution. 
-# - -def main(): - - module = AnsibleModule( - argument_spec=dict( - token=dict(required=True, no_log=True), - room=dict(required=True), - msg=dict(required=True), - msg_from=dict(default="Ansible", aliases=['from']), - color=dict(default="yellow", choices=["yellow", "red", "green", - "purple", "gray", "random"]), - msg_format=dict(default="text", choices=["text", "html"]), - notify=dict(default=True, type='bool'), - validate_certs=dict(default=True, type='bool'), - api=dict(default=DEFAULT_URI), - ), - supports_check_mode=True - ) - - token = module.params["token"] - room = str(module.params["room"]) - msg = module.params["msg"] - msg_from = module.params["msg_from"] - color = module.params["color"] - msg_format = module.params["msg_format"] - notify = module.params["notify"] - api = module.params["api"] - - try: - if api.find('/v2') != -1: - send_msg_v2(module, token, room, msg_from, msg, msg_format, color, notify, api) - else: - send_msg_v1(module, token, room, msg_from, msg, msg_format, color, notify, api) - except Exception as e: - module.fail_json(msg="unable to send msg: %s" % to_native(e), exception=traceback.format_exc()) - - changed = True - module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/homebrew.py b/plugins/modules/homebrew.py index 1290ceb465..021f990e67 100644 --- a/plugins/modules/homebrew.py +++ b/plugins/modules/homebrew.py @@ -83,7 +83,7 @@ options: default: false version_added: 9.0.0 notes: - - When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly + - When used with a C(loop:) each package is processed individually, it is much more efficient to pass the list directly to the O(name) option. """ @@ -173,7 +173,7 @@ changed_pkgs: - List of package names which are changed after module run. returned: success type: list - sample: ['git', 'git-cola'] + sample: ["git", "git-cola"] version_added: '0.2.0' """ @@ -385,12 +385,39 @@ class Homebrew(object): self.outdated_packages.add(package_name) def _extract_package_name(self, package_detail, is_cask): - canonical_name = package_detail["token"] if is_cask else package_detail["name"] # For ex: 'sqlite' - all_valid_names = set(package_detail.get("aliases", [])) # For ex: {'sqlite3'} - all_valid_names.add(canonical_name) + # "brew info" can lookup by name, full_name, token, full_token, or aliases + # In addition, any name can be prefixed by the tap. + # Any of these can be supplied by the user as the package name. In case + # of ambiguity, where a given name might match multiple packages, + # formulae are preferred over casks. For all other ambiguities, the + # results are an error. Note that in the homebrew/core and + # homebrew/cask taps, there are no "other" ambiguities. + if is_cask: # according to brew info + name = package_detail["token"] + full_name = package_detail["full_token"] + else: + name = package_detail["name"] + full_name = package_detail["full_name"] + + # Issue https://github.com/ansible-collections/community.general/issues/9803: + # name can include the tap as a prefix, in order to disambiguate, + # e.g. casks from identically named formulae. + # + # Issue https://github.com/ansible-collections/community.general/issues/10012: + # package_detail["tap"] is None if package is no longer available. 
+ tapped_name = [package_detail["tap"] + "/" + name] if package_detail["tap"] else [] + aliases = package_detail.get("aliases", []) + package_names = set([name, full_name] + tapped_name + aliases) + + # Finally, identify which of all those package names was the one supplied by the user. + package_names = package_names & set(self.packages) + if len(package_names) != 1: + self.failed = True + self.message = "Package names are missing or ambiguous: " + ", ".join(str(p) for p in package_names) + raise HomebrewException(self.message) # Then make sure the user provided name resurface. - return (all_valid_names & set(self.packages)).pop() + return package_names.pop() def _get_packages_info(self): cmd = [ @@ -780,13 +807,11 @@ def main(): argument_spec=dict( name=dict( aliases=["pkg", "package", "formula"], - required=False, type='list', elements='str', ), path=dict( default="/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin", - required=False, type='path', ), state=dict( @@ -808,13 +833,11 @@ def main(): type='bool', ), install_options=dict( - default=None, aliases=['options'], type='list', elements='str', ), upgrade_options=dict( - default=None, type='list', elements='str', ), @@ -831,7 +854,7 @@ def main(): p = module.params if p['name']: - packages = p['name'] + packages = [package_name.lower() for package_name in p['name']] else: packages = None diff --git a/plugins/modules/homebrew_cask.py b/plugins/modules/homebrew_cask.py index d69fd266a2..948f5c1fd1 100644 --- a/plugins/modules/homebrew_cask.py +++ b/plugins/modules/homebrew_cask.py @@ -425,10 +425,7 @@ class HomebrewCask(object): cmd = base_opts + [self.current_cask] rc, out, err = self.module.run_command(cmd) - if rc == 0: - return True - else: - return False + return rc == 0 def _get_brew_version(self): if self.brew_version: @@ -436,11 +433,13 @@ class HomebrewCask(object): cmd = [self.brew_path, '--version'] - rc, out, err = self.module.run_command(cmd, check_rc=True) + dummy, out, dummy = self.module.run_command(cmd, check_rc=True) - # get version string from first line of "brew --version" output - version = out.split('\n')[0].split(' ')[1] - self.brew_version = version + pattern = r"Homebrew (.*)(\d+\.\d+\.\d+)(-dirty)?" 
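# Checking the pattern above against sample `brew --version` output: the regex
# tolerates surrounding text and an optional -dirty suffix, and groups()[1]
# always holds the dotted version. Sample strings are illustrative.
import re

pattern = r"Homebrew (.*)(\d+\.\d+\.\d+)(-dirty)?"
for out in ("Homebrew 4.4.15", "Homebrew 4.4.15-dirty"):
    rematch = re.search(pattern, out)
    assert rematch, "unexpected brew --version output"
    print(rematch.groups()[1])   # 4.4.15 in both cases

# And a standalone re-run of the _extract_package_name set logic from the
# homebrew hunk above, on a made-up package record:
package_detail = {'name': 'sqlite', 'full_name': 'sqlite',
                  'tap': 'homebrew/core', 'aliases': ['sqlite3']}
user_packages = {'sqlite3'}   # what the user put in the task

name, full_name = package_detail['name'], package_detail['full_name']
tapped = [package_detail['tap'] + '/' + name] if package_detail['tap'] else []
candidates = set([name, full_name] + tapped + package_detail['aliases'])

survivors = candidates & user_packages
assert len(survivors) == 1, 'missing or ambiguous package name'
print(survivors.pop())   # sqlite3 -- the user's own spelling wins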
+ rematch = re.search(pattern, out) + if not rematch: + self.module.fail_json(msg="Failed to match regex to get brew version", stdout=out) + self.brew_version = rematch.groups()[1] return self.brew_version def _brew_cask_command_is_deprecated(self): @@ -735,13 +734,11 @@ def main(): argument_spec=dict( name=dict( aliases=["pkg", "package", "cask"], - required=False, type='list', elements='str', ), path=dict( default="/usr/local/bin:/opt/homebrew/bin", - required=False, type='path', ), state=dict( @@ -754,7 +751,6 @@ def main(): ), sudo_password=dict( type="str", - required=False, no_log=True, ), update_homebrew=dict( @@ -762,7 +758,6 @@ def main(): type='bool', ), install_options=dict( - default=None, aliases=['options'], type='list', elements='str', diff --git a/plugins/modules/homebrew_services.py b/plugins/modules/homebrew_services.py index 5d84563d33..750e771fc3 100644 --- a/plugins/modules/homebrew_services.py +++ b/plugins/modules/homebrew_services.py @@ -72,7 +72,7 @@ EXAMPLES = r""" - name: Remove the foo service (equivalent to `brew services stop foo`) community.general.homebrew_services: name: foo - service_state: absent + state: absent """ RETURN = r""" diff --git a/plugins/modules/homebrew_tap.py b/plugins/modules/homebrew_tap.py index f070ccccc7..f50472f90d 100644 --- a/plugins/modules/homebrew_tap.py +++ b/plugins/modules/homebrew_tap.py @@ -220,11 +220,10 @@ def main(): module = AnsibleModule( argument_spec=dict( name=dict(aliases=['tap'], type='list', required=True, elements='str'), - url=dict(default=None, required=False), + url=dict(), state=dict(default='present', choices=['present', 'absent']), path=dict( default="/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin", - required=False, type='path', ), ), diff --git a/plugins/modules/homectl.py b/plugins/modules/homectl.py index 72f1882dec..f93867d03b 100644 --- a/plugins/modules/homectl.py +++ b/plugins/modules/homectl.py @@ -65,7 +65,7 @@ options: type: str resize: description: - - When used with O(disksize) this will attempt to resize the home directory immediately. + - When used with O(disksize) this attempts to resize the home directory immediately. default: false type: bool realname: @@ -218,53 +218,54 @@ EXAMPLES = r""" RETURN = r""" data: - description: Dictionary returned from C(homectl inspect -j). - returned: success - type: dict - sample: { - "data": { - "binding": { - "e9ed2a5b0033427286b228e97c1e8343": { - "fileSystemType": "btrfs", - "fileSystemUuid": "7bd59491-2812-4642-a492-220c3f0c6c0b", - "gid": 60268, - "imagePath": "/home/james.home", - "luksCipher": "aes", - "luksCipherMode": "xts-plain64", - "luksUuid": "7f05825a-2c38-47b4-90e1-f21540a35a81", - "luksVolumeKeySize": 32, - "partitionUuid": "5a906126-d3c8-4234-b230-8f6e9b427b2f", - "storage": "luks", - "uid": 60268 - } - }, + description: Dictionary returned from C(homectl inspect -j). 
+ returned: success + type: dict + sample: + { + "data": { + "binding": { + "e9ed2a5b0033427286b228e97c1e8343": { + "fileSystemType": "btrfs", + "fileSystemUuid": "7bd59491-2812-4642-a492-220c3f0c6c0b", + "gid": 60268, + "imagePath": "/home/james.home", + "luksCipher": "aes", + "luksCipherMode": "xts-plain64", + "luksUuid": "7f05825a-2c38-47b4-90e1-f21540a35a81", + "luksVolumeKeySize": 32, + "partitionUuid": "5a906126-d3c8-4234-b230-8f6e9b427b2f", + "storage": "luks", + "uid": 60268 + } + }, + "diskSize": 3221225472, + "disposition": "regular", + "lastChangeUSec": 1641941238208691, + "lastPasswordChangeUSec": 1641941238208691, + "privileged": { + "hashedPassword": [ + "$6$ov9AKni.trf76inT$tTtfSyHgbPTdUsG0CvSSQZXGqFGdHKQ9Pb6e0BTZhDmlgrL/vA5BxrXduBi8u/PCBiYUffGLIkGhApjKMK3bV." + ] + }, + "signature": [ + { + "data": "o6zVFbymcmk4YTVaY6KPQK23YCp+VkXdGEeniZeV1pzIbFzoaZBvVLPkNKMoPAQbodY5BYfBtuy41prNL78qAg==", + "key": "-----BEGIN PUBLIC KEY-----\nMCowBQYDK2VwAyEAbs7ELeiEYBxkUQhxZ+5NGyu6J7gTtZtZ5vmIw3jowcY=\n-----END PUBLIC KEY-----\n" + } + ], + "status": { + "e9ed2a5b0033427286b228e97c1e8343": { + "diskCeiling": 21845405696, + "diskFloor": 268435456, "diskSize": 3221225472, - "disposition": "regular", - "lastChangeUSec": 1641941238208691, - "lastPasswordChangeUSec": 1641941238208691, - "privileged": { - "hashedPassword": [ - "$6$ov9AKni.trf76inT$tTtfSyHgbPTdUsG0CvSSQZXGqFGdHKQ9Pb6e0BTZhDmlgrL/vA5BxrXduBi8u/PCBiYUffGLIkGhApjKMK3bV." - ] - }, - "signature": [ - { - "data": "o6zVFbymcmk4YTVaY6KPQK23YCp+VkXdGEeniZeV1pzIbFzoaZBvVLPkNKMoPAQbodY5BYfBtuy41prNL78qAg==", - "key": "-----BEGIN PUBLIC KEY-----\nMCowBQYDK2VwAyEAbs7ELeiEYBxkUQhxZ+5NGyu6J7gTtZtZ5vmIw3jowcY=\n-----END PUBLIC KEY-----\n" - } - ], - "status": { - "e9ed2a5b0033427286b228e97c1e8343": { - "diskCeiling": 21845405696, - "diskFloor": 268435456, - "diskSize": 3221225472, - "service": "io.systemd.Home", - "signedLocally": true, - "state": "inactive" - } - }, - "userName": "james", - } + "service": "io.systemd.Home", + "signedLocally": true, + "state": "inactive" + } + }, + "userName": "james" + } } """ diff --git a/plugins/modules/honeybadger_deployment.py b/plugins/modules/honeybadger_deployment.py index c653643e33..2512fc2642 100644 --- a/plugins/modules/honeybadger_deployment.py +++ b/plugins/modules/honeybadger_deployment.py @@ -51,7 +51,7 @@ options: default: "https://api.honeybadger.io/v1/deploys" validate_certs: description: - - If V(false), SSL certificates for the target url will not be validated. This should only be used on personally controlled + - If V(false), SSL certificates for the target URL are not validated. This should only be used on personally controlled sites using self-signed certificates. 
     type: bool
     default: true
@@ -67,7 +67,7 @@ EXAMPLES = r"""
     repo: 'git@github.com:user/repo.git'
 """

-RETURN = """# """
+RETURN = """#"""

 import traceback
@@ -88,9 +88,9 @@ def main():
             token=dict(required=True, no_log=True),
             environment=dict(required=True),
             user=dict(required=False),
-            repo=dict(required=False),
-            revision=dict(required=False),
-            url=dict(required=False, default='https://api.honeybadger.io/v1/deploys'),
+            repo=dict(),
+            revision=dict(),
+            url=dict(default='https://api.honeybadger.io/v1/deploys'),
             validate_certs=dict(default=True, type='bool'),
         ),
         supports_check_mode=True
diff --git a/plugins/modules/hpilo_boot.py b/plugins/modules/hpilo_boot.py
index 60f3ecc958..f04aaaed20 100644
--- a/plugins/modules/hpilo_boot.py
+++ b/plugins/modules/hpilo_boot.py
@@ -14,8 +14,8 @@ module: hpilo_boot
 author: Dag Wieers (@dagwieers)
 short_description: Boot system using specific media through HP iLO interface
 description:
-  - 'This module boots a system through its HP iLO interface. The boot media can be one of: V(cdrom), V(floppy), V(hdd),
-    V(network), or V(usb).'
+  - 'This module boots a system through its HP iLO interface. The boot media can be one of: V(cdrom), V(floppy), V(hdd), V(network),
+    or V(usb).'
   - This module requires the hpilo python module.
 extends_documentation_fragment:
   - community.general.attributes
@@ -55,19 +55,19 @@ options:
   state:
     description:
       - The state of the boot media.
-      - "V(no_boot): Do not boot from the device"
-      - "V(boot_once): Boot from the device once and then notthereafter"
-      - "V(boot_always): Boot from the device each time the server is rebooted"
-      - "V(connect): Connect the virtual media device and set to boot_always"
-      - "V(disconnect): Disconnects the virtual media device and set to no_boot"
-      - "V(poweroff): Power off the server"
+      - 'V(no_boot): Do not boot from the device.'
+      - 'V(boot_once): Boot from the device once and then not thereafter.'
+      - 'V(boot_always): Boot from the device each time the server is rebooted.'
+      - 'V(connect): Connect the virtual media device and set it to boot_always.'
+      - 'V(disconnect): Disconnect the virtual media device and set it to no_boot.'
+      - 'V(poweroff): Power off the server.'
     default: boot_once
     type: str
     choices: ["boot_always", "boot_once", "connect", "disconnect", "no_boot", "poweroff"]
   force:
     description:
       - Whether to force a reboot (even when the system is already booted).
-      - As a safeguard, without force, hpilo_boot will refuse to reboot a server that is already running.
+      - As a safeguard, without force, M(community.general.hpilo_boot) refuses to reboot a server that is already running.
     default: false
     type: bool
   ssl_version:
@@ -76,6 +76,12 @@ options:
     default: TLSv1
     type: str
     choices: ["SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2"]
+  idempotent_boot_once:
+    description:
+      - This option makes O(state=boot_once) succeed instead of failing when the server is already powered on.
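# The interaction between force and the new idempotent_boot_once when the
# server is already powered on reduces to a small decision table; the real
# branch is in the main() hunk below. Illustrative sketch, not the module's code:
def power_on_action(force, idempotent_boot_once):
    if force:
        return 'reboot'   # warm-boot the running server
    if idempotent_boot_once:
        return 'no-op'    # succeed without touching power state
    return 'fail'         # legacy behavior: refuse to proceed

for force in (False, True):
    for idem in (False, True):
        print(force, idem, '->', power_on_action(force, idem))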
+ type: bool + default: false + version_added: 10.6.0 requirements: - python-hpilo notes: @@ -138,6 +144,7 @@ def main(): image=dict(type='str'), state=dict(type='str', default='boot_once', choices=['boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot', 'poweroff']), force=dict(type='bool', default=False), + idempotent_boot_once=dict(type='bool', default=False), ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']), ) ) @@ -152,6 +159,7 @@ def main(): image = module.params['image'] state = module.params['state'] force = module.params['force'] + idempotent_boot_once = module.params['idempotent_boot_once'] ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v')) ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version) @@ -187,13 +195,21 @@ def main(): power_status = ilo.get_host_power_status() - if not force and power_status == 'ON': - module.fail_json(msg='HP iLO (%s) reports that the server is already powered on !' % host) - if power_status == 'ON': - ilo.warm_boot_server() -# ilo.cold_boot_server() - changed = True + if not force and not idempotent_boot_once: + # module.deprecate( + # 'The failure of the module when the server is already powered on is being deprecated.' + # ' Please set the parameter "idempotent_boot_once=true" to start using the new behavior.', + # version='11.0.0', + # collection_name='community.general' + # ) + module.fail_json(msg='HP iLO (%s) reports that the server is already powered on !' % host) + elif not force and idempotent_boot_once: + pass + elif force: + ilo.warm_boot_server() + # ilo.cold_boot_server() + changed = True else: ilo.press_pwr_btn() # ilo.reset_server() diff --git a/plugins/modules/hpilo_info.py b/plugins/modules/hpilo_info.py index 70eecb8b0e..90680603e8 100644 --- a/plugins/modules/hpilo_info.py +++ b/plugins/modules/hpilo_info.py @@ -121,7 +121,7 @@ hw_uuid: host_power_status: description: - Power status of host. - - Will be one of V(ON), V(OFF) and V(UNKNOWN). + - It is one of V(ON), V(OFF) and V(UNKNOWN). returned: always type: str sample: "ON" diff --git a/plugins/modules/hponcfg.py b/plugins/modules/hponcfg.py index 654ba2c710..c2d32c7d89 100644 --- a/plugins/modules/hponcfg.py +++ b/plugins/modules/hponcfg.py @@ -97,7 +97,6 @@ class HPOnCfg(ModuleHelper): verbose=cmd_runner_fmt.as_bool("-v"), minfw=cmd_runner_fmt.as_opt_val("-m"), ) - use_old_vardict = False def __run__(self): runner = CmdRunner( diff --git a/plugins/modules/htpasswd.py b/plugins/modules/htpasswd.py index de94765130..d8a755476f 100644 --- a/plugins/modules/htpasswd.py +++ b/plugins/modules/htpasswd.py @@ -46,10 +46,13 @@ options: description: - Hashing scheme to be used. As well as the four choices listed here, you can also use any other hash supported by passlib, such as V(portable_apache22) and V(host_apache24); or V(md5_crypt) and V(sha256_crypt), which are Linux passwd hashes. - Only some schemes in addition to the four choices below will be compatible with Apache or Nginx, and supported schemes - depend on passlib version and its dependencies. + Only some schemes in addition to the four choices below are compatible with Apache or Nginx, and supported schemes + depend on C(passlib) version and its dependencies. - See U(https://passlib.readthedocs.io/en/stable/lib/passlib.apache.html#passlib.apache.HtpasswdFile) parameter C(default_scheme). 
- 'Some of the available choices might be: V(apr_md5_crypt), V(des_crypt), V(ldap_sha1), V(plaintext).' + - 'B(WARNING): The module has no mechanism to determine the O(hash_scheme) of an existing entry, therefore, it does + not detect whether the O(hash_scheme) has changed. If you want to change the scheme, you must remove the existing + entry and then create a new one using the new scheme.' aliases: [crypt_scheme] state: type: str @@ -63,8 +66,8 @@ options: type: bool default: true description: - - Used with O(state=present). If V(true), the file will be created if it does not exist. Conversely, if set to V(false) - and the file does not exist it will fail. + - Used with O(state=present). If V(true), the file is created if it does not exist. Conversely, if set to V(false) and + the file does not exist, it fails. notes: - This module depends on the C(passlib) Python library, which needs to be installed on all target systems. - 'On Debian < 11, Ubuntu <= 20.04, or Fedora: install C(python-passlib).' @@ -85,7 +88,7 @@ EXAMPLES = r""" password: '9s36?;fyNp' owner: root group: www-data - mode: 0640 + mode: '0640' - name: Remove a user from a password file community.general.htpasswd: @@ -188,9 +191,9 @@ def main(): arg_spec = dict( path=dict(type='path', required=True, aliases=["dest", "destfile"]), name=dict(type='str', required=True, aliases=["username"]), - password=dict(type='str', required=False, default=None, no_log=True), - hash_scheme=dict(type='str', required=False, default="apr_md5_crypt", aliases=["crypt_scheme"]), - state=dict(type='str', required=False, default="present", choices=["present", "absent"]), + password=dict(type='str', no_log=True), + hash_scheme=dict(type='str', default="apr_md5_crypt", aliases=["crypt_scheme"]), + state=dict(type='str', default="present", choices=["present", "absent"]), create=dict(type='bool', default=True), ) @@ -238,8 +241,8 @@ def main(): (msg, changed) = present(path, username, password, hash_scheme, create, check_mode) elif state == 'absent': if not os.path.exists(path): - module.exit_json(msg="%s not present" % username, - warnings="%s does not exist" % path, changed=False) + module.warn("%s does not exist" % path) + module.exit_json(msg="%s not present" % username, changed=False) (msg, changed) = absent(path, username, check_mode) else: module.fail_json(msg="Invalid state: %s" % state) diff --git a/plugins/modules/hwc_ecs_instance.py b/plugins/modules/hwc_ecs_instance.py index f01b7c48fd..13becdf07f 100644 --- a/plugins/modules/hwc_ecs_instance.py +++ b/plugins/modules/hwc_ecs_instance.py @@ -442,7 +442,7 @@ created: disk_config_type: description: - Specifies the disk configuration type. MANUAL is The image space is not expanded. AUTO is the image space of the system - disk will be expanded to be as same as the flavor. + disk is expanded to be as same as the flavor. type: str returned: success host_name: diff --git a/plugins/modules/hwc_evs_disk.py b/plugins/modules/hwc_evs_disk.py index 0763c07b01..0d57caf6cb 100644 --- a/plugins/modules/hwc_evs_disk.py +++ b/plugins/modules/hwc_evs_disk.py @@ -70,8 +70,8 @@ options: - SSD specifies the ultra-high I/O disk type. - SAS specifies the high I/O disk type. - SATA specifies the common I/O disk type. - - If the specified disk type is not available in the AZ, the disk will fail to create. If the EVS disk is created from - a snapshot, the volume_type field must be the same as that of the snapshot's source disk. + - If the specified disk type is not available in the AZ, the disk creation fails. 
If the EVS disk is created from a + snapshot, the volume_type field must be the same as that of the snapshot's source disk. type: str required: true backup_id: @@ -92,9 +92,9 @@ options: required: false enable_scsi: description: - - If this parameter is set to True, the disk device type will be SCSI, which allows ECS OSs to directly access underlying - storage media. SCSI reservation command is supported. If this parameter is set to False, the disk device type will - be VBD, which supports only simple SCSI read/write commands. + - If this parameter is set to V(true), the disk device type is SCSI, which allows ECS OSs to directly access underlying + storage media. SCSI reservation command is supported. If this parameter is set to V(false), the disk device type is + VBD, which supports only simple SCSI read/write commands. - If parameter enable_share is set to True and this parameter is not specified, shared SCSI disks are created. SCSI EVS disks cannot be created from backups, which means that this parameter cannot be True if backup_id has been specified. type: bool @@ -167,8 +167,8 @@ volume_type: - SSD specifies the ultra-high I/O disk type. - SAS specifies the high I/O disk type. - SATA specifies the common I/O disk type. - - If the specified disk type is not available in the AZ, the disk will fail to create. If the EVS disk is created from - a snapshot, the volume_type field must be the same as that of the snapshot's source disk. + - If the specified disk type is not available in the AZ, the disk creation fails. If the EVS disk is created from a snapshot, + the volume_type field must be the same as that of the snapshot's source disk. type: str returned: success backup_id: @@ -189,8 +189,8 @@ enable_full_clone: returned: success enable_scsi: description: - - If this parameter is set to True, the disk device type will be SCSI, which allows ECS OSs to directly access underlying - storage media. SCSI reservation command is supported. If this parameter is set to False, the disk device type will be + - If this parameter is set to V(true), the disk device type is SCSI, which allows ECS OSs to directly access underlying + storage media. SCSI reservation command is supported. If this parameter is set to V(false), the disk device type is VBD, which supports only simple SCSI read/write commands. - If parameter enable_share is set to True and this parameter is not specified, shared SCSI disks are created. SCSI EVS disks cannot be created from backups, which means that this parameter cannot be True if backup_id has been specified. diff --git a/plugins/modules/hwc_network_vpc.py b/plugins/modules/hwc_network_vpc.py index 5c0c2c8b61..d34e428d6a 100644 --- a/plugins/modules/hwc_network_vpc.py +++ b/plugins/modules/hwc_network_vpc.py @@ -86,22 +86,22 @@ EXAMPLES = r""" RETURN = r""" id: description: - - The id of vpc. + - The ID of VPC. type: str returned: success name: description: - - The name of vpc. + - The name of VPC. type: str returned: success cidr: description: - - The range of available subnets in the vpc. + - The range of available subnets in the VPC. type: str returned: success status: description: - - The status of vpc. + - The status of VPC. type: str returned: success routes: @@ -117,12 +117,12 @@ routes: returned: success next_hop: description: - - The next hop of a route. If the route type is peering, it will provide VPC peering connection ID. + - The next hop of a route. If the route type is peering, it provides VPC peering connection ID. 
   type: str
   returned: success
 enable_shared_snat:
   description:
-    - Show whether the shared snat is enabled.
+    - Show whether the shared SNAT is enabled.
   type: bool
   returned: success
 """
diff --git a/plugins/modules/hwc_vpc_eip.py b/plugins/modules/hwc_vpc_eip.py
index b818fe0d86..e830c2b14b 100644
--- a/plugins/modules/hwc_vpc_eip.py
+++ b/plugins/modules/hwc_vpc_eip.py
@@ -92,7 +92,7 @@ options:
     required: false
   ip_version:
     description:
-      - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this parameter is left blank, an IPv4 address will be assigned.
+      - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this parameter is left blank, an IPv4 address is assigned.
     type: int
     required: false
   ipv4_address:
@@ -193,7 +193,7 @@ enterprise_project_id:
   returned: success
 ip_version:
   description:
-    - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this parameter is left blank, an IPv4 address will be assigned.
+    - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this parameter is left blank, an IPv4 address is assigned.
   type: int
   returned: success
 ipv4_address:
diff --git a/plugins/modules/hwc_vpc_private_ip.py b/plugins/modules/hwc_vpc_private_ip.py
index 695c644cb9..e665568774 100644
--- a/plugins/modules/hwc_vpc_private_ip.py
+++ b/plugins/modules/hwc_vpc_private_ip.py
@@ -21,7 +21,7 @@ notes:
   - If O(id) option is provided, it takes precedence over O(subnet_id), O(ip_address) for private IP selection.
   - O(subnet_id), O(ip_address) are used for private IP selection. If more than one private IP with these options exists,
     execution is aborted.
-  - No parameter support updating. If one of option is changed, the module will create a new resource.
+  - No parameter supports updating. If one of the options is changed, the module creates a new resource.
 version_added: '0.2.0'
 author: Huawei Inc. (@huaweicloud)
 requirements:
diff --git a/plugins/modules/hwc_vpc_route.py b/plugins/modules/hwc_vpc_route.py
index 85224fd4c8..20bbba6cd8 100644
--- a/plugins/modules/hwc_vpc_route.py
+++ b/plugins/modules/hwc_vpc_route.py
@@ -21,7 +21,7 @@ notes:
   - If O(id) option is provided, it takes precedence over O(destination), O(vpc_id), O(type), and O(next_hop) for route
     selection.
   - O(destination), O(vpc_id), O(type) and O(next_hop) are used for route selection. If more than one route with these options
     exists, execution is aborted.
-  - No parameter support updating. If one of option is changed, the module will create a new resource.
+  - No parameter supports updating. If one of the options is changed, the module creates a new resource.
 version_added: '0.2.0'
 author: Huawei Inc. (@huaweicloud)
 requirements:
diff --git a/plugins/modules/hwc_vpc_security_group.py b/plugins/modules/hwc_vpc_security_group.py
index 9f53b49c0d..e1b2b41ae4 100644
--- a/plugins/modules/hwc_vpc_security_group.py
+++ b/plugins/modules/hwc_vpc_security_group.py
@@ -22,7 +22,7 @@ notes:
     selection.
   - O(name), O(enterprise_project_id) and O(vpc_id) are used for security group selection. If more than one security group
     with these options exists, execution is aborted.
-  - No parameter support updating. If one of option is changed, the module will create a new resource.
+  - No parameter supports updating. If one of the options is changed, the module creates a new resource.
 version_added: '0.2.0'
 author: Huawei Inc. (@huaweicloud)
(@huaweicloud) requirements: diff --git a/plugins/modules/hwc_vpc_security_group_rule.py b/plugins/modules/hwc_vpc_security_group_rule.py index 0848901cd5..42f854a029 100644 --- a/plugins/modules/hwc_vpc_security_group_rule.py +++ b/plugins/modules/hwc_vpc_security_group_rule.py @@ -21,7 +21,7 @@ notes: - If O(id) option is provided, it takes precedence over O(security_group_id) for security group rule selection. - O(security_group_id) is used for security group rule selection. If more than one security group rule with this options exists, execution is aborted. - - No parameter support updating. If one of option is changed, the module will create a new resource. + - No parameter supports updating. If any option is changed, the module creates a new resource. version_added: '0.2.0' author: Huawei Inc. (@huaweicloud) requirements: diff --git a/plugins/modules/hwc_vpc_subnet.py b/plugins/modules/hwc_vpc_subnet.py index 84a9219370..b9af890688 100644 --- a/plugins/modules/hwc_vpc_subnet.py +++ b/plugins/modules/hwc_vpc_subnet.py @@ -86,7 +86,7 @@ options: required: false dns_address: description: - - Specifies the DNS server addresses for subnet. The address in the head will be used first. + - Specifies the DNS server addresses for the subnet. The address at the head of the list is used first. type: list elements: str required: false @@ -148,7 +148,7 @@ dhcp_enable: returned: success dns_address: description: - - Specifies the DNS server addresses for subnet. The address in the head will be used first. + - Specifies the DNS server addresses for the subnet. The address at the head of the list is used first. type: list returned: success """ diff --git a/plugins/modules/ibm_sa_domain.py b/plugins/modules/ibm_sa_domain.py index 00b9ee1239..d34474b551 100644 --- a/plugins/modules/ibm_sa_domain.py +++ b/plugins/modules/ibm_sa_domain.py @@ -36,7 +36,7 @@ options: type: str ldap_id: description: - - LDAP id to add to the domain. + - LDAP ID to add to the domain. required: false type: str size: diff --git a/plugins/modules/ibm_sa_host.py b/plugins/modules/ibm_sa_host.py index f6613b3b29..b3d80a6b62 100644 --- a/plugins/modules/ibm_sa_host.py +++ b/plugins/modules/ibm_sa_host.py @@ -41,8 +41,8 @@ options: type: str domain: description: - - The domains the cluster will be attached to. To include more than one domain, separate domain names with commas. To - include all existing domains, use an asterisk (V(*)). + - The domains the cluster is attached to. To include more than one domain, separate domain names with commas. To include + all existing domains, use an asterisk (V(*)). required: false type: str iscsi_chap_name: diff --git a/plugins/modules/icinga2_host.py b/plugins/modules/icinga2_host.py index 271a6387bc..d78f607aae 100644 --- a/plugins/modules/icinga2_host.py +++ b/plugins/modules/icinga2_host.py @@ -30,13 +30,13 @@ options: - HTTP, HTTPS, or FTP URL in the form V((http|https|ftp\)://[user[:pass]]@host.domain[:port]/path). use_proxy: description: - - If V(false), it will not use a proxy, even if one is defined in an environment variable on the target hosts. + - If V(false), it does not use a proxy, even if one is defined in an environment variable on the target hosts. type: bool default: true validate_certs: description: - - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using - self-signed certificates. + - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed + certificates.
type: bool default: true url_username: @@ -48,12 +48,12 @@ options: type: str description: - The password for use in HTTP basic authentication. - - If the O(url_username) parameter is not specified, the O(url_password) parameter will not be used. + - If the O(url_username) parameter is not specified, the O(url_password) parameter is not used. force_basic_auth: description: - - Httplib2, the library used by the uri module only sends authentication information when a webservice responds to an - initial request with a 401 status. Since some basic auth services do not properly send a 401, logins will fail. This - option forces the sending of the Basic authentication header upon initial request. + - C(httplib2), the library used by Ansible's HTTP request code, only sends authentication information when a webservice + responds to an initial request with a 401 status. Since some basic auth services do not properly send a 401, logins + may fail. This option forces the sending of the Basic authentication header upon initial request. type: bool default: false client_cert: @@ -235,11 +235,11 @@ def main(): state=dict(default="present", choices=["absent", "present"]), name=dict(required=True, aliases=['host']), zone=dict(), - template=dict(default=None), + template=dict(), check_command=dict(default="hostalive"), - display_name=dict(default=None), + display_name=dict(), ip=dict(), - variables=dict(type='dict', default=None), + variables=dict(type='dict'), ) # Define the main module diff --git a/plugins/modules/idrac_redfish_command.py b/plugins/modules/idrac_redfish_command.py index 531da53162..fa4f29e5f5 100644 --- a/plugins/modules/idrac_redfish_command.py +++ b/plugins/modules/idrac_redfish_command.py @@ -16,6 +16,7 @@ description: - For use with Dell iDRAC operations that require Redfish OEM extensions. extends_documentation_fragment: - community.general.attributes + - community.general.redfish attributes: check_mode: support: none @@ -62,6 +63,12 @@ options: - ID of the System, Manager or Chassis to modify.
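The force_basic_auth description above refers to preemptive HTTP Basic authentication. A minimal standalone sketch (not code from this patch; the helper name is made up) of what sending the Basic header on the initial request means:

```python
# Hedged sketch: build the Authorization header up front instead of waiting
# for a 401 challenge -- this is the behavior force_basic_auth turns on.
import base64


def basic_auth_header(username, password):
    token = base64.b64encode(("%s:%s" % (username, password)).encode("utf-8"))
    return {"Authorization": "Basic " + token.decode("ascii")}


print(basic_auth_header("icinga", "secret"))
# {'Authorization': 'Basic aWNpbmdhOnNlY3JldA=='}
```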
type: str version_added: '0.2.0' + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 + ciphers: + version_added: 10.6.0 author: "Jose Delarosa (@jose-delarosa)" """ @@ -93,7 +100,7 @@ return_values: import re from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils, REDFISH_COMMON_ARGUMENT_SPEC from ansible.module_utils.common.text.converters import to_native @@ -147,17 +154,19 @@ CATEGORY_COMMANDS_ALL = { def main(): result = {} return_values = {} + argument_spec = dict( + category=dict(required=True), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + timeout=dict(type='int', default=10), + resource_id=dict() + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) module = AnsibleModule( - argument_spec=dict( - category=dict(required=True), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - timeout=dict(type='int', default=10), - resource_id=dict() - ), + argument_spec, required_together=[ ('username', 'password'), ], diff --git a/plugins/modules/idrac_redfish_config.py b/plugins/modules/idrac_redfish_config.py index 97d7a62d04..466e0b344c 100644 --- a/plugins/modules/idrac_redfish_config.py +++ b/plugins/modules/idrac_redfish_config.py @@ -16,6 +16,7 @@ description: - Builds Redfish URIs locally and sends them to remote iDRAC controllers to set or update a configuration attribute. extends_documentation_fragment: - community.general.attributes + - community.general.redfish attributes: check_mode: support: none @@ -71,6 +72,12 @@ options: - ID of the System, Manager or Chassis to modify. 
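The idrac_redfish_command change above is one instance of a pattern this patch applies to all of the iDRAC and iLO modules: the module-specific argument spec becomes a plain dict that is merged with the shared Redfish spec. A condensed sketch of just that step (the exact keys inside REDFISH_COMMON_ARGUMENT_SPEC are not shown in this diff; per the documentation additions it supplies at least validate_certs, ca_path, and ciphers):

```python
# Hedged sketch of the argument-spec merge used across these modules.
from ansible_collections.community.general.plugins.module_utils.redfish_utils import (
    REDFISH_COMMON_ARGUMENT_SPEC,
)

argument_spec = dict(
    category=dict(required=True),
    command=dict(required=True, type='list', elements='str'),
    baseuri=dict(required=True),
    timeout=dict(type='int', default=10),
)
# A single update() keeps the shared Redfish options identical in every module.
argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC)
```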
type: str version_added: '0.2.0' + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 + ciphers: + version_added: 10.6.0 author: "Jose Delarosa (@jose-delarosa)" """ @@ -154,7 +161,7 @@ from ansible.module_utils.common.validation import ( check_mutually_exclusive, check_required_arguments ) -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils, REDFISH_COMMON_ARGUMENT_SPEC from ansible.module_utils.common.text.converters import to_native @@ -246,18 +253,20 @@ CATEGORY_COMMANDS_MUTUALLY_EXCLUSIVE = { def main(): result = {} + argument_spec = dict( + category=dict(required=True), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + manager_attributes=dict(type='dict', default={}), + timeout=dict(type='int', default=10), + resource_id=dict() + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) module = AnsibleModule( - argument_spec=dict( - category=dict(required=True), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - manager_attributes=dict(type='dict', default={}), - timeout=dict(type='int', default=10), - resource_id=dict() - ), + argument_spec, required_together=[ ('username', 'password'), ], diff --git a/plugins/modules/idrac_redfish_info.py b/plugins/modules/idrac_redfish_info.py index 3a8ea8103f..4b9745f7c2 100644 --- a/plugins/modules/idrac_redfish_info.py +++ b/plugins/modules/idrac_redfish_info.py @@ -17,6 +17,7 @@ description: extends_documentation_fragment: - community.general.attributes - community.general.attributes.info_module + - community.general.redfish attributes: check_mode: version_added: 3.3.0 @@ -57,6 +58,12 @@ options: - Timeout in seconds for HTTP requests to iDRAC. 
default: 10 type: int + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 + ciphers: + version_added: 10.6.0 author: "Jose Delarosa (@jose-delarosa)" """ @@ -124,7 +131,7 @@ msg: """ from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils, REDFISH_COMMON_ARGUMENT_SPEC from ansible.module_utils.common.text.converters import to_native @@ -177,16 +184,18 @@ CATEGORY_COMMANDS_ALL = { def main(): result = {} + argument_spec = dict( + category=dict(required=True), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + timeout=dict(type='int', default=10) + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) module = AnsibleModule( - argument_spec=dict( - category=dict(required=True), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - timeout=dict(type='int', default=10) - ), + argument_spec, required_together=[ ('username', 'password'), ], diff --git a/plugins/modules/ilo_redfish_command.py b/plugins/modules/ilo_redfish_command.py index 3e698fc049..52b08f8654 100644 --- a/plugins/modules/ilo_redfish_command.py +++ b/plugins/modules/ilo_redfish_command.py @@ -19,6 +19,7 @@ attributes: support: none extends_documentation_fragment: - community.general.attributes + - community.general.redfish options: category: required: true @@ -58,6 +59,12 @@ options: - Timeout in seconds for HTTP requests to iLO. default: 60 type: int + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 + ciphers: + version_added: 10.6.0 author: - Varni H P (@varini-hp) """ @@ -96,22 +103,25 @@ CATEGORY_COMMANDS_ALL = { } from ansible_collections.community.general.plugins.module_utils.ilo_redfish_utils import iLORedfishUtils +from ansible_collections.community.general.plugins.module_utils.redfish_utils import REDFISH_COMMON_ARGUMENT_SPEC from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native def main(): result = {} + argument_spec = dict( + category=dict(required=True, choices=list(CATEGORY_COMMANDS_ALL.keys())), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + timeout=dict(type="int", default=60), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True) + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) module = AnsibleModule( - argument_spec=dict( - category=dict(required=True, choices=list(CATEGORY_COMMANDS_ALL.keys())), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - timeout=dict(type="int", default=60), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True) - ), + argument_spec, required_together=[ ('username', 'password'), ], diff --git a/plugins/modules/ilo_redfish_config.py b/plugins/modules/ilo_redfish_config.py index fdda339ab3..95c45590e1 100644 --- a/plugins/modules/ilo_redfish_config.py +++ b/plugins/modules/ilo_redfish_config.py @@ -15,6 +15,7 @@ description: - For use with HPE iLO operations that require Redfish OEM extensions. 
extends_documentation_fragment: - community.general.attributes + - community.general.redfish attributes: check_mode: support: none @@ -65,6 +66,12 @@ options: description: - Value of the attribute to be configured. type: str + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 + ciphers: + version_added: 10.6.0 author: - "Bhavya B (@bhavya06)" """ @@ -113,25 +120,28 @@ CATEGORY_COMMANDS_ALL = { } from ansible_collections.community.general.plugins.module_utils.ilo_redfish_utils import iLORedfishUtils +from ansible_collections.community.general.plugins.module_utils.redfish_utils import REDFISH_COMMON_ARGUMENT_SPEC from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native def main(): result = {} + argument_spec = dict( + category=dict(required=True, choices=list( + CATEGORY_COMMANDS_ALL.keys())), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + attribute_name=dict(required=True), + attribute_value=dict(type='str'), + timeout=dict(type='int', default=10) + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) module = AnsibleModule( - argument_spec=dict( - category=dict(required=True, choices=list( - CATEGORY_COMMANDS_ALL.keys())), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - attribute_name=dict(required=True), - attribute_value=dict(type='str'), - timeout=dict(type='int', default=10) - ), + argument_spec, required_together=[ ('username', 'password'), ], diff --git a/plugins/modules/ilo_redfish_info.py b/plugins/modules/ilo_redfish_info.py index 3bd379e80a..daa43b004e 100644 --- a/plugins/modules/ilo_redfish_info.py +++ b/plugins/modules/ilo_redfish_info.py @@ -16,6 +16,7 @@ description: extends_documentation_fragment: - community.general.attributes - community.general.attributes.info_module + - community.general.redfish options: category: required: true @@ -51,6 +52,12 @@ options: - Timeout in seconds for HTTP requests to iLO. 
default: 10 type: int + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 + ciphers: + version_added: 10.6.0 author: - "Bhavya B (@bhavya06)" """ @@ -108,21 +115,24 @@ CATEGORY_COMMANDS_DEFAULT = { from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ilo_redfish_utils import iLORedfishUtils +from ansible_collections.community.general.plugins.module_utils.redfish_utils import REDFISH_COMMON_ARGUMENT_SPEC def main(): result = {} category_list = [] + argument_spec = dict( + category=dict(required=True, type='list', elements='str'), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + timeout=dict(type='int', default=10) + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) module = AnsibleModule( - argument_spec=dict( - category=dict(required=True, type='list', elements='str'), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - timeout=dict(type='int', default=10) - ), + argument_spec, required_together=[ ('username', 'password'), ], diff --git a/plugins/modules/imc_rest.py b/plugins/modules/imc_rest.py index d9313b973c..674ba0d2b3 100644 --- a/plugins/modules/imc_rest.py +++ b/plugins/modules/imc_rest.py @@ -57,8 +57,8 @@ options: description: - When used instead of O(path), sets the content of the API requests directly. - This may be convenient to template simple requests, for anything complex use the M(ansible.builtin.template) module. - - You can collate multiple IMC XML fragments and they will be processed sequentially in a single stream, the Cisco IMC - output is subsequently merged. + - You can collate multiple IMC XML fragments and they are processed sequentially in a single stream, the Cisco IMC output + is subsequently merged. - Parameter O(content) is mutual exclusive with parameter O(path). type: str protocol: @@ -71,12 +71,12 @@ options: description: - The socket level timeout in seconds. - This is the time that every single connection (every fragment) can spend. If this O(timeout) is reached, the module - will fail with a C(Connection failure) indicating that C(The read operation timed out). + fails with a C(Connection failure) indicating that C(The read operation timed out). default: 60 type: int validate_certs: description: - - If V(false), SSL certificates will not be validated. + - If V(false), SSL certificates are not validated. - This should only set to V(false) used on personally controlled sites using self-signed certificates. type: bool default: true @@ -84,8 +84,8 @@ notes: - The XML fragments do not need an authentication cookie, this is injected by the module automatically. - The Cisco IMC XML output is being translated to JSON using the Cobra convention. - Any configConfMo change requested has a return status of C(modified), even if there was no actual change from the previous - configuration. As a result, this module will always report a change on subsequent runs. In case this behaviour is fixed - in a future update to Cisco IMC, this module will automatically adapt. + configuration. As a result, this module always reports a change on subsequent runs. In case this behaviour is fixed in + a future update to Cisco IMC, this module is meant to automatically adapt. 
- If you get a C(Connection failure) related to C(The read operation timed out) increase the O(timeout) parameter. Some XML fragments can take longer than the default timeout. - More information about the IMC REST API is available from @@ -263,7 +263,7 @@ output: response="yes" errorCode="ERR-xml-parse-error" invocationResult="594" - errorDescr="XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed.\n"/> + errorDescr="XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed.\n" /> """ import os diff --git a/plugins/modules/imgadm.py b/plugins/modules/imgadm.py index 344bf9cc56..1c29e8a94b 100644 --- a/plugins/modules/imgadm.py +++ b/plugins/modules/imgadm.py @@ -44,7 +44,7 @@ options: choices: [present, absent, deleted, imported, updated, vacuumed] description: - State the object operated on should be in. V(imported) is an alias for for V(present) and V(deleted) for V(absent). - When set to V(vacuumed) and O(uuid=*), it will remove all unused images. + When set to V(vacuumed) and O(uuid=*), it removes all unused images. type: str type: diff --git a/plugins/modules/infinity.py b/plugins/modules/infinity.py index b2c9c2bcb8..cc54b46c51 100644 --- a/plugins/modules/infinity.py +++ b/plugins/modules/infinity.py @@ -67,7 +67,7 @@ options: type: str network_location: description: - - The parent network id for a given network. + - The parent network ID for a given network. type: int default: -1 network_type: @@ -105,7 +105,7 @@ EXAMPLES = r""" RETURN = r""" network_id: - description: Id for a given network. + description: ID for a given network. returned: success type: str sample: '1501' @@ -115,7 +115,8 @@ ip_info: type: str sample: '{"address": "192.168.10.3", "hostname": "", "FQDN": "", "domainname": "", "id": 3229}' network_info: - description: when reserving a LAN network from a Infinity supernet by providing network_size, the information about the reserved network is returned. + description: When reserving a LAN network from an Infinity supernet by providing network_size, the information about the + reserved network is returned. returned: success type: str sample: { @@ -125,7 +126,7 @@ network_info: "network_size": null, "description": null, "network_location": "3085", - "ranges": { "id": 0, "name": null,"first_ip": null,"type": null,"last_ip": null}, + "ranges": {"id": 0, "name": null, "first_ip": null, "type": null, "last_ip": null}, "network_type": "lan", "network_name": "'reserve_new_ansible_network'" } diff --git a/plugins/modules/influxdb_retention_policy.py b/plugins/modules/influxdb_retention_policy.py index 332050fb1c..824c34bb7d 100644 --- a/plugins/modules/influxdb_retention_policy.py +++ b/plugins/modules/influxdb_retention_policy.py @@ -59,8 +59,9 @@ options: default: false shard_group_duration: description: - - Determines the time range covered by a shard group. If specified it must be at least one hour. If not provided, it is determined - by InfluxDB by the rentention policy's duration. Supports complex duration expressions with multiple units. + - Determines the time range covered by a shard group. If specified it must be at least one hour. If not provided, it + is determined by InfluxDB based on the retention policy's duration. Supports complex duration expressions with multiple + units.
type: str version_added: '2.0.0' extends_documentation_fragment: diff --git a/plugins/modules/influxdb_user.py b/plugins/modules/influxdb_user.py index bc66ff693d..45410e76a5 100644 --- a/plugins/modules/influxdb_user.py +++ b/plugins/modules/influxdb_user.py @@ -37,7 +37,7 @@ options: admin: description: - Whether the user should be in the admin role or not. - - Since version 2.8, the role will also be updated. + - Since version 2.8, the role is also updated. default: false type: bool state: @@ -50,8 +50,8 @@ options: description: - Privileges to grant to this user. - Takes a list of dicts containing the "database" and "privilege" keys. - - If this argument is not provided, the current grants will be left alone. - - If an empty list is provided, all grants for the user will be removed. + - If this argument is not provided, the current grants are left alone. + - If an empty list is provided, all grants for the user are removed. type: list elements: dict extends_documentation_fragment: @@ -101,9 +101,7 @@ EXAMPLES = r""" state: absent """ -RETURN = r""" -#only defaults -""" +RETURN = r"""#""" import json @@ -219,7 +217,7 @@ def main(): argument_spec.update( state=dict(default='present', type='str', choices=['present', 'absent']), user_name=dict(required=True, type='str'), - user_password=dict(required=False, type='str', no_log=True), + user_password=dict(type='str', no_log=True), admin=dict(default='False', type='bool'), grants=dict(type='list', elements='dict'), ) diff --git a/plugins/modules/ini_file.py b/plugins/modules/ini_file.py index 61e6662d95..04fe92fa08 100644 --- a/plugins/modules/ini_file.py +++ b/plugins/modules/ini_file.py @@ -39,7 +39,7 @@ options: section: description: - Section name in INI file. This is added if O(state=present) automatically when a single value is being set. - - If being omitted, the O(option) will be placed before the first O(section). + - If omitted, the O(option) is placed before the first O(section). - Omitting O(section) is also required if the config format does not support sections. type: str section_has_values: @@ -63,7 +63,7 @@ options: elements: str description: - Among possibly multiple sections of the same name, select the first one that contains matching options and values. - - With O(state=present), if a suitable section is not found, a new section will be added, including the required options. + - With O(state=present), if a suitable section is not found, a new section is added, including the required options. - With O(state=absent), at most one O(section) is removed if it contains the values. version_added: 8.6.0 option: @@ -100,8 +100,8 @@ options: O(option)s with the same name are not touched. - If set to V(present) and O(exclusive) set to V(false) the specified O(option=values) lines are added, but the other O(option)s with the same name are not touched. - - If set to V(present) and O(exclusive) set to V(true) all given O(option=values) lines will be added and the other - O(option)s with the same name are removed. + - If set to V(present) and O(exclusive) set to V(true) all given O(option=values) lines are added and the other O(option)s + with the same name are removed. type: str choices: [absent, present] default: present @@ -126,8 +126,8 @@ options: version_added: 7.5.0 create: description: - - If set to V(false), the module will fail if the file does not already exist. - - By default it will create the file if it is missing. + - If set to V(false), the module fails if the file does not already exist.
+ - By default it creates the file if it is missing. type: bool default: true allow_no_value: @@ -268,21 +268,21 @@ from ansible.module_utils.common.text.converters import to_bytes, to_text def match_opt(option, line): option = re.escape(option) - return re.match('([#;]?)( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line) + return re.match('( |\t)*([#;]?)( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line) def match_active_opt(option, line): option = re.escape(option) - return re.match('()( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line) + return re.match('()()( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line) def update_section_line(option, changed, section_lines, index, changed_lines, ignore_spaces, newline, msg): option_changed = None if ignore_spaces: old_match = match_opt(option, section_lines[index]) - if not old_match.group(1): + if not old_match.group(2): new_match = match_opt(option, newline) - option_changed = old_match.group(7) != new_match.group(7) + option_changed = old_match.group(8) != new_match.group(8) if option_changed is None: option_changed = section_lines[index] != newline if option_changed: @@ -299,7 +299,7 @@ def check_section_has_values(section_has_values, section_lines): for condition in section_has_values: for line in section_lines: match = match_opt(condition["option"], line) - if match and (len(condition["values"]) == 0 or match.group(7) in condition["values"]): + if match and (len(condition["values"]) == 0 or match.group(8) in condition["values"]): break else: return False @@ -432,8 +432,8 @@ def do_ini(module, filename, section=None, section_has_values=None, option=None, for index, line in enumerate(section_lines): if match_function(option, line): match = match_function(option, line) - if values and match.group(7) in values: - matched_value = match.group(7) + if values and match.group(8) in values: + matched_value = match.group(8) if not matched_value and allow_no_value: # replace existing option with no value line(s) newline = u'%s\n' % option @@ -505,7 +505,7 @@ def do_ini(module, filename, section=None, section_has_values=None, option=None, section_lines = new_section_lines elif not exclusive and len(values) > 0: # delete specified option=value line(s) - new_section_lines = [i for i in section_lines if not (match_active_opt(option, i) and match_active_opt(option, i).group(7) in values)] + new_section_lines = [i for i in section_lines if not (match_active_opt(option, i) and match_active_opt(option, i).group(8) in values)] if section_lines != new_section_lines: changed = True msg = 'option changed' @@ -584,7 +584,7 @@ def main(): option=dict(type='str', required=True), value=dict(type='str'), values=dict(type='list', elements='str') - ), default=None, mutually_exclusive=[['value', 'values']]), + ), mutually_exclusive=[['value', 'values']]), option=dict(type='str'), value=dict(type='str'), values=dict(type='list', elements='str'), diff --git a/plugins/modules/installp.py b/plugins/modules/installp.py index e54a56949f..da88a7e7c2 100644 --- a/plugins/modules/installp.py +++ b/plugins/modules/installp.py @@ -47,7 +47,7 @@ options: choices: [absent, present] default: present notes: - - If the package is already installed, even the package/fileset is new, the module will not install it. + - If the package is already installed, even if the package/fileset is new, the module does not install it.
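To make the ini_file group renumbering above easier to follow: the new leading ( |\t)* group in match_opt shifts every later capture group right by one, so the comment marker moves from group(1) to group(2) and the value from group(7) to group(8), and indented options now match. A standalone check (not part of the patch):

```python
import re


def match_opt(option, line):
    # Pattern as changed in this patch: a leading whitespace group was added
    # in front, shifting the comment marker to group(2) and the value to
    # group(8).
    option = re.escape(option)
    return re.match('( |\t)*([#;]?)( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line)


m = match_opt('port', '    port = 8080')  # an indented option line
print(m.group(2))  # '' -> no [#;] marker, so the option is active
print(m.group(8))  # '8080' -> the option's value
```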
""" EXAMPLES = r""" diff --git a/plugins/modules/interfaces_file.py b/plugins/modules/interfaces_file.py index 23bfd78790..8e315d7b69 100644 --- a/plugins/modules/interfaces_file.py +++ b/plugins/modules/interfaces_file.py @@ -45,10 +45,10 @@ options: value: type: str description: - - If O(option) is not presented for the O(iface) and O(state) is V(present) option will be added. If O(option) already - exists and is not V(pre-up), V(up), V(post-up) or V(down), its value will be updated. V(pre-up), V(up), V(post-up) - and V(down) options cannot be updated, only adding new options, removing existing ones or cleaning the whole option - set are supported. + - If O(option) is not presented for the O(iface) and O(state) is V(present), then O(option) is added. If O(option) already + exists and is not V(pre-up), V(up), V(post-up) or V(down), its value is updated. V(pre-up), V(up), V(post-up) and + V(down) options cannot be updated, only adding new options, removing existing ones or cleaning the whole option set + are supported. backup: description: - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered @@ -58,12 +58,12 @@ options: state: type: str description: - - If set to V(absent) the option or section will be removed if present instead of created. + - If set to V(absent) the option or section is removed if present instead of created. default: "present" choices: ["present", "absent"] notes: - - If option is defined multiple times last one will be updated but all will be deleted in case of an absent state. + - If option is defined multiple times last one is updated but all others are deleted in case of an O(state=absent). requirements: [] author: "Roman Belyakovsky (@hryamzik)" """ diff --git a/plugins/modules/ipa_dnsrecord.py b/plugins/modules/ipa_dnsrecord.py index d92e2c4f66..3cba35b11c 100644 --- a/plugins/modules/ipa_dnsrecord.py +++ b/plugins/modules/ipa_dnsrecord.py @@ -45,33 +45,31 @@ options: - Manage DNS record name with this value. - Mutually exclusive with O(record_values), and exactly one of O(record_value) and O(record_values) has to be specified. - Use O(record_values) if you need to specify multiple values. - - In the case of V(A) or V(AAAA) record types, this will be the IP address. - - In the case of V(A6) record type, this will be the A6 Record data. - - In the case of V(CNAME) record type, this will be the hostname. - - In the case of V(DNAME) record type, this will be the DNAME target. - - In the case of V(NS) record type, this will be the name server hostname. Hostname must already have a valid A or AAAA - record. - - In the case of V(PTR) record type, this will be the hostname. - - In the case of V(TXT) record type, this will be a text. - - In the case of V(SRV) record type, this will be a service record. - - In the case of V(MX) record type, this will be a mail exchanger record. - - In the case of V(SSHFP) record type, this will be an SSH fingerprint record. + - In the case of V(A) or V(AAAA) record types, this is the IP address. + - In the case of V(A6) record type, this is the A6 Record data. + - In the case of V(CNAME) record type, this is the hostname. + - In the case of V(DNAME) record type, this is the DNAME target. + - In the case of V(NS) record type, this is the name server hostname. Hostname must already have a valid A or AAAA record. + - In the case of V(PTR) record type, this is the hostname. + - In the case of V(TXT) record type, this is a text. 
+ - In the case of V(SRV) record type, this is a service record. + - In the case of V(MX) record type, this is a mail exchanger record. + - In the case of V(SSHFP) record type, this is an SSH fingerprint record. type: str record_values: description: - Manage DNS record name with this value. - Mutually exclusive with O(record_value), and exactly one of O(record_value) and O(record_values) has to be specified. - - In the case of V(A) or V(AAAA) record types, this will be the IP address. - - In the case of V(A6) record type, this will be the A6 Record data. - - In the case of V(CNAME) record type, this will be the hostname. - - In the case of V(DNAME) record type, this will be the DNAME target. - - In the case of V(NS) record type, this will be the name server hostname. Hostname must already have a valid A or AAAA - record. - - In the case of V(PTR) record type, this will be the hostname. - - In the case of V(TXT) record type, this will be a text. - - In the case of V(SRV) record type, this will be a service record. - - In the case of V(MX) record type, this will be a mail exchanger record. - - In the case of V(SSHFP) record type, this will be an SSH fingerprint record. + - In the case of V(A) or V(AAAA) record types, this is the IP address. + - In the case of V(A6) record type, this is the A6 Record data. + - In the case of V(CNAME) record type, this is the hostname. + - In the case of V(DNAME) record type, this is the DNAME target. + - In the case of V(NS) record type, this is the name server hostname. Hostname must already have a valid A or AAAA record. + - In the case of V(PTR) record type, this is the hostname. + - In the case of V(TXT) record type, this is a text. + - In the case of V(SRV) record type, this is a service record. + - In the case of V(MX) record type, this is a mail exchanger record. + - In the case of V(SSHFP) record type, this is an SSH fingerprint record. type: list elements: str record_ttl: @@ -167,7 +165,7 @@ EXAMPLES = r""" state: absent - name: Ensure an NS record for a subdomain is present - community,general.ipa_dnsrecord: + community.general.ipa_dnsrecord: name: subdomain zone_name: example.com record_type: 'NS' @@ -355,7 +353,7 @@ def main(): record_value=dict(type='str'), record_values=dict(type='list', elements='str'), state=dict(type='str', default='present', choices=['present', 'absent']), - record_ttl=dict(type='int', required=False), + record_ttl=dict(type='int'), ) module = AnsibleModule( diff --git a/plugins/modules/ipa_dnszone.py b/plugins/modules/ipa_dnszone.py index b536c258d2..81a99bc54b 100644 --- a/plugins/modules/ipa_dnszone.py +++ b/plugins/modules/ipa_dnszone.py @@ -173,8 +173,8 @@ def main(): argument_spec = ipa_argument_spec() argument_spec.update(zone_name=dict(type='str', required=True), state=dict(type='str', default='present', choices=['present', 'absent']), - dynamicupdate=dict(type='bool', required=False, default=False), - allowsyncptr=dict(type='bool', required=False, default=False), + dynamicupdate=dict(type='bool', default=False), + allowsyncptr=dict(type='bool', default=False), ) module = AnsibleModule(argument_spec=argument_spec, diff --git a/plugins/modules/ipa_getkeytab.py b/plugins/modules/ipa_getkeytab.py index dfd612564b..7c533fb729 100644 --- a/plugins/modules/ipa_getkeytab.py +++ b/plugins/modules/ipa_getkeytab.py @@ -67,15 +67,15 @@ options: retrieve_mode: description: - Retrieve an existing key from the server instead of generating a new one. 
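Several hunks in this patch (ipa_dnsrecord's record_ttl above, ipa_dnszone, and later ipa_service and ipbase_info) drop explicit required=False and default=None from argument specs. A minimal sketch of why the shorter spelling is equivalent, relying on the fact that AnsibleModule treats required as False and default as None when they are omitted:

```python
# Both specs describe the same option; the explicit keywords are redundant.
explicit = dict(record_ttl=dict(type='int', required=False, default=None))
implicit = dict(record_ttl=dict(type='int'))

print(explicit['record_ttl'])  # {'type': 'int', 'required': False, 'default': None}
print(implicit['record_ttl'])  # {'type': 'int'} -> behaves identically
```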
- - This is incompatible with the O(password), and will work only against a IPA server more recent than version 3.3. + - This is incompatible with the O(password) option, and works only against an IPA server more recent than version 3.3. - The user requesting the keytab must have access to the keys for this operation to succeed. - - Be aware that if set V(true), a new keytab will be generated. + - Be aware that if set to V(true), a new keytab is generated. - This invalidates all previously retrieved keytabs for this service principal. type: bool encryption_types: description: - The list of encryption types to use to generate keys. - - It will use local client defaults if not provided. + - It uses local client defaults if not provided. - Valid values depend on the Kerberos library version and configuration. type: str state: diff --git a/plugins/modules/ipa_group.py b/plugins/modules/ipa_group.py index 60077a2c6a..934e533dff 100644 --- a/plugins/modules/ipa_group.py +++ b/plugins/modules/ipa_group.py @@ -22,7 +22,7 @@ options: append: description: - If V(true), add the listed O(user) and O(group) to the group members. - - If V(false), only the listed O(user) and O(group) will be group members, removing any other members. + - If V(false), only the listed O(user) and O(group) are set as group members, removing any other members. default: false type: bool version_added: 4.0.0 @@ -49,10 +49,10 @@ options: group: description: - List of group names assigned to this group. - - If O(append=false) and an empty list is passed all groups will be removed from this group. - - Groups that are already assigned but not passed will be removed. - - If O(append=true) the listed groups will be assigned without removing other groups. - - If option is omitted assigned groups will not be checked or changed. + - If O(append=false) and an empty list is passed all groups are removed from this group. + - Groups that are already assigned but not passed are removed. + - If O(append=true) the listed groups are assigned without removing other groups. + - If option is omitted assigned groups are not checked or changed. type: list elements: str nonposix: @@ -62,10 +62,10 @@ options: user: description: - List of user names assigned to this group. - - If O(append=false) and an empty list is passed all users will be removed from this group. - - Users that are already assigned but not passed will be removed. - - If O(append=true) the listed users will be assigned without removing other users. - - If option is omitted assigned users will not be checked or changed. + - If O(append=false) and an empty list is passed all users are removed from this group. + - Users that are already assigned but not passed are removed. + - If O(append=true) the listed users are assigned without removing other users. + - If option is omitted assigned users are not checked or changed. type: list elements: str external_user: description: - List of external users assigned to this group. - Behaves identically to O(user) with respect to O(append) attribute. - List entries can be in V(DOMAIN\\username) or SID format. - - Unless SIDs are provided, the module will always attempt to make changes even if the group already has all the users. + - Unless SIDs are provided, the module always attempts to make changes even if the group already has all the users. This is because only SIDs are returned by IPA query. - O(external=true) is needed for this option to work.
type: list @@ -261,7 +261,7 @@ def ensure(module, client): nonposix=module.params['nonposix']) ipa_group = client.group_find(name=name) - if (not (external or external_user is None)): + if not (external or external_user is None): module.fail_json("external_user can only be set if external = True") changed = False diff --git a/plugins/modules/ipa_hbacrule.py b/plugins/modules/ipa_hbacrule.py index d168a3a7e0..cb828f68e9 100644 --- a/plugins/modules/ipa_hbacrule.py +++ b/plugins/modules/ipa_hbacrule.py @@ -32,8 +32,8 @@ options: host: description: - List of host names to assign. - - If an empty list is passed all hosts will be removed from the rule. - - If option is omitted hosts will not be checked or changed. + - If an empty list is passed all hosts are removed from the rule. + - If option is omitted hosts are not checked or changed. required: false type: list elements: str @@ -44,15 +44,15 @@ options: hostgroup: description: - List of hostgroup names to assign. - - If an empty list is passed all hostgroups will be removed from the rule. - - If option is omitted hostgroups will not be checked or changed. + - If an empty list is passed all hostgroups are removed from the rule. + - If option is omitted hostgroups are not checked or changed. type: list elements: str service: description: - List of service names to assign. - - If an empty list is passed all services will be removed from the rule. - - If option is omitted services will not be checked or changed. + - If an empty list is passed all services are removed from the rule. + - If option is omitted services are not checked or changed. type: list elements: str servicecategory: @@ -62,15 +62,15 @@ options: servicegroup: description: - List of service group names to assign. - - If an empty list is passed all assigned service groups will be removed from the rule. - - If option is omitted service groups will not be checked or changed. + - If an empty list is passed all assigned service groups are removed from the rule. + - If option is omitted service groups are not checked or changed. type: list elements: str sourcehost: description: - List of source host names to assign. - - If an empty list if passed all assigned source hosts will be removed from the rule. - - If option is omitted source hosts will not be checked or changed. + - If an empty list is passed all assigned source hosts are removed from the rule. + - If option is omitted source hosts are not checked or changed. type: list elements: str sourcehostcategory: @@ -80,8 +80,8 @@ options: sourcehostgroup: description: - List of source host group names to assign. - - If an empty list if passed all assigned source host groups will be removed from the rule. - - If option is omitted source host groups will not be checked or changed. + - If an empty list is passed all assigned source host groups are removed from the rule. + - If option is omitted source host groups are not checked or changed. type: list elements: str state: @@ -92,8 +92,8 @@ options: user: description: - List of user names to assign. - - If an empty list if passed all assigned users will be removed from the rule. - - If option is omitted users will not be checked or changed. + - If an empty list is passed all assigned users are removed from the rule. + - If option is omitted users are not checked or changed. type: list elements: str usercategory: @@ -103,8 +103,8 @@ options: usergroup: description: - List of user group names to assign. - - If an empty list if passed all assigned user groups will be removed from the rule.
- - If option is omitted user groups will not be checked or changed. + - If an empty list is passed all assigned user groups are removed from the rule. + - If option is omitted user groups are not checked or changed. type: list elements: str extends_documentation_fragment: diff --git a/plugins/modules/ipa_host.py b/plugins/modules/ipa_host.py index b2f76ac8f3..c88f3c0adb 100644 --- a/plugins/modules/ipa_host.py +++ b/plugins/modules/ipa_host.py @@ -42,9 +42,9 @@ options: mac_address: description: - List of Hardware MAC address(es) off this host. - - If option is omitted MAC addresses will not be checked or changed. - - If an empty list is passed all assigned MAC addresses will be removed. - - MAC addresses that are already assigned but not passed will be removed. + - If option is omitted MAC addresses are not checked nor changed. + - If an empty list is passed all assigned MAC addresses are removed. + - MAC addresses that are already assigned but not passed are removed. aliases: ["macaddress"] type: list elements: str @@ -66,9 +66,9 @@ options: user_certificate: description: - List of Base-64 encoded server certificates. - - If option is omitted certificates will not be checked or changed. - - If an empty list is passed all assigned certificates will be removed. - - Certificates already assigned but not passed will be removed. + - If option is omitted certificates are not checked nor changed. + - If an empty list is passed all assigned certificates are removed. + - Certificates already assigned but not passed are removed. aliases: ["usercertificate"] type: list elements: str @@ -270,6 +270,10 @@ def ensure(module, client): data = {} for key in diff: data[key] = module_host.get(key) + if "usercertificate" not in data: + data["usercertificate"] = [ + cert['__base64__'] for cert in ipa_host.get("usercertificate", []) + ] ipa_host_show = client.host_show(name=name) if ipa_host_show.get('has_keytab', True) and (state == 'disabled' or module.params.get('random_password')): client.host_disable(name=name) diff --git a/plugins/modules/ipa_hostgroup.py b/plugins/modules/ipa_hostgroup.py index c1e7d3ad56..ffe87fca4c 100644 --- a/plugins/modules/ipa_hostgroup.py +++ b/plugins/modules/ipa_hostgroup.py @@ -22,7 +22,7 @@ options: append: description: - If V(true), add the listed O(host) to the O(hostgroup). - - If V(false), only the listed O(host) will be in O(hostgroup), removing any other hosts. + - If V(false), only the listed O(host) is set in O(hostgroup), removing any other hosts. default: false type: bool version_added: 6.6.0 @@ -40,17 +40,17 @@ options: host: description: - List of hosts that belong to the host-group. - - If an empty list is passed all hosts will be removed from the group. - - If option is omitted hosts will not be checked or changed. - - If option is passed all assigned hosts that are not passed will be unassigned from the group. + - If an empty list is passed all hosts are removed from the group. + - If option is omitted hosts are not checked nor changed. + - If option is passed all assigned hosts that are not passed are unassigned from the group. type: list elements: str hostgroup: description: - List of host-groups than belong to that host-group. - - If an empty list is passed all host-groups will be removed from the group. - - If option is omitted host-groups will not be checked or changed. - - If option is passed all assigned hostgroups that are not passed will be unassigned from the group. + - If an empty list is passed all host-groups are removed from the group.
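The ensure() change in ipa_host above backfills certificates into the update payload when the task does not manage them, so that modifying other fields does not wipe existing certificates. A standalone sketch of the reshaping involved (the dict shapes are assumptions inferred from the diff, where IPA returns each certificate as a {'__base64__': ...} dict):

```python
# Hedged sketch of the usercertificate backfill added to ipa_host.ensure().
ipa_host = {'usercertificate': [{'__base64__': 'MIIC...'}]}  # existing entry
data = {'description': 'updated'}  # only the fields actually being changed

if 'usercertificate' not in data:
    # Re-send the existing certificates as plain base64 strings so the
    # modify call does not drop them.
    data['usercertificate'] = [
        cert['__base64__'] for cert in ipa_host.get('usercertificate', [])
    ]

print(data['usercertificate'])  # ['MIIC...'] -> certificates survive the update
```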
+ - If option is omitted host-groups are not checked nor changed. + - If option is passed all assigned hostgroups that are not passed are unassigned from the group. type: list elements: str state: diff --git a/plugins/modules/ipa_otptoken.py b/plugins/modules/ipa_otptoken.py index 5aba671edf..e8c99bd302 100644 --- a/plugins/modules/ipa_otptoken.py +++ b/plugins/modules/ipa_otptoken.py @@ -26,7 +26,7 @@ options: aliases: ["name"] type: str newuniqueid: - description: If specified, the unique id specified will be changed to this. + description: If specified, the unique ID is changed to this value. type: str otptype: description: @@ -37,7 +37,7 @@ options: secretkey: description: - Token secret (Base64). - - If OTP is created and this is not specified, a random secret will be generated by IPA. + - If OTP is created and this is not specified, a random secret is generated by IPA. - B(Note:) Cannot be modified after OTP is created. type: str description: @@ -54,13 +54,13 @@ options: description: - First date/time the token can be used. - In the format C(YYYYMMddHHmmss). - - For example, V(20180121182022) will allow the token to be used starting on 21 January 2018 at 18:20:22. + - For example, V(20180121182022) allows the token to be used starting on 21 January 2018 at 18:20:22. type: str notafter: description: - Last date/time the token can be used. - In the format C(YYYYMMddHHmmss). - - For example, V(20200121182022) will allow the token to be used until 21 January 2020 at 18:20:22. + - For example, V(20200121182022) allows the token to be used until 21 January 2020 at 18:20:22. type: str vendor: description: Token vendor name (informational only). @@ -84,7 +84,7 @@ options: type: str digits: description: - - Number of digits each token code will have. + - Number of digits each token code has. - B(Note:) Cannot be modified after OTP is created. choices: [6, 8] type: int diff --git a/plugins/modules/ipa_role.py b/plugins/modules/ipa_role.py index e77b732cb2..6057deec7b 100644 --- a/plugins/modules/ipa_role.py +++ b/plugins/modules/ipa_role.py @@ -33,41 +33,41 @@ options: group: description: - List of group names assign to this role. - - If an empty list is passed all assigned groups will be unassigned from the role. - - If option is omitted groups will not be checked or changed. - - If option is passed all assigned groups that are not passed will be unassigned from the role. + - If an empty list is passed all assigned groups are unassigned from the role. + - If option is omitted groups are not checked nor changed. + - If option is passed all assigned groups that are not passed are unassigned from the role. type: list elements: str host: description: - List of host names to assign. - - If an empty list is passed all assigned hosts will be unassigned from the role. - - If option is omitted hosts will not be checked or changed. - - If option is passed all assigned hosts that are not passed will be unassigned from the role. + - If an empty list is passed all assigned hosts are unassigned from the role. + - If option is omitted hosts are not checked nor changed. + - If option is passed all assigned hosts that are not passed are unassigned from the role. type: list elements: str hostgroup: description: - List of host group names to assign. - - If an empty list is passed all assigned host groups will be removed from the role. - - If option is omitted host groups will not be checked or changed. - - If option is passed all assigned hostgroups that are not passed will be unassigned from the role.
+ - If an empty list is passed all assigned host groups are removed from the role. + - If option is omitted host groups are not checked nor changed. + - If option is passed all assigned hostgroups that are not passed are unassigned from the role. type: list elements: str privilege: description: - List of privileges granted to the role. - - If an empty list is passed all assigned privileges will be removed. - - If option is omitted privileges will not be checked or changed. - - If option is passed all assigned privileges that are not passed will be removed. + - If an empty list is passed all assigned privileges are removed. + - If option is omitted privileges are not checked nor changed. + - If option is passed all assigned privileges that are not passed are removed. type: list elements: str service: description: - List of service names to assign. - - If an empty list is passed all assigned services will be removed from the role. - - If option is omitted services will not be checked or changed. - - If option is passed all assigned services that are not passed will be removed from the role. + - If an empty list is passed all assigned services are removed from the role. + - If option is omitted services are not checked nor changed. + - If option is passed all assigned services that are not passed are removed from the role. type: list elements: str state: @@ -78,8 +78,8 @@ options: user: description: - List of user names to assign. - - If an empty list is passed all assigned users will be removed from the role. - - If option is omitted users will not be checked or changed. + - If an empty list is passed all assigned users are removed from the role. + - If option is omitted users are not checked nor changed. type: list elements: str extends_documentation_fragment: diff --git a/plugins/modules/ipa_service.py b/plugins/modules/ipa_service.py index 54c5575950..51ace78760 100644 --- a/plugins/modules/ipa_service.py +++ b/plugins/modules/ipa_service.py @@ -197,10 +197,10 @@ def main(): argument_spec = ipa_argument_spec() argument_spec.update( krbcanonicalname=dict(type='str', required=True, aliases=['name']), - force=dict(type='bool', required=False), - skip_host_check=dict(type='bool', default=False, required=False), - hosts=dict(type='list', required=False, elements='str'), - state=dict(type='str', required=False, default='present', + force=dict(type='bool'), + skip_host_check=dict(type='bool', default=False), + hosts=dict(type='list', elements='str'), + state=dict(type='str', default='present', choices=['present', 'absent'])) module = AnsibleModule(argument_spec=argument_spec, diff --git a/plugins/modules/ipa_sudocmdgroup.py b/plugins/modules/ipa_sudocmdgroup.py index c7ab798f4c..4298032121 100644 --- a/plugins/modules/ipa_sudocmdgroup.py +++ b/plugins/modules/ipa_sudocmdgroup.py @@ -37,8 +37,8 @@ options: sudocmd: description: - List of sudo commands to assign to the group. - - If an empty list is passed all assigned commands will be removed from the group. - - If option is omitted sudo commands will not be checked or changed. + - If an empty list is passed all assigned commands are removed from the group. + - If option is omitted sudo commands are not checked nor changed. 
type: list elements: str extends_documentation_fragment: diff --git a/plugins/modules/ipa_sudorule.py b/plugins/modules/ipa_sudorule.py index 1670a52035..ae3730da62 100644 --- a/plugins/modules/ipa_sudorule.py +++ b/plugins/modules/ipa_sudorule.py @@ -34,31 +34,31 @@ options: cmd: description: - List of commands assigned to the rule. - - If an empty list is passed all commands will be removed from the rule. - - If option is omitted commands will not be checked or changed. + - If an empty list is passed all commands are removed from the rule. + - If option is omitted commands are not checked nor changed. type: list elements: str cmdgroup: description: - List of command groups assigned to the rule. - - If an empty list is passed all command groups will be removed from the rule. - - If option is omitted command groups will not be checked or changed. + - If an empty list is passed all command groups are removed from the rule. + - If option is omitted command groups are not checked nor changed. type: list elements: str version_added: 2.0.0 deny_cmd: description: - List of denied commands assigned to the rule. - - If an empty list is passed all commands will be removed from the rule. - - If option is omitted commands will not be checked or changed. + - If an empty list is passed all commands are removed from the rule. + - If option is omitted commands are not checked nor changed. type: list elements: str version_added: 8.1.0 deny_cmdgroup: description: - List of denied command groups assigned to the rule. - - If an empty list is passed all command groups will be removed from the rule. - - If option is omitted command groups will not be checked or changed. + - If an empty list is passed all command groups are removed from the rule. + - If option is omitted command groups are not checked nor changed. type: list elements: str version_added: 8.1.0 @@ -69,8 +69,8 @@ options: host: description: - List of hosts assigned to the rule. - - If an empty list is passed all hosts will be removed from the rule. - - If option is omitted hosts will not be checked or changed. + - If an empty list is passed all hosts are removed from the rule. + - If option is omitted hosts are not checked nor changed. - Option O(hostcategory) must be omitted to assign hosts. type: list elements: str @@ -84,8 +84,8 @@ options: hostgroup: description: - List of host groups assigned to the rule. - - If an empty list is passed all host groups will be removed from the rule. - - If option is omitted host groups will not be checked or changed. + - If an empty list is passed all host groups are removed from the rule. + - If option is omitted host groups are not checked nor changed. - Option O(hostcategory) must be omitted to assign host groups. type: list elements: str @@ -113,8 +113,8 @@ options: user: description: - List of users assigned to the rule. - - If an empty list is passed all users will be removed from the rule. - - If option is omitted users will not be checked or changed. + - If an empty list is passed all users are removed from the rule. + - If option is omitted users are not checked nor changed. type: list elements: str usercategory: @@ -125,8 +125,8 @@ options: usergroup: description: - List of user groups assigned to the rule. - - If an empty list is passed all user groups will be removed from the rule. - - If option is omitted user groups will not be checked or changed. + - If an empty list is passed all user groups are removed from the rule. + - If option is omitted user groups are not checked nor changed. 
type: list elements: str state: diff --git a/plugins/modules/ipa_user.py b/plugins/modules/ipa_user.py index 47d50972bd..6e61f89600 100644 --- a/plugins/modules/ipa_user.py +++ b/plugins/modules/ipa_user.py @@ -35,9 +35,9 @@ options: type: str krbpasswordexpiration: description: - - Date at which the user password will expire. + - Date at which the user password expires. - In the format YYYYMMddHHmmss. - - For example V(20180121182022) will expire on 21 January 2018 at 18:20:22. + - For example V(20180121182022) expires on 21 January 2018 at 18:20:22. type: str loginshell: description: Login shell. @@ -45,14 +45,14 @@ options: mail: description: - List of mail addresses assigned to the user. - - If an empty list is passed all assigned email addresses will be deleted. - - If None is passed email addresses will not be checked or changed. + - If an empty list is passed all assigned email addresses are deleted. + - If None is passed email addresses are not checked nor changed. type: list elements: str password: description: - Password for a user. - - Will not be set for an existing user unless O(update_password=always), which is the default. + - It is not set for an existing user unless O(update_password=always), which is the default. type: str sn: description: @@ -62,8 +62,8 @@ options: sshpubkey: description: - List of public SSH key. - - If an empty list is passed all assigned public keys will be deleted. - - If None is passed SSH public keys will not be checked or changed. + - If an empty list is passed all assigned public keys are deleted. + - If None is passed SSH public keys are not checked nor changed. type: list elements: str state: @@ -74,8 +74,8 @@ options: telephonenumber: description: - List of telephone numbers assigned to the user. - - If an empty list is passed all assigned telephone numbers will be deleted. - - If None is passed telephone numbers will not be checked or changed. + - If an empty list is passed all assigned telephone numbers are deleted. + - If None is passed telephone numbers are not checked nor changed. type: list elements: str title: diff --git a/plugins/modules/ipbase_info.py b/plugins/modules/ipbase_info.py index 3c7d3d26c1..7a2dde13d6 100644 --- a/plugins/modules/ipbase_info.py +++ b/plugins/modules/ipbase_info.py @@ -21,7 +21,7 @@ extends_documentation_fragment: options: ip: description: - - The IP you want to get the info for. If not specified the API will detect the IP automatically. + - The IP you want to get the info for. If not specified the API detects the IP automatically. required: false type: str apikey: @@ -31,7 +31,7 @@ options: type: str hostname: description: - - If the O(hostname) parameter is set to V(true), the API response will contain the hostname of the IP. + - If the O(hostname) parameter is set to V(true), the API response contains the hostname of the IP. required: false type: bool default: false @@ -71,147 +71,148 @@ data: of the response." 
returned: success type: dict - sample: { - "ip": "1.1.1.1", - "hostname": "one.one.one.one", - "type": "v4", - "range_type": { - "type": "PUBLIC", - "description": "Public address" - }, - "connection": { - "asn": 13335, - "organization": "Cloudflare, Inc.", - "isp": "APNIC Research and Development", - "range": "1.1.1.1/32" - }, - "location": { - "geonames_id": 5332870, - "latitude": 34.053611755371094, - "longitude": -118.24549865722656, - "zip": "90012", - "continent": { - "code": "NA", - "name": "North America", - "name_translated": "North America" + sample: + { + "ip": "1.1.1.1", + "hostname": "one.one.one.one", + "type": "v4", + "range_type": { + "type": "PUBLIC", + "description": "Public address" }, - "country": { - "alpha2": "US", - "alpha3": "USA", - "calling_codes": [ - "+1" - ], - "currencies": [ - { - "symbol": "$", - "name": "US Dollar", - "symbol_native": "$", - "decimal_digits": 2, - "rounding": 0, - "code": "USD", - "name_plural": "US dollars" - } - ], - "emoji": "...", - "ioc": "USA", - "languages": [ - { - "name": "English", - "name_native": "English" - } - ], - "name": "United States", - "name_translated": "United States", - "timezones": [ - "America/New_York", - "America/Detroit", - "America/Kentucky/Louisville", - "America/Kentucky/Monticello", - "America/Indiana/Indianapolis", - "America/Indiana/Vincennes", - "America/Indiana/Winamac", - "America/Indiana/Marengo", - "America/Indiana/Petersburg", - "America/Indiana/Vevay", - "America/Chicago", - "America/Indiana/Tell_City", - "America/Indiana/Knox", - "America/Menominee", - "America/North_Dakota/Center", - "America/North_Dakota/New_Salem", - "America/North_Dakota/Beulah", - "America/Denver", - "America/Boise", - "America/Phoenix", - "America/Los_Angeles", - "America/Anchorage", - "America/Juneau", - "America/Sitka", - "America/Metlakatla", - "America/Yakutat", - "America/Nome", - "America/Adak", - "Pacific/Honolulu" - ], - "is_in_european_union": false, - "fips": "US", - "geonames_id": 6252001, - "hasc_id": "US", - "wikidata_id": "Q30" + "connection": { + "asn": 13335, + "organization": "Cloudflare, Inc.", + "isp": "APNIC Research and Development", + "range": "1.1.1.1/32" }, - "city": { - "fips": "644000", - "alpha2": null, - "geonames_id": 5368753, - "hasc_id": null, - "wikidata_id": "Q65", - "name": "Los Angeles", - "name_translated": "Los Angeles" + "location": { + "geonames_id": 5332870, + "latitude": 34.053611755371094, + "longitude": -118.24549865722656, + "zip": "90012", + "continent": { + "code": "NA", + "name": "North America", + "name_translated": "North America" + }, + "country": { + "alpha2": "US", + "alpha3": "USA", + "calling_codes": [ + "+1" + ], + "currencies": [ + { + "symbol": "$", + "name": "US Dollar", + "symbol_native": "$", + "decimal_digits": 2, + "rounding": 0, + "code": "USD", + "name_plural": "US dollars" + } + ], + "emoji": "...", + "ioc": "USA", + "languages": [ + { + "name": "English", + "name_native": "English" + } + ], + "name": "United States", + "name_translated": "United States", + "timezones": [ + "America/New_York", + "America/Detroit", + "America/Kentucky/Louisville", + "America/Kentucky/Monticello", + "America/Indiana/Indianapolis", + "America/Indiana/Vincennes", + "America/Indiana/Winamac", + "America/Indiana/Marengo", + "America/Indiana/Petersburg", + "America/Indiana/Vevay", + "America/Chicago", + "America/Indiana/Tell_City", + "America/Indiana/Knox", + "America/Menominee", + "America/North_Dakota/Center", + "America/North_Dakota/New_Salem", + "America/North_Dakota/Beulah", + 
"America/Denver", + "America/Boise", + "America/Phoenix", + "America/Los_Angeles", + "America/Anchorage", + "America/Juneau", + "America/Sitka", + "America/Metlakatla", + "America/Yakutat", + "America/Nome", + "America/Adak", + "Pacific/Honolulu" + ], + "is_in_european_union": false, + "fips": "US", + "geonames_id": 6252001, + "hasc_id": "US", + "wikidata_id": "Q30" + }, + "city": { + "fips": "644000", + "alpha2": null, + "geonames_id": 5368753, + "hasc_id": null, + "wikidata_id": "Q65", + "name": "Los Angeles", + "name_translated": "Los Angeles" + }, + "region": { + "fips": "US06", + "alpha2": "US-CA", + "geonames_id": 5332921, + "hasc_id": "US.CA", + "wikidata_id": "Q99", + "name": "California", + "name_translated": "California" + } }, - "region": { - "fips": "US06", - "alpha2": "US-CA", - "geonames_id": 5332921, - "hasc_id": "US.CA", - "wikidata_id": "Q99", - "name": "California", - "name_translated": "California" + "tlds": [ + ".us" + ], + "timezone": { + "id": "America/Los_Angeles", + "current_time": "2023-05-04T04:30:28-07:00", + "code": "PDT", + "is_daylight_saving": true, + "gmt_offset": -25200 + }, + "security": { + "is_anonymous": false, + "is_datacenter": false, + "is_vpn": false, + "is_bot": false, + "is_abuser": true, + "is_known_attacker": true, + "is_proxy": false, + "is_spam": false, + "is_tor": false, + "is_icloud_relay": false, + "threat_score": 100 + }, + "domains": { + "count": 10943, + "domains": [ + "eliwise.academy", + "accountingprose.academy", + "pistola.academy", + "1and1-test-ntlds-fr.accountant", + "omnergy.africa" + ] } - }, - "tlds": [ - ".us" - ], - "timezone": { - "id": "America/Los_Angeles", - "current_time": "2023-05-04T04:30:28-07:00", - "code": "PDT", - "is_daylight_saving": true, - "gmt_offset": -25200 - }, - "security": { - "is_anonymous": false, - "is_datacenter": false, - "is_vpn": false, - "is_bot": false, - "is_abuser": true, - "is_known_attacker": true, - "is_proxy": false, - "is_spam": false, - "is_tor": false, - "is_icloud_relay": false, - "threat_score": 100 - }, - "domains": { - "count": 10943, - "domains": [ - "eliwise.academy", - "accountingprose.academy", - "pistola.academy", - "1and1-test-ntlds-fr.accountant", - "omnergy.africa" - ] } - } """ from ansible.module_utils.basic import AnsibleModule @@ -284,10 +285,10 @@ class IpbaseInfo(object): def main(): module_args = dict( - ip=dict(type='str', required=False, no_log=False), - apikey=dict(type='str', required=False, no_log=True), - hostname=dict(type='bool', required=False, no_log=False, default=False), - language=dict(type='str', required=False, no_log=False, default='en'), + ip=dict(type='str', no_log=False), + apikey=dict(type='str', no_log=True), + hostname=dict(type='bool', no_log=False, default=False), + language=dict(type='str', no_log=False, default='en'), ) module = AnsibleModule( diff --git a/plugins/modules/ipify_facts.py b/plugins/modules/ipify_facts.py index 7767c8d0ff..b7cd2b7447 100644 --- a/plugins/modules/ipify_facts.py +++ b/plugins/modules/ipify_facts.py @@ -24,7 +24,7 @@ options: api_url: description: - URL of the ipify.org API service. - - C(?format=json) will be appended per default. + - C(?format=json) is appended by default. type: str default: https://api.ipify.org/ timeout: @@ -34,7 +34,7 @@ options: default: 10 validate_certs: description: - - When set to V(false), SSL certificates will not be validated. + - When set to V(false), SSL certificates are not validated. 
type: bool
default: true
notes:
diff --git a/plugins/modules/ipmi_boot.py b/plugins/modules/ipmi_boot.py
index 48879967d7..69131732c6 100644
--- a/plugins/modules/ipmi_boot.py
+++ b/plugins/modules/ipmi_boot.py
@@ -50,15 +50,15 @@ options:
version_added: 4.1.0
bootdev:
description:
- - Set boot device to use on next reboot
- - "The choices for the device are:"
- - V(network) -- Request network boot
- - V(floppy) -- Boot from floppy
- - V(hd) -- Boot from hard drive
- - "V(safe) -- Boot from hard drive, requesting 'safe mode'"
- - V(optical) -- boot from CD/DVD/BD drive
- - V(setup) -- Boot into setup utility
- - V(default) -- remove any IPMI directed boot device request
+ - Set boot device to use on next reboot.
+ - 'The choices for the device are:'
+ - V(network) -- Request network boot.
+ - V(floppy) -- Boot from floppy.
+ - V(hd) -- Boot from hard drive.
+ - V(safe) -- Boot from hard drive, requesting 'safe mode'.
+ - V(optical) -- Boot from CD/DVD/BD drive.
+ - V(setup) -- Boot into setup utility.
+ - V(default) -- Remove any IPMI directed boot device request.
required: true
choices:
- network
@@ -95,17 +95,17 @@ author: "Bulat Gaifullin (@bgaifullin) "

RETURN = r"""
bootdev:
- description: The boot device name which will be used beyond next boot.
+ description: The boot device name which is used beyond next boot.
returned: success
type: str
sample: default
persistent:
- description: If True, system firmware will use this device beyond next boot.
+ description: If V(true), system firmware uses this device beyond next boot.
returned: success
type: bool
sample: false
uefimode:
- description: If True, system firmware will use UEFI boot explicitly beyond next boot.
+ description: If V(true), system firmware uses UEFI boot explicitly beyond next boot.
returned: success
type: bool
sample: false
diff --git a/plugins/modules/ipmi_power.py b/plugins/modules/ipmi_power.py
index 35789b15e6..292ecc73aa 100644
--- a/plugins/modules/ipmi_power.py
+++ b/plugins/modules/ipmi_power.py
@@ -51,12 +51,12 @@ options:
state:
description:
- Whether to ensure that the machine in desired state.
- - "The choices for state are:"
- - V(on) -- Request system turn on
- - V(off) -- Request system turn off without waiting for OS to shutdown
- - V(shutdown) -- Have system request OS proper shutdown
- - V(reset) -- Request system reset without waiting for OS
- - "V(boot) -- If system is off, then V(on), else V(reset)"
+ - 'The choices for state are:'
+ - V(on) -- Request system turn on.
+ - V(off) -- Request system turn off without waiting for OS to shut down.
+ - V(shutdown) -- Have system request OS proper shutdown.
+ - V(reset) -- Request system reset without waiting for OS.
+ - V(boot) -- If system is off, then V(on), else V(reset).
- Either this option or O(machine) is required.
choices: ['on', 'off', shutdown, reset, boot]
type: str
@@ -111,7 +111,17 @@ status:
targetAddress:
description: The remote target address.
type: int
- sample: [{"powerstate": "on", "targetAddress": 48}, {"powerstate": "on", "targetAddress": 50}]
+ sample:
+ [
+ {
+ "powerstate": "on",
+ "targetAddress": 48
+ },
+ {
+ "powerstate": "on",
+ "targetAddress": 50
+ }
+ ]
"""

EXAMPLES = r"""
diff --git a/plugins/modules/iptables_state.py b/plugins/modules/iptables_state.py
index 6f3fa19042..21fe75ce02 100644
--- a/plugins/modules/iptables_state.py
+++ b/plugins/modules/iptables_state.py
@@ -27,8 +27,8 @@ description:
notes:
- The rollback feature is not a module option and depends on task's attributes.
To enable it, the module must be played asynchronously, in other words by setting task attributes C(poll) to V(0), and C(async) to a value less or equal to C(ANSIBLE_TIMEOUT). - If C(async) is greater, the rollback will still happen if it shall happen, but you will experience a connection timeout - instead of more relevant info returned by the module after its failure. + If C(async) is greater, the rollback still happens when needed, but you experience a connection timeout instead of more + relevant info returned by the module after its failure. attributes: check_mode: support: full @@ -147,7 +147,8 @@ initial_state: type: list elements: str returned: always - sample: [ + sample: + [ "# Generated by xtables-save v1.8.2", "*filter", ":INPUT ACCEPT [0:0]", @@ -161,7 +162,8 @@ restored: type: list elements: str returned: always - sample: [ + sample: + [ "# Generated by xtables-save v1.8.2", "*filter", ":INPUT DROP [0:0]", @@ -180,7 +182,8 @@ saved: type: list elements: str returned: always - sample: [ + sample: + [ "# Generated by xtables-save v1.8.2", "*filter", ":INPUT ACCEPT [0:0]", diff --git a/plugins/modules/ipwcli_dns.py b/plugins/modules/ipwcli_dns.py index 118f59e8d9..604eb82b5f 100644 --- a/plugins/modules/ipwcli_dns.py +++ b/plugins/modules/ipwcli_dns.py @@ -16,7 +16,7 @@ short_description: Manage DNS Records for Ericsson IPWorks using C(ipwcli) version_added: '0.2.0' description: - - Manage DNS records for the Ericsson IPWorks DNS server. The module will use the C(ipwcli) to deploy the DNS records. + - Manage DNS records for the Ericsson IPWorks DNS server. The module uses the C(ipwcli) to deploy the DNS records. requirements: - ipwcli (installed on Ericsson IPWorks) @@ -271,18 +271,18 @@ def run_module(): dnsname=dict(type='str', required=True), type=dict(type='str', required=True, choices=['A', 'AAAA', 'SRV', 'NAPTR']), container=dict(type='str', required=True), - address=dict(type='str', required=False), - ttl=dict(type='int', required=False, default=3600), + address=dict(type='str'), + ttl=dict(type='int', default=3600), state=dict(type='str', default='present', choices=['absent', 'present']), - priority=dict(type='int', required=False, default=10), - weight=dict(type='int', required=False, default=10), - port=dict(type='int', required=False), - target=dict(type='str', required=False), - order=dict(type='int', required=False), - preference=dict(type='int', required=False), - flags=dict(type='str', required=False, choices=['S', 'A', 'U', 'P']), - service=dict(type='str', required=False), - replacement=dict(type='str', required=False), + priority=dict(type='int', default=10), + weight=dict(type='int', default=10), + port=dict(type='int'), + target=dict(type='str'), + order=dict(type='int'), + preference=dict(type='int'), + flags=dict(type='str', choices=['S', 'A', 'U', 'P']), + service=dict(type='str'), + replacement=dict(type='str'), username=dict(type='str', required=True), password=dict(type='str', required=True, no_log=True) ) diff --git a/plugins/modules/irc.py b/plugins/modules/irc.py index cbeb3fafa0..d18c9fd85f 100644 --- a/plugins/modules/irc.py +++ b/plugins/modules/irc.py @@ -51,19 +51,35 @@ options: description: - Text color for the message. 
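The iptables_state rollback note above maps to task attributes rather than module options; a minimal sketch of that pattern, assuming a previously saved ruleset at an illustrative path:

```yaml
- name: Restore a ruleset, letting the module roll back if connectivity is lost
  community.general.iptables_state:
    state: restored
    path: /etc/iptables/rules.v4   # illustrative path to a saved state
  async: "{{ ansible_timeout }}"   # keep async <= ANSIBLE_TIMEOUT per the note
  poll: 0
```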
default: "none" - choices: ["none", "white", "black", "blue", "green", "red", "brown", "purple", "orange", "yellow", "light_green", "teal", - "light_cyan", "light_blue", "pink", "gray", "light_gray"] + choices: + - none + - white + - black + - blue + - green + - red + - brown + - purple + - orange + - yellow + - light_green + - teal + - light_cyan + - light_blue + - pink + - gray + - light_gray aliases: [colour] channel: type: str description: - - Channel name. One of nick_to or channel needs to be set. When both are set, the message will be sent to both of them. + - Channel name. One of nick_to or channel needs to be set. When both are set, the message is sent to both of them. nick_to: type: list elements: str description: - A list of nicknames to send the message to. One of nick_to or channel needs to be set. When both are defined, the - message will be sent to both of them. + message is sent to both of them. key: type: str description: @@ -102,7 +118,7 @@ options: default: none validate_certs: description: - - If set to V(false), the SSL certificates will not be validated. + - If set to V(false), the SSL certificates are not validated. - This should always be set to V(true). Using V(false) is unsafe and should only be done if the network between between Ansible and the IRC server is known to be safe. - B(Note:) for security reasons, you should always set O(use_tls=true) and O(validate_certs=true) whenever possible. @@ -124,7 +140,7 @@ EXAMPLES = r""" server: irc.example.net use_tls: true validate_certs: true - channel: #t1 + channel: '#t1' msg: Hello world - name: Send a message to an IRC channel @@ -134,7 +150,7 @@ EXAMPLES = r""" server: irc.example.net use_tls: true validate_certs: true - channel: #t1 + channel: '#t1' msg: 'All finished at {{ ansible_date_time.iso8601 }}' color: red nick: ansibleIRC @@ -146,7 +162,7 @@ EXAMPLES = r""" server: irc.example.net use_tls: true validate_certs: true - channel: #t1 + channel: '#t1' nick_to: - nick1 - nick2 @@ -216,9 +232,11 @@ def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=None, k irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM) if use_tls: + kwargs = {} if validate_certs: try: context = ssl.create_default_context() + kwargs["server_hostname"] = server except AttributeError: raise Exception('Need at least Python 2.7.9 for SSL certificate validation') else: @@ -228,7 +246,7 @@ def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=None, k else: context = ssl.SSLContext() context.verify_mode = ssl.CERT_NONE - irc = context.wrap_socket(irc) + irc = context.wrap_socket(irc, **kwargs) irc.connect((server, int(port))) if passwd: @@ -293,7 +311,7 @@ def main(): server=dict(default='localhost'), port=dict(type='int', default=6667), nick=dict(default='ansible'), - nick_to=dict(required=False, type='list', elements='str'), + nick_to=dict(type='list', elements='str'), msg=dict(required=True), color=dict(default="none", aliases=['colour'], choices=["white", "black", "blue", "green", "red", "brown", @@ -302,7 +320,7 @@ def main(): "light_blue", "pink", "gray", "light_gray", "none"]), style=dict(default="none", choices=["underline", "reverse", "bold", "italic", "none"]), - channel=dict(required=False), + channel=dict(), key=dict(no_log=True), topic=dict(), passwd=dict(no_log=True), diff --git a/plugins/modules/iso_create.py b/plugins/modules/iso_create.py index 008cb271bb..70f76558e6 100644 --- a/plugins/modules/iso_create.py +++ b/plugins/modules/iso_create.py @@ -32,8 +32,8 @@ attributes: options: 
src_files:
description:
- - This is a list of absolute paths of source files or folders which will be contained in the new generated ISO file.
- - Will fail if specified file or folder in O(src_files) does not exist on local machine.
+ - This is a list of absolute paths of source files or folders to be contained in the new generated ISO file.
+ - The module fails if a specified file or folder in O(src_files) does not exist on the local machine.
- 'Note: With all ISO9660 levels from 1 to 3, all file names are restricted to uppercase letters, numbers and underscores
(_). File names are limited to 31 characters, directory nesting is limited to 8 levels, and path names are limited to
255 characters.'
@@ -43,7 +43,7 @@ options:
dest_iso:
description:
- The absolute path with file name of the new generated ISO file on local machine.
- - Will create intermediate folders when they does not exist.
+ - It creates intermediate folders when they do not exist.
type: path
required: true
interchange_level:
diff --git a/plugins/modules/iso_customize.py b/plugins/modules/iso_customize.py
index feac8417b8..5ee5b22c2c 100644
--- a/plugins/modules/iso_customize.py
+++ b/plugins/modules/iso_customize.py
@@ -14,7 +14,7 @@ module: iso_customize
short_description: Add/remove/change files in ISO file
description:
- This module is used to add/remove/change files in ISO file.
- - The file inside ISO will be overwritten if it exists by option O(add_files).
+ - The file inside the ISO is overwritten by option O(add_files) if it exists.
author:
- Yuhua Zou (@ZouYuhua)
requirements:
@@ -51,7 +51,7 @@ options:
add_files:
description:
- Allows to add and replace files in the ISO file.
- - Will create intermediate folders inside the ISO file when they do not exist.
+ - It creates intermediate folders inside the ISO file when they do not exist.
type: list
required: false
elements: dict
@@ -69,9 +69,9 @@ options:
required: true
notes:
- The C(pycdlib) library states it supports Python 2.7 and 3.4+.
- - The function C(add_file) in pycdlib will overwrite the existing file in ISO with type ISO9660 / Rock Ridge 1.12 / Joliet
- / UDF. But it will not overwrite the existing file in ISO with Rock Ridge 1.09 / 1.10. So we take workaround "delete the
- existing file and then add file for ISO with Rock Ridge".
+ - The function C(add_file) in pycdlib is designed to overwrite the existing file in ISO with type ISO9660 / Rock Ridge 1.12
+ / Joliet / UDF. But it does not overwrite the existing file in ISO with Rock Ridge 1.09 / 1.10. As a workaround, the module
+ deletes the existing file and then adds the new file for ISO with Rock Ridge.
"""

EXAMPLES = r"""
diff --git a/plugins/modules/iso_extract.py b/plugins/modules/iso_extract.py
index 8cda967b64..88644a6eb6 100644
--- a/plugins/modules/iso_extract.py
+++ b/plugins/modules/iso_extract.py
@@ -55,19 +55,19 @@ options:
required: true
force:
description:
- - If V(true), which will replace the remote file when contents are different than the source.
- - If V(false), the file will only be extracted and copied if the destination does not already exist.
+ - If V(true), it replaces the remote file when contents are different from the source.
+ - If V(false), the file is only extracted and copied if the destination does not already exist.
type: bool
default: true
executable:
description:
- The path to the C(7z) executable to use for extracting files from the ISO.
- - If not provided, it will assume the value V(7z).
+ - If not provided, it assumes the value V(7z).
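The O(force) semantics described above for iso_extract are easiest to read in a task; a minimal sketch with hypothetical image and file names:

```yaml
- name: Copy two files out of an ISO only if they are missing from dest
  community.general.iso_extract:
    image: /tmp/installer.iso    # hypothetical ISO path
    dest: /tmp/extracted
    files:
      - vmlinuz
      - initrd.img
    force: false   # existing files in dest are left alone
```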
type: path password: description: - Password used to decrypt files from the ISO. - - Will only be used if 7z is used. + - It is only used if C(7z) is used. - The password is used as a command line argument to 7z. This is a B(potential security risk) that allows passwords to be revealed if someone else can list running processes on the same machine in the right moment. type: str diff --git a/plugins/modules/jabber.py b/plugins/modules/jabber.py index 01a34ff9f5..ab73672410 100644 --- a/plugins/modules/jabber.py +++ b/plugins/modules/jabber.py @@ -108,9 +108,9 @@ def main(): password=dict(required=True, no_log=True), to=dict(required=True), msg=dict(required=True), - host=dict(required=False), - port=dict(required=False, default=5222, type='int'), - encoding=dict(required=False), + host=dict(), + port=dict(default=5222, type='int'), + encoding=dict(), ), supports_check_mode=True ) diff --git a/plugins/modules/java_cert.py b/plugins/modules/java_cert.py index 8746c2d617..13cfea9324 100644 --- a/plugins/modules/java_cert.py +++ b/plugins/modules/java_cert.py @@ -32,7 +32,7 @@ options: cert_port: description: - Port to connect to URL. - - This will be used to create server URL:PORT. + - This is used to create server URL:PORT. type: int default: 443 cert_path: @@ -98,8 +98,8 @@ options: state: description: - Defines action which can be either certificate import or removal. - - When state is present, the certificate will always idempotently be inserted into the keystore, even if there already - exists a cert alias that is different. + - When O(state=present), the certificate is always inserted into the keystore, even if there already exists a cert alias + that is different. type: str choices: [absent, present] default: present @@ -197,18 +197,6 @@ EXAMPLES = r""" """ RETURN = r""" -msg: - description: Output from stdout of keytool command after execution of given command. - returned: success - type: str - sample: "Module require existing keystore at keystore_path '/tmp/test/cacerts'" - -rc: - description: Keytool command execution return value. - returned: success - type: int - sample: "0" - cmd: description: Executed command to get action done. 
returned: success @@ -315,12 +303,13 @@ def _export_public_cert_from_pkcs12(module, executable, pkcs_file, alias, passwo "-noprompt", "-keystore", pkcs_file, - "-alias", - alias, "-storetype", "pkcs12", "-rfc" ] + # Append optional alias + if alias: + export_cmd.extend(["-alias", alias]) (export_rc, export_stdout, export_err) = module.run_command(export_cmd, data=password, check_rc=False) if export_rc != 0: @@ -393,6 +382,10 @@ def import_pkcs12_path(module, executable, pkcs12_path, pkcs12_pass, pkcs12_alia keystore_path, keystore_pass, keystore_alias, keystore_type): ''' Import pkcs12 from path into keystore located on keystore_path as alias ''' + optional_aliases = { + "-destalias": keystore_alias, + "-srcalias": pkcs12_alias + } import_cmd = [ executable, "-importkeystore", @@ -401,13 +394,14 @@ def import_pkcs12_path(module, executable, pkcs12_path, pkcs12_pass, pkcs12_alia "pkcs12", "-srckeystore", pkcs12_path, - "-srcalias", - pkcs12_alias, "-destkeystore", keystore_path, - "-destalias", - keystore_alias ] + # Append optional aliases + for flag, value in optional_aliases.items(): + if value: + import_cmd.extend([flag, value]) + import_cmd += _get_keystore_type_keytool_parameters(keystore_type) secret_data = "%s\n%s" % (keystore_pass, pkcs12_pass) diff --git a/plugins/modules/java_keystore.py b/plugins/modules/java_keystore.py index df7e71abbe..c826c9af4c 100644 --- a/plugins/modules/java_keystore.py +++ b/plugins/modules/java_keystore.py @@ -24,8 +24,8 @@ options: name: description: - Name of the certificate in the keystore. - - If the provided name does not exist in the keystore, the module will re-create the keystore. This behavior changed - in community.general 3.0.0, before that the module would fail when the name did not match. + - If the provided name does not exist in the keystore, the module re-creates the keystore. This behavior changed in + community.general 3.0.0, before that the module would fail when the name did not match. type: str required: true certificate: @@ -62,7 +62,7 @@ options: password: description: - Password that should be used to secure the keystore. - - If the provided password fails to unlock the keystore, the module will re-create the keystore with the new passphrase. + - If the provided password fails to unlock the keystore, the module re-creates the keystore with the new passphrase. This behavior changed in community.general 3.0.0, before that the module would fail when the password did not match. type: str required: true @@ -130,7 +130,7 @@ notes: or with the P(ansible.builtin.file#lookup) lookup), while O(certificate_path) and O(private_key_path) require that the files are available on the target host. - By design, any change of a value of options O(keystore_type), O(name) or O(password), as well as changes of key or certificate - materials will cause the existing O(dest) to be overwritten. + materials causes the existing O(dest) to be overwritten. """ EXAMPLES = r""" @@ -166,24 +166,12 @@ EXAMPLES = r""" """ RETURN = r""" -msg: - description: Output from stdout of keytool/openssl command after execution of given command or an error. - returned: changed and failure - type: str - sample: "Unable to find the current certificate fingerprint in ..." - err: description: Output from stderr of keytool/openssl command after error of given command. returned: failure type: str sample: "Keystore password is too short - must be at least 6 characters\n" -rc: - description: Keytool/openssl command execution return value. 
- returned: changed and failure - type: int - sample: "0" - cmd: description: Executed command to get action done. returned: changed and failure diff --git a/plugins/modules/jenkins_build.py b/plugins/modules/jenkins_build.py index a909eab690..4e11dd3642 100644 --- a/plugins/modules/jenkins_build.py +++ b/plugins/modules/jenkins_build.py @@ -99,6 +99,16 @@ EXAMPLES = r""" state: stopped url: http://localhost:8080 +- name: Trigger Jenkins build in detached mode + community.general.jenkins_build: + name: "detached-build" + state: present + user: admin + token: abcdefghijklmnopqrstuvwxyz123456 + url: http://localhost:8080 + detach: true + time_between_checks: 20 + - name: Delete a jenkins build using token authentication community.general.jenkins_build: name: "delete-experiment" @@ -126,7 +136,7 @@ user: type: str sample: admin url: - description: Url to connect to the Jenkins server. + description: URL to connect to the Jenkins server. returned: success type: str sample: https://jenkins.mydomain.com @@ -180,11 +190,11 @@ class JenkinsBuild: def get_jenkins_connection(self): try: - if (self.user and self.password): + if self.user and self.password: return jenkins.Jenkins(self.jenkins_url, self.user, self.password) - elif (self.user and self.token): + elif self.user and self.token: return jenkins.Jenkins(self.jenkins_url, self.user, self.token) - elif (self.user and not (self.password or self.token)): + elif self.user and not (self.password or self.token): return jenkins.Jenkins(self.jenkins_url, self.user) else: return jenkins.Jenkins(self.jenkins_url) diff --git a/plugins/modules/jenkins_build_info.py b/plugins/modules/jenkins_build_info.py index f252eb504a..85cb22ad2c 100644 --- a/plugins/modules/jenkins_build_info.py +++ b/plugins/modules/jenkins_build_info.py @@ -30,7 +30,7 @@ options: build_number: description: - An integer which specifies a build of a job. - - If not specified the last build information will be returned. + - If not specified the last build information is returned. type: int password: description: @@ -138,11 +138,11 @@ class JenkinsBuildInfo: def get_jenkins_connection(self): try: - if (self.user and self.password): + if self.user and self.password: return jenkins.Jenkins(self.jenkins_url, self.user, self.password) - elif (self.user and self.token): + elif self.user and self.token: return jenkins.Jenkins(self.jenkins_url, self.user, self.token) - elif (self.user and not (self.password or self.token)): + elif self.user and not (self.password or self.token): return jenkins.Jenkins(self.jenkins_url, self.user) else: return jenkins.Jenkins(self.jenkins_url) diff --git a/plugins/modules/jenkins_credential.py b/plugins/modules/jenkins_credential.py new file mode 100644 index 0000000000..3bd8a9dd7a --- /dev/null +++ b/plugins/modules/jenkins_credential.py @@ -0,0 +1,863 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +module: jenkins_credential +short_description: Manage Jenkins credentials and domains through API +version_added: 11.1.0 +description: + - This module allows managing Jenkins credentials and domain scopes through the Jenkins HTTP API. 
+ - Create, update, and delete different credential types such as C(username/password), C(secret text), C(SSH key), C(certificates),
+ C(GitHub App), and domains.
+ - For scoped domains (O(type=scope)), it supports restrictions based on V(hostname), V(hostname:port), V(path), and V(scheme).
+requirements:
+ - urllib3 >= 1.26.0
+author:
+ - Youssef Ali (@YoussefKhalidAli)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ id:
+ description:
+ - The ID of the Jenkins credential or domain.
+ type: str
+ type:
+ description:
+ - Type of the credential or action.
+ choices:
+ - user_and_pass
+ - file
+ - text
+ - github_app
+ - ssh_key
+ - certificate
+ - scope
+ - token
+ type: str
+ state:
+ description:
+ - The state of the credential.
+ choices:
+ - present
+ - absent
+ default: present
+ type: str
+ scope:
+ description:
+ - Jenkins credential domain scope.
+ - Deleting a domain scope deletes all credentials within it.
+ type: str
+ default: '_'
+ force:
+ description:
+ - Force update if the credential already exists, used with O(state=present).
+ - If set to V(true), it deletes the existing credential before creating a new one.
+ - Always returns RV(ignore:changed=true).
+ type: bool
+ default: false
+ url:
+ description:
+ - Jenkins server URL.
+ type: str
+ default: http://localhost:8080
+ jenkins_user:
+ description:
+ - Jenkins user for authentication.
+ required: true
+ type: str
+ jenkins_password:
+ description:
+ - Jenkins password for token creation. Required if O(type=token).
+ type: str
+ token:
+ description:
+ - Jenkins API token. Required unless O(type=token).
+ type: str
+ description:
+ description:
+ - Description of the credential or domain.
+ default: ''
+ type: str
+ location:
+ description:
+ - Location of the credential. Either V(system) or V(folder).
+ - If O(location=folder) then O(url) must be set to V(/job/).
+ choices:
+ - system
+ - folder
+ default: 'system'
+ type: str
+ name:
+ description:
+ - Name of the token to generate. Required if O(type=token).
+ - When generating a new token, do not pass O(id). It is generated automatically.
+ - Creating two tokens with the same name generates two distinct tokens with different RV(token_uuid) values.
+ - Replacing a token with another one of the same name requires deleting the original first using O(force=true).
+ type: str
+ username:
+ description:
+ - Username for credentials types that require it (for example O(type=ssh_key) or O(type=user_and_pass)).
+ type: str
+ password:
+ description:
+ - Password for credentials types that require it (for example O(type=user_and_pass) or O(type=certificate)).
+ type: str
+ secret:
+ description:
+ - Secret text (used when O(type=text)).
+ type: str
+ appID:
+ description:
+ - GitHub App ID.
+ type: str
+ api_uri:
+ description:
+ - Link to the GitHub API.
+ default: 'https://api.github.com'
+ type: str
+ owner:
+ description:
+ - GitHub App owner.
+ type: str
+ file_path:
+ description:
+ - File path to secret file (for example O(type=file) or O(type=certificate)).
+ - For O(type=certificate), this can be a V(.p12) or V(.pem) file.
+ type: path
+ private_key_path:
+ description:
+ - Path to private key file for PEM certificates or GitHub Apps.
+ type: path
+ passphrase:
+ description:
+ - SSH passphrase if needed.
+ type: str
+ inc_hostname:
+ description:
+ - List of hostnames to include in scope.
+ type: list
+ elements: str
+ exc_hostname:
+ description:
+ - List of hostnames to exclude from scope.
+ - If a hostname appears in both this list and O(inc_hostname), the hostname is excluded.
+ type: list
+ elements: str
+ inc_hostname_port:
+ description:
+ - List of V(host:port) to include in scope.
+ type: list
+ elements: str
+ exc_hostname_port:
+ description:
+ - List of V(host:port) to exclude from scope.
+ - If a V(host:port) entry appears in both this list and O(inc_hostname_port), it is excluded.
+ type: list
+ elements: str
+ inc_path:
+ description:
+ - List of URL paths to include when matching credentials to domains.
+ - 'B(Matching is hierarchical): subpaths of excluded paths are also excluded, even if explicitly included.'
+ type: list
+ elements: str
+ exc_path:
+ description:
+ - List of URL paths to exclude.
+ - If a path appears in both this list and O(inc_path), it is excluded.
+ - If you exclude a subpath of a path previously included, that subpath alone is excluded.
+ type: list
+ elements: str
+ schemes:
+ description:
+ - List of schemes (for example V(http) or V(https)) to match.
+ type: list
+ elements: str
+"""
+
+EXAMPLES = r"""
+- name: Generate token
+ community.general.jenkins_credential:
+ id: "test-token"
+ jenkins_user: "admin"
+ jenkins_password: "password"
+ type: "token"
+ register: token_result
+
+- name: Add CUSTOM scope credential
+ community.general.jenkins_credential:
+ id: "CUSTOM"
+ type: "scope"
+ jenkins_user: "admin"
+ token: "{{ token }}"
+ description: "Custom scope credential"
+ inc_path:
+ - "include/path"
+ - "include/path2"
+ exc_path:
+ - "exclude/path"
+ - "exclude/path2"
+ inc_hostname:
+ - "included-hostname"
+ - "included-hostname2"
+ exc_hostname:
+ - "excluded-hostname"
+ - "excluded-hostname2"
+ schemes:
+ - "http"
+ - "https"
+ inc_hostname_port:
+ - "included-hostname:7000"
+ - "included-hostname2:7000"
+ exc_hostname_port:
+ - "excluded-hostname:7000"
+ - "excluded-hostname2:7000"
+
+- name: Add user_and_pass credential
+ community.general.jenkins_credential:
+ id: "userpass-id"
+ type: "user_and_pass"
+ jenkins_user: "admin"
+ token: "{{ token }}"
+ description: "User and password credential"
+ username: "user1"
+ password: "pass1"
+
+- name: Add file credential to custom scope
+ community.general.jenkins_credential:
+ id: "file-id"
+ type: "file"
+ jenkins_user: "admin"
+ token: "{{ token }}"
+ scope: "CUSTOM"
+ description: "File credential"
+ file_path: "../vars/my-secret.pem"
+
+- name: Add text credential to folder
+ community.general.jenkins_credential:
+ id: "text-id"
+ type: "text"
+ jenkins_user: "admin"
+ token: "{{ token }}"
+ description: "Text credential"
+ secret: "mysecrettext"
+ location: "folder"
+ url: "http://localhost:8080/job/test"
+
+- name: Add githubApp credential
+ community.general.jenkins_credential:
+ id: "githubapp-id"
+ type: "github_app"
+ jenkins_user: "admin"
+ token: "{{ token }}"
+ description: "GitHub app credential"
+ appID: "12345"
+ file_path: "../vars/github.pem"
+ owner: "github_owner"
+
+- name: Add sshKey credential
+ community.general.jenkins_credential:
+ id: "sshkey-id"
+ type: "ssh_key"
+ jenkins_user: "admin"
+ token: "{{ token }}"
+ description: "SSH key credential"
+ username: "sshuser"
+ file_path: "../vars/ssh_key"
+ passphrase: 1234
+
+- name: Add certificate credential (p12)
+ community.general.jenkins_credential:
+ id: "certificate-id"
+ type: "certificate"
+ jenkins_user: "admin"
+ token: "{{ token }}"
+ description: "Certificate credential"
+ password: "12345678901234"
file_path: "../vars/certificate.p12" + +- name: Add certificate credential (pem) + community.general.jenkins_credential: + id: "certificate-id-pem" + type: "certificate" + jenkins_user: "admin" + token: "{{ token }}" + description: "Certificate credential (pem)" + file_path: "../vars/cert.pem" + private_key_path: "../vars/private.key" +""" +RETURN = r""" +details: + description: Return more details in case of errors. + type: str + returned: failed +token: + description: + - The generated API token if O(type=token). + - This is needed to authenticate API calls later. + - This should be stored securely, as it is the only time it is returned. + type: str + returned: success +token_uuid: + description: + - The generated ID of the token. + - You pass this value back to the module as O(id) to edit or revoke the token later. + - This should be stored securely, as it is the only time it is returned. + type: str + returned: success +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url, basic_auth_header +from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible_collections.community.general.plugins.module_utils import deps + +import json +import os +import base64 + +with deps.declare("urllib3", reason="urllib3 is required to embed files into requests"): + import urllib3 + + +# Function to validate file paths exist on disk +def validate_file_exist(module, path): + + if path and not os.path.exists(path): + module.fail_json(msg="File not found: {}".format(path)) + + +# Gets the Jenkins crumb for CSRF protection which is required for API calls +def get_jenkins_crumb(module, headers): + type = module.params["type"] + url = module.params["url"] + + if "/job" in url: + url = url.split("/job")[0] + + crumb_url = "{}/crumbIssuer/api/json".format(url) + + response, info = fetch_url(module, crumb_url, headers=headers) + + if info["status"] != 200: + module.fail_json(msg="Failed to fetch Jenkins crumb. 
Confirm the token is valid.")
+
+    # Cookie is needed to generate API token
+    cookie = info.get("set-cookie", "")
+    session_cookie = cookie.split(";")[0] if cookie else None
+
+    try:
+        data = response.read()
+        json_data = json.loads(data)
+        crumb_request_field = json_data["crumbRequestField"]
+        crumb = json_data["crumb"]
+        headers[crumb_request_field] = crumb  # Set the crumb in headers
+        headers["Content-Type"] = (
+            "application/x-www-form-urlencoded"  # Set Content-Type for form data
+        )
+        if type == "token":
+            headers["Cookie"] = (
+                session_cookie  # Set session cookie for token operations
+            )
+        return crumb_request_field, crumb, session_cookie  # Return for test purposes
+
+    except Exception:
+        return None
+
+
+# Function to clean the data sent via API by removing unwanted keys and None values
+def clean_data(data):
+    # Keys to remove (including those with None values)
+    keys_to_remove = {
+        "url",
+        "token",
+        "jenkins_user",
+        "jenkins_password",
+        "file_path",
+        "private_key_path",
+        "type",
+        "state",
+        "force",
+        "name",
+        "scope",
+        "location",
+        "api_uri",
+    }
+
+    # Filter out None values and unwanted keys
+    cleaned_data = {
+        key: value
+        for key, value in data.items()
+        if value is not None and key not in keys_to_remove
+    }
+
+    return cleaned_data
+
+
+# Function to check if credentials/domain exists
+def target_exists(module, check_domain=False):
+    url = module.params["url"]
+    location = module.params["location"]
+    scope = module.params["scope"]
+    name = module.params["id"]
+    user = module.params["jenkins_user"]
+    token = module.params["token"]
+
+    headers = {"Authorization": basic_auth_header(user, token)}
+
+    if module.params["type"] == "scope" or check_domain:
+        target_url = "{}/credentials/store/{}/domain/{}/api/json".format(
+            url, location, scope if check_domain else name
+        )
+    elif module.params["type"] == "token":
+        return False  # Can't check token
+    else:
+        target_url = "{}/credentials/store/{}/domain/{}/credential/{}/api/json".format(
+            url, location, scope, name
+        )
+
+    response, info = fetch_url(module, target_url, headers=headers)
+    status = info.get("status", 0)
+
+    if status == 200:
+        return True
+    elif status == 404:
+        return False
+    else:
+        module.fail_json(
+            msg="Unexpected status code {} when checking {} existence.".format(
+                status, name
+            )
+        )
+
+
+# Function to delete the scope or credential provided
+def delete_target(module, headers):
+    user = module.params["jenkins_user"]
+    type = module.params["type"]
+    url = module.params["url"]
+    location = module.params["location"]
+    id = module.params["id"]
+    scope = module.params["scope"]
+
+    body = False
+
+    try:
+
+        if type == "token":
+            delete_url = "{}/user/{}/descriptorByName/jenkins.security.ApiTokenProperty/revoke".format(
+                url, user
+            )
+            body = urlencode({"tokenUuid": id})
+
+        elif type == "scope":
+            delete_url = "{}/credentials/store/{}/domain/{}/doDelete".format(
+                url, location, id
+            )
+
+        else:
+            delete_url = (
+                "{}/credentials/store/{}/domain/{}/credential/{}/doDelete".format(
+                    url, location, scope, id
+                )
+            )
+
+        response, info = fetch_url(
+            module,
+            delete_url,
+            headers=headers,
+            data=body if body else None,
+            method="POST",
+        )
+
+        status = info.get("status", 0)
+        if not status == 200:
+            module.fail_json(
+                msg="Failed to delete: HTTP {}, {}, {}".format(
+                    status, response, headers
+                )
+            )
+
+    except Exception as e:
+        module.fail_json(msg="Exception during delete: {}".format(str(e)))
+
+
+# Function to read the private key for types github_app and ssh_key
+def read_privateKey(module):
+    try:
+        with
open(module.params["private_key_path"], "r") as f:
+            private_key = f.read().strip()
+        return private_key
+    except Exception as e:
+        module.fail_json(msg="Failed to read private key file: {}".format(str(e)))
+
+
+# Function to build the multipart form-data body and content-type header for file credential upload.
+# Returns:
+#     body (bytes): Encoded multipart data
+#     content_type (str): Content-Type header including boundary
+def embed_file_into_body(module, file_path, credentials):
+
+    filename = os.path.basename(file_path)
+
+    try:
+        with open(file_path, "rb") as f:
+            file_bytes = f.read()
+    except Exception as e:
+        module.fail_json(msg="Failed to read file: {}".format(str(e)))
+        return "", ""  # Return for test purposes
+
+    credentials.update(
+        {
+            "file": "file0",
+            "fileName": filename,
+        }
+    )
+
+    payload = {"credentials": credentials}
+
+    fields = {"file0": (filename, file_bytes), "json": json.dumps(payload)}
+
+    body, content_type = urllib3.encode_multipart_formdata(fields)
+    return body, content_type
+
+
+# Main function to run the Ansible module
+def run_module():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            id=dict(type="str"),
+            type=dict(
+                type="str",
+                choices=[
+                    "user_and_pass",
+                    "file",
+                    "text",
+                    "github_app",
+                    "ssh_key",
+                    "certificate",
+                    "scope",
+                    "token",
+                ],
+            ),
+            state=dict(type="str", default="present", choices=["present", "absent"]),
+            force=dict(type="bool", default=False),
+            scope=dict(type="str", default="_"),
+            url=dict(type="str", default="http://localhost:8080"),
+            jenkins_user=dict(type="str", required=True),
+            jenkins_password=dict(type="str", no_log=True),
+            token=dict(type="str", no_log=True),
+            description=dict(type="str", default=""),
+            location=dict(type="str", default="system", choices=["system", "folder"]),
+            name=dict(type="str"),
+            username=dict(type="str"),
+            password=dict(type="str", no_log=True),
+            file_path=dict(type="path"),
+            secret=dict(type="str", no_log=True),
+            appID=dict(type="str"),
+            api_uri=dict(type="str", default="https://api.github.com"),
+            owner=dict(type="str"),
+            passphrase=dict(type="str", no_log=True),
+            private_key_path=dict(type="path", no_log=True),
+            # Scope specifications parameters
+            inc_hostname=dict(type="list", elements="str"),
+            exc_hostname=dict(type="list", elements="str"),
+            inc_hostname_port=dict(type="list", elements="str"),
+            exc_hostname_port=dict(type="list", elements="str"),
+            inc_path=dict(type="list", elements="str"),
+            exc_path=dict(type="list", elements="str"),
+            schemes=dict(type="list", elements="str"),
+        ),
+        supports_check_mode=True,
+        required_if=[
+            ("state", "present", ["type"]),
+            ("state", "absent", ["id"]),
+            ("type", "token", ["name", "jenkins_password"]),
+            ("type", "user_and_pass", ["username", "password", "id", "token"]),
+            ("type", "file", ["file_path", "id", "token"]),
+            ("type", "text", ["secret", "id", "token"]),
+            ("type", "github_app", ["appID", "private_key_path", "id", "token"]),
+            ("type", "ssh_key", ["username", "private_key_path", "id", "token"]),
+            ("type", "certificate", ["file_path", "id", "token"]),
+            ("type", "scope", ["id", "token"]),
+        ],
+    )
+
+    # Parameters
+    id = module.params["id"]
+    type = module.params["type"]
+    state = module.params["state"]
+    force = module.params["force"]
+    scope = module.params["scope"]
+    url = module.params["url"]
+    jenkins_user = module.params["jenkins_user"]
+    jenkins_password = module.params["jenkins_password"]
+    name = module.params["name"]
+    token = module.params["token"]
+    description = module.params["description"]
+    location =
module.params["location"] + filePath = module.params["file_path"] + private_key_path = module.params["private_key_path"] + api_uri = module.params["api_uri"] + inc_hostname = module.params["inc_hostname"] + exc_hostname = module.params["exc_hostname"] + inc_hostname_port = module.params["inc_hostname_port"] + exc_hostname_port = module.params["exc_hostname_port"] + inc_path = module.params["inc_path"] + exc_path = module.params["exc_path"] + schemes = module.params["schemes"] + + deps.validate(module) + + headers = { + "Authorization": basic_auth_header(jenkins_user, token or jenkins_password), + } + + # Get the crumb for CSRF protection + get_jenkins_crumb(module, headers) + + result = dict( + changed=False, + msg="", + ) + + credentials = clean_data(module.params) + + does_exist = target_exists(module) + + # Check if the credential/domain doesn't exist and the user wants to delete + if not does_exist and state == "absent" and not type == "token": + result["changed"] = False + result["msg"] = "{} does not exist.".format(id) + module.exit_json(**result) + + if state == "present": + + # If updating, we need to delete the existing credential/domain first based on force parameter + if force and (does_exist or type == "token"): + delete_target(module, headers) + elif does_exist and not force: + result["changed"] = False + result["msg"] = "{} already exists. Use force=True to update.".format(id) + module.exit_json(**result) + + if type == "token": + + post_url = "{}/user/{}/descriptorByName/jenkins.security.ApiTokenProperty/generateNewToken".format( + url, jenkins_user + ) + + body = "newTokenName={}".format(name) + + elif type == "scope": + + post_url = "{}/credentials/store/{}/createDomain".format(url, location) + + specifications = [] + + # Create a domain in Jenkins + if inc_hostname or exc_hostname: + specifications.append( + { + "stapler-class": "com.cloudbees.plugins.credentials.domains.HostnameSpecification", + "includes": ",".join(inc_hostname), + "excludes": ",".join(exc_hostname), + } + ) + + if inc_hostname_port or exc_hostname_port: + specifications.append( + { + "stapler-class": "com.cloudbees.plugins.credentials.domains.HostnamePortSpecification", + "includes": ",".join(inc_hostname_port), + "excludes": ",".join(exc_hostname_port), + } + ) + + if schemes: + specifications.append( + { + "stapler-class": "com.cloudbees.plugins.credentials.domains.SchemeSpecification", + "schemes": ",".join(schemes), + }, + ) + + if inc_path or exc_path: + specifications.append( + { + "stapler-class": "com.cloudbees.plugins.credentials.domains.PathSpecification", + "includes": ",".join(inc_path), + "excludes": ",".join(exc_path), + } + ) + + payload = { + "name": id, + "description": description, + "specifications": specifications, + } + + else: + if filePath: + validate_file_exist(module, filePath) + elif private_key_path: + validate_file_exist(module, private_key_path) + + post_url = "{}/credentials/store/{}/domain/{}/createCredentials".format( + url, location, scope + ) + + cred_class = { + "user_and_pass": "com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl", + "file": "org.jenkinsci.plugins.plaincredentials.impl.FileCredentialsImpl", + "text": "org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl", + "github_app": "org.jenkinsci.plugins.github_branch_source.GitHubAppCredentials", + "ssh_key": "com.cloudbees.jenkins.plugins.sshcredentials.impl.BasicSSHUserPrivateKey", + "certificate": "com.cloudbees.plugins.credentials.impl.CertificateCredentialsImpl", + } + 
credentials.update({"$class": cred_class[type]})
+
+        if type == "file":
+
+            # Build multipart body and content-type
+            body, content_type = embed_file_into_body(module, filePath, credentials)
+            headers["Content-Type"] = content_type
+
+        elif type == "github_app":
+
+            private_key = read_privateKey(module)
+
+            credentials.update(
+                {
+                    "privateKey": private_key,
+                    "apiUri": api_uri,
+                }
+            )
+
+        elif type == "ssh_key":
+
+            private_key = read_privateKey(module)
+
+            credentials.update(
+                {
+                    "privateKeySource": {
+                        "stapler-class": "com.cloudbees.jenkins.plugins.sshcredentials.impl.BasicSSHUserPrivateKey$DirectEntryPrivateKeySource",
+                        "privateKey": private_key,
+                    },
+                }
+            )
+
+        elif type == "certificate":
+
+            name, ext = os.path.splitext(filePath)
+
+            if ext.lower() in [".p12", ".pfx"]:
+                try:
+                    with open(filePath, "rb") as f:
+                        file_content = f.read()
+                    uploaded_keystore = base64.b64encode(file_content).decode(
+                        "utf-8"
+                    )
+                except Exception as e:
+                    module.fail_json(
+                        msg="Failed to read or encode keystore file: {}".format(
+                            str(e)
+                        )
+                    )
+
+                credentials.update(
+                    {
+                        "keyStoreSource": {
+                            "$class": "com.cloudbees.plugins.credentials.impl.CertificateCredentialsImpl$UploadedKeyStoreSource",
+                            "uploadedKeystore": uploaded_keystore,
+                        },
+                    }
+                )
+
+            elif ext.lower() in [".pem", ".crt"]:  # PEM mode
+                try:
+                    with open(filePath, "r") as f:
+                        cert_chain = f.read()
+                    with open(private_key_path, "r") as f:
+                        private_key = f.read()
+                except Exception as e:
+                    module.fail_json(
+                        msg="Failed to read PEM files: {}".format(str(e))
+                    )
+
+                credentials.update(
+                    {
+                        "keyStoreSource": {
+                            "$class": "com.cloudbees.plugins.credentials.impl.CertificateCredentialsImpl$PEMEntryKeyStoreSource",
+                            "certChain": cert_chain,
+                            "privateKey": private_key,
+                        },
+                    }
+                )
+
+            else:
+                module.fail_json(
+                    msg="Unsupported certificate file type. Only .p12, .pfx, .pem or .crt are supported."
+                )
+
+        payload = {"credentials": credentials}
+
+        if not type == "file" and not type == "token":
+            body = urlencode({"json": json.dumps(payload)})
+
+    else:  # Delete
+
+        delete_target(module, headers)
+
+        module.exit_json(changed=True, msg="{} deleted successfully.".format(id))
+
+    if (
+        not type == "scope" and not scope == "_"
+    ):  # Check if custom scope exists if adding to a custom scope
+        if not target_exists(module, True):
+            module.fail_json(msg="Domain {} does not exist".format(scope))
+
+    try:
+        response, info = fetch_url(
+            module, post_url, headers=headers, data=body, method="POST"
+        )
+    except Exception as e:
+        module.fail_json(msg="Request to {} failed: {}".format(post_url, str(e)))
+
+    status = info.get("status", 0)
+
+    if not status == 200:
+        body = response.read() if response else b""
+        module.fail_json(
+            msg="Failed to {} credential".format(
+                "add/update" if state == "present" else "delete"
+            ),
+            details=body.decode("utf-8", errors="ignore"),
+        )
+
+    if type == "token":
+        response_data = json.loads(response.read())
+        result["token"] = response_data["data"]["tokenValue"]
+        result["token_uuid"] = response_data["data"]["tokenUuid"]
+
+    result["changed"] = True
+    result["msg"] = response.read().decode("utf-8")
+
+    module.exit_json(**result)
+
+
+if __name__ == "__main__":
+    run_module()
diff --git a/plugins/modules/jenkins_job.py b/plugins/modules/jenkins_job.py
index f539e569e8..8362a40255 100644
--- a/plugins/modules/jenkins_job.py
+++ b/plugins/modules/jenkins_job.py
@@ -76,8 +76,8 @@ options:
type: bool
default: true
description:
- - If set to V(false), the SSL certificates will not be validated.
This should only set to V(false) used on personally
- controlled sites using self-signed certificates as it avoids verifying the source site.
+ - If set to V(false), the SSL certificates are not validated. This should only be set to V(false) on personally controlled
+ sites using self-signed certificates as it avoids verifying the source site.
- The C(python-jenkins) library only handles this by using the environment variable E(PYTHONHTTPSVERIFY).
version_added: 2.3.0
"""
@@ -154,7 +154,7 @@ user:
type: str
sample: admin
url:
- description: Url to connect to the Jenkins server.
+ description: URL to connect to the Jenkins server.
returned: success
type: str
sample: https://jenkins.mydomain.com
@@ -350,14 +350,14 @@ def job_config_to_string(xml_str):
def main():
module = AnsibleModule(
argument_spec=dict(
- config=dict(type='str', required=False),
+ config=dict(type='str'),
name=dict(type='str', required=True),
- password=dict(type='str', required=False, no_log=True),
- state=dict(type='str', required=False, choices=['present', 'absent'], default="present"),
- enabled=dict(required=False, type='bool'),
- token=dict(type='str', required=False, no_log=True),
- url=dict(type='str', required=False, default="http://localhost:8080"),
- user=dict(type='str', required=False),
+ password=dict(type='str', no_log=True),
+ state=dict(type='str', choices=['present', 'absent'], default="present"),
+ enabled=dict(type='bool'),
+ token=dict(type='str', no_log=True),
+ url=dict(type='str', default="http://localhost:8080"),
+ user=dict(type='str'),
validate_certs=dict(type='bool', default=True),
),
mutually_exclusive=[
diff --git a/plugins/modules/jenkins_job_info.py b/plugins/modules/jenkins_job_info.py
index f406ec3b4b..37d9af3f56 100644
--- a/plugins/modules/jenkins_job_info.py
+++ b/plugins/modules/jenkins_job_info.py
@@ -53,7 +53,7 @@ options:
- User to authenticate with the Jenkins server.
validate_certs:
description:
- - If set to V(false), the SSL certificates will not be validated.
+ - If set to V(false), the SSL certificates are not validated.
- This should only set to V(false) used on personally controlled sites using self-signed certificates.
default: true
type: bool
@@ -135,7 +135,7 @@ jobs:
"fullname": "test-folder/test-job",
"url": "http://localhost:8080/job/test-job/",
"color": "blue"
- },
+ }
]
"""
diff --git a/plugins/modules/jenkins_node.py b/plugins/modules/jenkins_node.py
index affd462659..aa75100168 100644
--- a/plugins/modules/jenkins_node.py
+++ b/plugins/modules/jenkins_node.py
@@ -65,9 +65,9 @@ options:
offline_message:
description:
- Specifies the offline reason message to be set when configuring the Jenkins node state.
- - If O(offline_message) is given and requested O(state) is not V(disabled), an error will be raised.
+ - If O(offline_message) is given and requested O(state) is not V(disabled), an error is raised.
- Internally O(offline_message) is set using the V(toggleOffline) API, so updating the message when the node is already
- offline (current state V(disabled)) is not possible. In this case, a warning will be issued.
+ offline (current state V(disabled)) is not possible. In this case, a warning is issued.
type: str
version_added: 10.0.0
"""
diff --git a/plugins/modules/jenkins_plugin.py b/plugins/modules/jenkins_plugin.py
index 73ff40c725..f47dcfe92f 100644
--- a/plugins/modules/jenkins_plugin.py
+++ b/plugins/modules/jenkins_plugin.py
@@ -51,7 +51,7 @@ options:
type: str
description:
- Desired plugin state.
- - If set to V(latest), the check for new version will be performed every time. This is suitable to keep the plugin up-to-date.
+ - If set to V(latest), the check for a new version is performed every time. This is suitable to keep the plugin up-to-date.
choices: [absent, present, pinned, unpinned, enabled, disabled, latest]
default: present
timeout:
@@ -64,8 +64,8 @@ options:
description:
- Number of seconds after which a new copy of the C(update-center.json) file is downloaded. This is used to avoid the
need to download the plugin to calculate its checksum when O(state=latest) is specified.
- - Set it to V(0) if no cache file should be used. In that case, the plugin file will always be downloaded to calculate
- its checksum when O(state=latest) is specified.
+ - Set it to V(0) if no cache file should be used. In that case, the plugin file is always downloaded to calculate its
+ checksum when O(state=latest) is specified.
default: 86400
updates_url:
type: list
diff --git a/plugins/modules/jenkins_script.py b/plugins/modules/jenkins_script.py
index bd30f9daa7..5a00581366 100644
--- a/plugins/modules/jenkins_script.py
+++ b/plugins/modules/jenkins_script.py
@@ -39,8 +39,8 @@ options:
default: http://localhost:8080
validate_certs:
description:
- - If set to V(false), the SSL certificates will not be validated. This should only set to V(false) used on personally
- controlled sites using self-signed certificates as it avoids verifying the source site.
+ - If set to V(false), the SSL certificates are not validated. This should only be set to V(false) on personally controlled
+ sites using self-signed certificates as it avoids verifying the source site.
type: bool
default: true
user:
@@ -142,12 +142,12 @@ def main():
module = AnsibleModule(
argument_spec=dict(
script=dict(required=True, type="str"),
- url=dict(required=False, type="str", default="http://localhost:8080"),
- validate_certs=dict(required=False, type="bool", default=True),
- user=dict(required=False, type="str", default=None),
- password=dict(required=False, no_log=True, type="str", default=None),
- timeout=dict(required=False, type="int", default=10),
- args=dict(required=False, type="dict", default=None)
+ url=dict(type="str", default="http://localhost:8080"),
+ validate_certs=dict(type="bool", default=True),
+ user=dict(type="str"),
+ password=dict(no_log=True, type="str"),
+ timeout=dict(type="int", default=10),
+ args=dict(type="dict")
)
)
diff --git a/plugins/modules/jira.py b/plugins/modules/jira.py
index cc3136c3bf..f99c252675 100644
--- a/plugins/modules/jira.py
+++ b/plugins/modules/jira.py
@@ -59,6 +59,18 @@ options:
- The personal access token to log-in with.
- Mutually exclusive with O(username) and O(password).
version_added: 4.2.0
+ client_cert:
+ type: path
+ description:
+ - Client certificate if required.
+ - In addition to O(username) and O(password) or O(token). Not mutually exclusive.
+ version_added: 10.4.0
+ client_key:
+ type: path
+ description:
+ - Client certificate key if required.
+ - In addition to O(username) and O(password) or O(token). Not mutually exclusive.
+ version_added: 10.4.0

project:
type: str
@@ -105,14 +117,13 @@ options:
suboptions:
type:
description:
- - Use type to specify which of the JIRA visibility restriction types will be used.
+ - Use O(comment_visibility.type) to specify which of the JIRA visibility restriction types is used.
type: str
required: true
choices: [group, role]
value:
description:
- - Use value to specify value corresponding to the type of visibility restriction.
For example name of the group
-            or role.
+          - Specify value corresponding to the type of visibility restriction. For example, the name of the group or role.
         type: str
         required: true
     version_added: '3.2.0'
@@ -122,6 +133,14 @@ options:
     required: false
     description:
       - Only used when O(operation) is V(transition), and a bit of a misnomer, it actually refers to the transition name.
+      - This is mutually exclusive with O(status_id).
+  status_id:
+    type: str
+    required: false
+    description:
+      - Only used when O(operation) is V(transition), and refers to the transition ID.
+      - This is mutually exclusive with O(status).
+    version_added: 10.3.0
   assignee:
     type: str
     required: false
@@ -145,12 +164,12 @@ options:
     type: str
     required: false
     description:
-      - Set issue from which link will be created.
+      - Set issue from which link is created.
   outwardissue:
     type: str
     required: false
     description:
-      - Set issue to which link will be created.
+      - Set issue to which link is created.
   fields:
     type: dict
     required: false
@@ -172,7 +191,7 @@ options:
   maxresults:
     required: false
     description:
-      - Limit the result of O(operation=search). If no value is specified, the default jira limit will be used.
+      - Limit the result of O(operation=search). If no value is specified, the default JIRA limit is used.
       - Used when O(operation=search) only, ignored otherwise.
     type: int
     version_added: '0.2.0'
@@ -206,12 +225,12 @@ options:
       content:
         type: str
         description:
-          - The Base64 encoded contents of the file to attach. If not specified, the contents of O(attachment.filename) will
-            be used instead.
+          - The Base64 encoded contents of the file to attach. If not specified, the contents of O(attachment.filename) are
+            used instead.
       mimetype:
         type: str
         description:
-          - The MIME type to supply for the upload. If not specified, best-effort detection will be done.
+          - The MIME type to supply for the upload. If not specified, best-effort detection is performed.
 
 notes:
   - Currently this only works with basic-auth, or tokens.
   - To use with JIRA Cloud, pass the login e-mail as the O(username) and the API token as O(password).
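The O(status_id) option documented above lets a playbook drive a transition by its numeric ID rather than by resolving the transition name, which helps when transition names are localized or ambiguous. A minimal sketch; the server, credentials, issue key, and the ID V(31) are placeholders:

```yaml
- name: Transition an issue by transition ID instead of name
  community.general.jira:
    uri: '{{ server }}'
    username: '{{ user }}'
    password: '{{ pass }}'
    issue: PROJ-9            # placeholder issue key
    operation: transition
    status_id: "31"          # numeric transition ID; mutually exclusive with status
```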
@@ -438,6 +457,23 @@ EXAMPLES = r"""
     operation: attach
     attachment:
       filename: topsecretreport.xlsx
+
+# Use username, password and client certificate authentication
+- name: Create an issue
+  community.general.jira:
+    uri: '{{ server }}'
+    username: '{{ user }}'
+    password: '{{ pass }}'
+    client_cert: /path/to/client-cert
+    client_key: /path/to/client-key
+
+# Use token and client certificate authentication
+- name: Create an issue
+  community.general.jira:
+    uri: '{{ server }}'
+    token: '{{ token }}'
+    client_cert: /path/to/client-cert
+    client_key: /path/to/client-key
 """
 
 import base64
@@ -472,6 +508,8 @@ class JIRA(StateModuleHelper):
             username=dict(type='str'),
             password=dict(type='str', no_log=True),
             token=dict(type='str', no_log=True),
+            client_cert=dict(type='path'),
+            client_key=dict(type='path'),
             project=dict(type='str', ),
             summary=dict(type='str', ),
             description=dict(type='str', ),
@@ -483,6 +521,7 @@ class JIRA(StateModuleHelper):
                 value=dict(type='str', required=True)
             )),
             status=dict(type='str', ),
+            status_id=dict(type='str', ),
             assignee=dict(type='str', ),
             fields=dict(default={}, type='dict'),
             linktype=dict(type='str', ),
@@ -498,9 +537,11 @@ class JIRA(StateModuleHelper):
             ['username', 'token'],
             ['password', 'token'],
             ['assignee', 'account_id'],
+            ['status', 'status_id']
         ],
         required_together=[
             ['username', 'password'],
+            ['client_cert', 'client_key']
         ],
         required_one_of=[
             ['username', 'token'],
@@ -511,13 +552,13 @@ class JIRA(StateModuleHelper):
             ('operation', 'comment', ['issue', 'comment']),
             ('operation', 'workflow', ['issue', 'comment']),
             ('operation', 'fetch', ['issue']),
-            ('operation', 'transition', ['issue', 'status']),
+            ('operation', 'transition', ['issue']),
+            ('operation', 'transition', ['status', 'status_id'], True),
             ('operation', 'link', ['linktype', 'inwardissue', 'outwardissue']),
             ('operation', 'search', ['jql']),
         ),
         supports_check_mode=False
     )
-    use_old_vardict = False
     state_param = 'operation'
 
     def __init_module__(self):
@@ -616,14 +657,27 @@ class JIRA(StateModuleHelper):
         turl = self.vars.restbase + '/issue/' + self.vars.issue + "/transitions"
         tmeta = self.get(turl)
 
-        target = self.vars.status
         tid = None
+        target = None
+
+        if self.vars.status is not None:
+            target = self.vars.status.strip()
+        elif self.vars.status_id is not None:
+            tid = self.vars.status_id.strip()
+
         for t in tmeta['transitions']:
-            if t['name'] == target:
-                tid = t['id']
-                break
+            if target is not None:
+                if t['name'] == target:
+                    tid = t['id']
+                    break
+            else:
+                if tid == t['id']:
+                    break
         else:
-            raise ValueError("Failed find valid transition for '%s'" % target)
+            if target is not None:
+                raise ValueError("Failed to find valid transition for '%s'" % target)
+            else:
+                raise ValueError("Failed to find valid transition for ID '%s'" % tid)
 
         fields = dict(self.vars.fields)
         if self.vars.summary is not None:
diff --git a/plugins/modules/kdeconfig.py b/plugins/modules/kdeconfig.py
index 334db3aee4..ac542d04e8 100644
--- a/plugins/modules/kdeconfig.py
+++ b/plugins/modules/kdeconfig.py
@@ -17,12 +17,12 @@ description:
 options:
   path:
     description:
-      - Path to the config file. If the file does not exist it will be created.
+      - Path to the config file. If the file does not exist, it is created.
     type: path
     required: true
   kwriteconfig_path:
    description:
-      - Path to the kwriteconfig executable. If not specified, Ansible will try to discover it.
+      - Path to the kwriteconfig executable. If not specified, Ansible tries to discover it.
type: path values: description: @@ -141,7 +141,7 @@ def run_kwriteconfig(module, cmd, path, groups, key, value): else: args.append('false') else: - args.append(value) + args.extend(['--', value]) module.run_command(args, check_rc=True) diff --git a/plugins/modules/kernel_blacklist.py b/plugins/modules/kernel_blacklist.py index 1dbf94f629..e1cf3fddb5 100644 --- a/plugins/modules/kernel_blacklist.py +++ b/plugins/modules/kernel_blacklist.py @@ -65,7 +65,6 @@ class Blacklist(StateModuleHelper): ), supports_check_mode=True, ) - use_old_vardict = False def __init_module__(self): self.pattern = re.compile(r'^blacklist\s+{0}$'.format(re.escape(self.vars.name))) diff --git a/plugins/modules/keycloak_authentication.py b/plugins/modules/keycloak_authentication.py index 58878c069d..ae6d24958c 100644 --- a/plugins/modules/keycloak_authentication.py +++ b/plugins/modules/keycloak_authentication.py @@ -190,17 +190,20 @@ msg: type: str end_state: - description: Representation of the authentication after module execution. - returned: on success - type: dict - sample: { + description: Representation of the authentication after module execution. + returned: on success + type: dict + sample: + { "alias": "Copy of first broker login", "authenticationExecutions": [ { "alias": "review profile config", "authenticationConfig": { "alias": "review profile config", - "config": { "update.profile.on.first.login": "missing" }, + "config": { + "update.profile.on.first.login": "missing" + }, "id": "6f09e4fb-aad4-496a-b873-7fa9779df6d7" }, "configurable": true, @@ -210,7 +213,11 @@ end_state: "level": 0, "providerId": "idp-review-profile", "requirement": "REQUIRED", - "requirementChoices": [ "REQUIRED", "ALTERNATIVE", "DISABLED" ] + "requirementChoices": [ + "REQUIRED", + "ALTERNATIVE", + "DISABLED" + ] } ], "builtIn": false, @@ -308,6 +315,8 @@ def create_or_update_executions(kc, config, realm='master'): } # add the execution configuration if new_exec["authenticationConfig"] is not None: + if "authenticationConfig" in execution and "id" in execution["authenticationConfig"]: + kc.delete_authentication_config(execution["authenticationConfig"]["id"], realm=realm) kc.add_authenticationConfig_to_execution(updated_exec["id"], new_exec["authenticationConfig"], realm=realm) for key in new_exec: # remove unwanted key for the next API call @@ -358,8 +367,9 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, - required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']]) + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, ) result = dict(changed=False, msg='', flow={}) diff --git a/plugins/modules/keycloak_authentication_required_actions.py b/plugins/modules/keycloak_authentication_required_actions.py index 60b47d7a6a..69183ce605 100644 --- a/plugins/modules/keycloak_authentication_required_actions.py +++ b/plugins/modules/keycloak_authentication_required_actions.py @@ -49,7 +49,7 @@ options: type: dict defaultAction: description: - - Indicates, if any new user will have the required action assigned to it. + - Indicates whether new users have the required action assigned to them. 
type: bool enabled: description: @@ -149,7 +149,7 @@ end_state: type: dict defaultAction: description: - - Indicates, if any new user will have the required action assigned to it. + - Indicates whether new users have the required action assigned to them. sample: false type: bool enabled: @@ -237,8 +237,9 @@ def main(): module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, - required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']]) + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, ) result = dict(changed=False, msg='', end_state={}, diff=dict(before={}, after={})) diff --git a/plugins/modules/keycloak_authz_authorization_scope.py b/plugins/modules/keycloak_authz_authorization_scope.py index cd1ff57afc..78d70c7ee6 100644 --- a/plugins/modules/keycloak_authz_authorization_scope.py +++ b/plugins/modules/keycloak_authz_authorization_scope.py @@ -17,13 +17,14 @@ short_description: Allows administration of Keycloak client authorization scopes version_added: 6.6.0 description: - - This module allows the administration of Keycloak client Authorization Scopes using the Keycloak REST API. Authorization Scopes are only available - if a client has Authorization enabled. - - This module requires access to the REST API using OpenID Connect; the user connecting and the realm being used must have the requisite access - rights. In a default Keycloak installation, admin-cli and an admin user would work, as would a separate realm definition with the scope tailored - to your needs and a user having the expected roles. - - The names of module options are snake_cased versions of the camelCase options used by Keycloak. The Authorization Services paths and payloads - have not officially been documented by the Keycloak project. U(https://www.puppeteers.net/blog/keycloak-authorization-services-rest-api-paths-and-payload/). + - This module allows the administration of Keycloak client Authorization Scopes using the Keycloak REST API. Authorization + Scopes are only available if a client has Authorization enabled. + - This module requires access to the REST API using OpenID Connect; the user connecting and the realm being used must have + the requisite access rights. In a default Keycloak installation, admin-cli and an admin user would work, as would a separate + realm definition with the scope tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase options used by Keycloak. The Authorization Services + paths and payloads have not officially been documented by the Keycloak project. + U(https://www.puppeteers.net/blog/keycloak-authorization-services-rest-api-paths-and-payload/). attributes: check_mode: support: full @@ -36,8 +37,8 @@ options: state: description: - State of the authorization scope. - - On V(present), the authorization scope will be created (or updated if it exists already). - - On V(absent), the authorization scope will be removed if it exists. + - On V(present), the authorization scope is created (or updated if it exists already). + - On V(absent), the authorization scope is removed if it exists. 
choices: ['present', 'absent'] default: 'present' type: str @@ -141,8 +142,8 @@ def main(): state=dict(type='str', default='present', choices=['present', 'absent']), name=dict(type='str', required=True), - display_name=dict(type='str', required=False), - icon_uri=dict(type='str', required=False), + display_name=dict(type='str'), + icon_uri=dict(type='str'), client_id=dict(type='str', required=True), realm=dict(type='str', required=True) ) @@ -152,8 +153,10 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_one_of=( - [['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) + [['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) result = dict(changed=False, msg='', end_state={}, diff=dict(before={}, after={})) diff --git a/plugins/modules/keycloak_authz_custom_policy.py b/plugins/modules/keycloak_authz_custom_policy.py index ef6c9b0973..9607c0172c 100644 --- a/plugins/modules/keycloak_authz_custom_policy.py +++ b/plugins/modules/keycloak_authz_custom_policy.py @@ -17,13 +17,15 @@ short_description: Allows administration of Keycloak client custom Javascript po version_added: 7.5.0 description: - - This module allows the administration of Keycloak client custom Javascript using the Keycloak REST API. Custom Javascript policies are only - available if a client has Authorization enabled and if they have been deployed to the Keycloak server as JAR files. - - This module requires access to the REST API using OpenID Connect; the user connecting and the realm being used must have the requisite access - rights. In a default Keycloak installation, admin-cli and an admin user would work, as would a separate realm definition with the scope tailored - to your needs and a user having the expected roles. - - The names of module options are snake_cased versions of the camelCase options used by Keycloak. The Authorization Services paths and payloads - have not officially been documented by the Keycloak project. U(https://www.puppeteers.net/blog/keycloak-authorization-services-rest-api-paths-and-payload/). + - This module allows the administration of Keycloak client custom Javascript using the Keycloak REST API. Custom Javascript + policies are only available if a client has Authorization enabled and if they have been deployed to the Keycloak server + as JAR files. + - This module requires access to the REST API using OpenID Connect; the user connecting and the realm being used must have + the requisite access rights. In a default Keycloak installation, admin-cli and an admin user would work, as would a separate + realm definition with the scope tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase options used by Keycloak. The Authorization Services + paths and payloads have not officially been documented by the Keycloak project. + U(https://www.puppeteers.net/blog/keycloak-authorization-services-rest-api-paths-and-payload/). attributes: check_mode: support: full @@ -36,8 +38,8 @@ options: state: description: - State of the custom policy. - - On V(present), the custom policy will be created (or updated if it exists already). - - On V(absent), the custom policy will be removed if it exists. 
+      - On V(present), the custom policy is created (or updated if it exists already).
+      - On V(absent), the custom policy is removed if it exists.
     choices: ['present', 'absent']
     default: 'present'
     type: str
@@ -137,8 +139,10 @@ def main():
     module = AnsibleModule(argument_spec=argument_spec,
                            supports_check_mode=True,
                            required_one_of=(
-                               [['token', 'auth_realm', 'auth_username', 'auth_password']]),
-                           required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+                               [['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+                           required_together=([['auth_username', 'auth_password']]),
+                           required_by={'refresh_token': 'auth_realm'},
+                           )
 
     result = dict(changed=False, msg='', end_state={})
 
diff --git a/plugins/modules/keycloak_authz_permission.py b/plugins/modules/keycloak_authz_permission.py
index e4ab9fe14d..74bc6cf956 100644
--- a/plugins/modules/keycloak_authz_permission.py
+++ b/plugins/modules/keycloak_authz_permission.py
@@ -17,18 +17,20 @@ version_added: 7.2.0
 short_description: Allows administration of Keycloak client authorization permissions using Keycloak API
 
 description:
-  - This module allows the administration of Keycloak client authorization permissions using the Keycloak REST API. Authorization permissions are
-    only available if a client has Authorization enabled.
-  - There are some peculiarities in JSON paths and payloads for authorization permissions. In particular POST and PUT operations are targeted
-    at permission endpoints, whereas GET requests go to policies endpoint. To make matters more interesting the JSON responses from GET requests
-    return data in a different format than what is expected for POST and PUT. The end result is that it is not possible to detect changes to things
-    like policies, scopes or resources - at least not without a large number of additional API calls. Therefore this module always updates authorization
-    permissions instead of attempting to determine if changes are truly needed.
-  - This module requires access to the REST API using OpenID Connect; the user connecting and the realm being used must have the requisite access
-    rights. In a default Keycloak installation, admin-cli and an admin user would work, as would a separate realm definition with the scope tailored
-    to your needs and a user having the expected roles.
-  - The names of module options are snake_cased versions of the camelCase options used by Keycloak. The Authorization Services paths and payloads
-    have not officially been documented by the Keycloak project. U(https://www.puppeteers.net/blog/keycloak-authorization-services-rest-api-paths-and-payload/).
+  - This module allows the administration of Keycloak client authorization permissions using the Keycloak REST API. Authorization
+    permissions are only available if a client has Authorization enabled.
+  - There are some peculiarities in JSON paths and payloads for authorization permissions. In particular POST and PUT operations
+    are targeted at permission endpoints, whereas GET requests go to the policies endpoint. To make matters more interesting,
+    the JSON responses from GET requests return data in a different format than what is expected for POST and PUT. The end result
+    is that it is not possible to detect changes to things like policies, scopes or resources - at least not without a large
+    number of additional API calls. Therefore this module always updates authorization permissions instead of attempting to
+    determine if changes are truly needed.
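Because of the GET versus POST/PUT payload mismatch described in the bullet above, tasks using this module report a change on every run. A hedged sketch of a scope-based permission that also uses the client-credentials authentication the relaxed argument spec in this patch permits on its own; the URL, realm, secret, and all names are placeholders:

```yaml
- name: Ensure a scope-based permission exists (always reported as changed)
  community.general.keycloak_authz_permission:
    auth_keycloak_url: https://keycloak.example.com/auth
    auth_realm: master
    auth_client_id: admin-cli                     # assumed confidential client
    auth_client_secret: '{{ kc_client_secret }}'
    realm: myrealm
    client_id: myclient
    name: report-read-permission
    permission_type: scope
    scopes:
      - file:read
    policies:
      - admins-only-policy
```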
+ - This module requires access to the REST API using OpenID Connect; the user connecting and the realm being used must have + the requisite access rights. In a default Keycloak installation, admin-cli and an admin user would work, as would a separate + realm definition with the scope tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase options used by Keycloak. The Authorization Services + paths and payloads have not officially been documented by the Keycloak project. + U(https://www.puppeteers.net/blog/keycloak-authorization-services-rest-api-paths-and-payload/). attributes: check_mode: support: full @@ -41,8 +43,8 @@ options: state: description: - State of the authorization permission. - - On V(present), the authorization permission will be created (or updated if it exists already). - - On V(absent), the authorization permission will be removed if it exists. + - On V(present), the authorization permission is created (or updated if it exists already). + - On V(absent), the authorization permission is removed if it exists. choices: ['present', 'absent'] default: 'present' type: str @@ -235,13 +237,13 @@ def main(): state=dict(type='str', default='present', choices=['present', 'absent']), name=dict(type='str', required=True), - description=dict(type='str', required=False), + description=dict(type='str'), permission_type=dict(type='str', choices=['scope', 'resource'], required=True), decision_strategy=dict(type='str', default='UNANIMOUS', choices=['UNANIMOUS', 'AFFIRMATIVE', 'CONSENSUS']), - resources=dict(type='list', elements='str', default=[], required=False), - scopes=dict(type='list', elements='str', default=[], required=False), - policies=dict(type='list', elements='str', default=[], required=False), + resources=dict(type='list', elements='str', default=[]), + scopes=dict(type='list', elements='str', default=[]), + policies=dict(type='list', elements='str', default=[]), client_id=dict(type='str', required=True), realm=dict(type='str', required=True) ) @@ -251,8 +253,10 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_one_of=( - [['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) + [['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) # Convenience variables state = module.params.get('state') diff --git a/plugins/modules/keycloak_authz_permission_info.py b/plugins/modules/keycloak_authz_permission_info.py index 6851abb311..af7318315f 100644 --- a/plugins/modules/keycloak_authz_permission_info.py +++ b/plugins/modules/keycloak_authz_permission_info.py @@ -17,13 +17,14 @@ version_added: 7.2.0 short_description: Query Keycloak client authorization permissions information description: - - This module allows querying information about Keycloak client authorization permissions from the resources endpoint using the Keycloak REST - API. Authorization permissions are only available if a client has Authorization enabled. - - This module requires access to the REST API using OpenID Connect; the user connecting and the realm being used must have the requisite access - rights. 
In a default Keycloak installation, admin-cli and an admin user would work, as would a separate realm definition with the scope tailored - to your needs and a user having the expected roles. - - The names of module options are snake_cased versions of the camelCase options used by Keycloak. The Authorization Services paths and payloads - have not officially been documented by the Keycloak project. U(https://www.puppeteers.net/blog/keycloak-authorization-services-rest-api-paths-and-payload/). + - This module allows querying information about Keycloak client authorization permissions from the resources endpoint using + the Keycloak REST API. Authorization permissions are only available if a client has Authorization enabled. + - This module requires access to the REST API using OpenID Connect; the user connecting and the realm being used must have + the requisite access rights. In a default Keycloak installation, admin-cli and an admin user would work, as would a separate + realm definition with the scope tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase options used by Keycloak. The Authorization Services + paths and payloads have not officially been documented by the Keycloak project. + U(https://www.puppeteers.net/blog/keycloak-authorization-services-rest-api-paths-and-payload/). attributes: action_group: version_added: 10.2.0 @@ -133,8 +134,10 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_one_of=( - [['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) + [['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) # Convenience variables name = module.params.get('name') diff --git a/plugins/modules/keycloak_client.py b/plugins/modules/keycloak_client.py index 6b19711e3d..6c8a7b1383 100644 --- a/plugins/modules/keycloak_client.py +++ b/plugins/modules/keycloak_client.py @@ -15,14 +15,16 @@ short_description: Allows administration of Keycloak clients using Keycloak API description: - - This module allows the administration of Keycloak clients using the Keycloak REST API. It requires access to the REST API using OpenID Connect; - the user connecting and the client being used must have the requisite access rights. In a default Keycloak installation, admin-cli and an - admin user would work, as would a separate client definition with the scope tailored to your needs and a user having the expected roles. - - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation at - U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). - Aliases are provided so camelCased versions can be used as well. - - The Keycloak API does not always sanity check inputs, for example you can set SAML-specific settings on an OpenID Connect client for instance and - the other way around. Be careful. If you do not specify a setting, usually a sensible default is chosen. + - This module allows the administration of Keycloak clients using the Keycloak REST API. It requires access to the REST + API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. 
In a default + Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with the scope tailored + to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). Aliases are provided so camelCased versions can be used + as well. + - The Keycloak API does not always sanity check inputs, for example you can set SAML-specific settings on an OpenID Connect + client for instance and the other way around. Be careful. If you do not specify a setting, usually a sensible default + is chosen. attributes: check_mode: support: full @@ -35,8 +37,8 @@ options: state: description: - State of the client. - - On V(present), the client will be created (or updated if it exists already). - - On V(absent), the client will be removed if it exists. + - On V(present), the client are created (or updated if it exists already). + - On V(absent), the client are removed if it exists. choices: ['present', 'absent'] default: 'present' type: str @@ -49,15 +51,16 @@ options: client_id: description: - - Client id of client to be worked on. This is usually an alphanumeric name chosen by you. Either this or O(id) is required. If you specify - both, O(id) takes precedence. This is C(clientId) in the Keycloak REST API. + - Client ID of client to be worked on. This is usually an alphanumeric name chosen by you. Either this or O(id) is required. + If you specify both, O(id) takes precedence. This is C(clientId) in the Keycloak REST API. aliases: - clientId type: str id: description: - - Id of client to be worked on. This is usually an UUID. Either this or O(client_id) is required. If you specify both, this takes precedence. + - ID of client to be worked on. This is usually an UUID. Either this or O(client_id) is required. If you specify both, + this takes precedence. type: str name: @@ -86,7 +89,8 @@ options: base_url: description: - - Default URL to use when the auth server needs to redirect or link back to the client This is C(baseUrl) in the Keycloak REST API. + - Default URL to use when the auth server needs to redirect or link back to the client This is C(baseUrl) in the Keycloak + REST API. aliases: - baseUrl type: str @@ -98,11 +102,11 @@ options: client_authenticator_type: description: - - How do clients authenticate with the auth server? Either V(client-secret), V(client-jwt), or V(client-x509) can be chosen. When using - V(client-secret), the module parameter O(secret) can set it, for V(client-jwt), you can use the keys C(use.jwks.url), C(jwks.url), and - C(jwt.credential.certificate) in the O(attributes) module parameter to configure its behavior. For V(client-x509) you can use the keys - C(x509.allow.regex.pattern.comparison) and C(x509.subjectdn) in the O(attributes) module parameter to configure which certificate(s) to - accept. + - How do clients authenticate with the auth server? Either V(client-secret), V(client-jwt), or V(client-x509) can be + chosen. When using V(client-secret), the module parameter O(secret) can set it, for V(client-jwt), you can use the + keys C(use.jwks.url), C(jwks.url), and C(jwt.credential.certificate) in the O(attributes) module parameter to configure + its behavior. For V(client-x509) you can use the keys C(x509.allow.regex.pattern.comparison) and C(x509.subjectdn) + in the O(attributes) module parameter to configure which certificate(s) to accept. 
- This is C(clientAuthenticatorType) in the Keycloak REST API.
     choices: ['client-secret', 'client-jwt', 'client-x509']
     aliases:
@@ -111,22 +115,23 @@ options:
 
   secret:
     description:
-      - When using O(client_authenticator_type=client-secret) (the default), you can specify a secret here (otherwise one will be generated if
-        it does not exit). If changing this secret, the module will not register a change currently (but the changed secret will be saved).
+      - When using O(client_authenticator_type=client-secret) (the default), you can specify a secret here (otherwise one
+        is generated if it does not exist). If changing this secret, the module does not register a change currently (but the
+        changed secret is saved).
     type: str
 
   registration_access_token:
     description:
-      - The registration access token provides access for clients to the client registration service. This is C(registrationAccessToken) in the
-        Keycloak REST API.
+      - The registration access token provides access for clients to the client registration service. This is C(registrationAccessToken)
+        in the Keycloak REST API.
     aliases:
       - registrationAccessToken
     type: str
 
   default_roles:
     description:
-      - List of default roles for this client. If the client roles referenced do not exist yet, they will be created. This is C(defaultRoles) in
-        the Keycloak REST API.
+      - List of default roles for this client. If the client roles referenced do not exist yet, they are created. This is
+        C(defaultRoles) in the Keycloak REST API.
     aliases:
       - defaultRoles
     type: list
@@ -150,7 +155,8 @@ options:
 
   not_before:
     description:
-      - Revoke any tokens issued before this date for this client (this is a UNIX timestamp). This is C(notBefore) in the Keycloak REST API.
+      - Revoke any tokens issued before this date for this client (this is a UNIX timestamp). This is C(notBefore) in the
+        Keycloak REST API.
     type: int
     aliases:
       - notBefore
@@ -171,36 +177,40 @@ options:
 
   standard_flow_enabled:
     description:
-      - Enable standard flow for this client or not (OpenID connect). This is C(standardFlowEnabled) in the Keycloak REST API.
+      - Enable standard flow for this client or not (OpenID connect). This is C(standardFlowEnabled) in the Keycloak REST
+        API.
     aliases:
       - standardFlowEnabled
     type: bool
 
   implicit_flow_enabled:
     description:
-      - Enable implicit flow for this client or not (OpenID connect). This is C(implicitFlowEnabled) in the Keycloak REST API.
+      - Enable implicit flow for this client or not (OpenID connect). This is C(implicitFlowEnabled) in the Keycloak REST
+        API.
     aliases:
       - implicitFlowEnabled
     type: bool
 
   direct_access_grants_enabled:
     description:
-      - Are direct access grants enabled for this client or not (OpenID connect). This is C(directAccessGrantsEnabled) in the Keycloak REST API.
+      - Are direct access grants enabled for this client or not (OpenID connect). This is C(directAccessGrantsEnabled) in
+        the Keycloak REST API.
     aliases:
       - directAccessGrantsEnabled
     type: bool
 
   service_accounts_enabled:
     description:
-      - Are service accounts enabled for this client or not (OpenID connect). This is C(serviceAccountsEnabled) in the Keycloak REST API.
+      - Are service accounts enabled for this client or not (OpenID connect). This is C(serviceAccountsEnabled) in the Keycloak
+        REST API.
     aliases:
       - serviceAccountsEnabled
     type: bool
 
   authorization_services_enabled:
     description:
-      - Are authorization services enabled for this client or not (OpenID connect). This is C(authorizationServicesEnabled) in the Keycloak REST
-        API.
+ - Are authorization services enabled for this client or not (OpenID connect). This is C(authorizationServicesEnabled) + in the Keycloak REST API. aliases: - authorizationServicesEnabled type: bool @@ -222,7 +232,7 @@ options: protocol: description: - Type of client. - - At creation only, default value will be V(openid-connect) if O(protocol) is omitted. + - At creation only, default value is V(openid-connect) if O(protocol) is omitted. - The V(docker-v2) value was added in community.general 8.6.0. type: str choices: ['openid-connect', 'saml', 'docker-v2'] @@ -243,37 +253,40 @@ options: registered_nodes: description: - - Dict of registered cluster nodes (with C(nodename) as the key and last registration time as the value). This is C(registeredNodes) in the - Keycloak REST API. + - Dict of registered cluster nodes (with C(nodename) as the key and last registration time as the value). This is C(registeredNodes) + in the Keycloak REST API. type: dict aliases: - registeredNodes client_template: description: - - Client template to use for this client. If it does not exist this field will silently be dropped. This is C(clientTemplate) in the Keycloak - REST API. + - Client template to use for this client. If it does not exist this field is silently dropped. This is C(clientTemplate) + in the Keycloak REST API. type: str aliases: - clientTemplate use_template_config: description: - - Whether or not to use configuration from the O(client_template). This is C(useTemplateConfig) in the Keycloak REST API. + - Whether or not to use configuration from the O(client_template). This is C(useTemplateConfig) in the Keycloak REST + API. aliases: - useTemplateConfig type: bool use_template_scope: description: - - Whether or not to use scope configuration from the O(client_template). This is C(useTemplateScope) in the Keycloak REST API. + - Whether or not to use scope configuration from the O(client_template). This is C(useTemplateScope) in the Keycloak + REST API. aliases: - useTemplateScope type: bool use_template_mappers: description: - - Whether or not to use mapper configuration from the O(client_template). This is C(useTemplateMappers) in the Keycloak REST API. + - Whether or not to use mapper configuration from the O(client_template). This is C(useTemplateMappers) in the Keycloak + REST API. aliases: - useTemplateMappers type: bool @@ -295,9 +308,9 @@ options: authorization_settings: description: - - A data structure defining the authorization settings for this client. For reference, please see the Keycloak API docs at - U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html#_resourceserverrepresentation). - This is C(authorizationSettings) in the Keycloak REST API. + - A data structure defining the authorization settings for this client. For reference, please see the Keycloak API docs + at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html#_resourceserverrepresentation). This is C(authorizationSettings) + in the Keycloak REST API. type: dict aliases: - authorizationSettings @@ -310,13 +323,15 @@ options: browser: description: - Flow ID of the browser authentication flow. - - O(authentication_flow_binding_overrides.browser) and O(authentication_flow_binding_overrides.browser_name) are mutually exclusive. + - O(authentication_flow_binding_overrides.browser) and O(authentication_flow_binding_overrides.browser_name) are + mutually exclusive. type: str browser_name: description: - Flow name of the browser authentication flow. 
- - O(authentication_flow_binding_overrides.browser) and O(authentication_flow_binding_overrides.browser_name) are mutually exclusive. + - O(authentication_flow_binding_overrides.browser) and O(authentication_flow_binding_overrides.browser_name) are + mutually exclusive. aliases: - browserName type: str @@ -325,8 +340,8 @@ options: direct_grant: description: - Flow ID of the direct grant authentication flow. - - O(authentication_flow_binding_overrides.direct_grant) and O(authentication_flow_binding_overrides.direct_grant_name) are mutually - exclusive. + - O(authentication_flow_binding_overrides.direct_grant) and O(authentication_flow_binding_overrides.direct_grant_name) + are mutually exclusive. aliases: - directGrant type: str @@ -334,8 +349,8 @@ options: direct_grant_name: description: - Flow name of the direct grant authentication flow. - - O(authentication_flow_binding_overrides.direct_grant) and O(authentication_flow_binding_overrides.direct_grant_name) are mutually - exclusive. + - O(authentication_flow_binding_overrides.direct_grant) and O(authentication_flow_binding_overrides.direct_grant_name) + are mutually exclusive. aliases: - directGrantName type: str @@ -398,8 +413,8 @@ options: protocolMapper: description: - - 'The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is impossible to provide since this may - be extended through SPIs by the user of Keycloak, by default Keycloak as of 3.4 ships with at least:' + - 'The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is impossible to provide + since this may be extended through SPIs by the user of Keycloak, by default Keycloak as of 3.4 ships with at least:' - V(docker-v2-allow-all-mapper). - V(oidc-address-mapper). - V(oidc-full-name-mapper). @@ -422,29 +437,30 @@ options: - V(saml-user-attribute-mapper). - V(saml-user-property-mapper). - V(saml-user-session-note-mapper). - - An exhaustive list of available mappers on your installation can be obtained on the admin console by going to Server Info -> Providers - and looking under 'protocol-mapper'. + - An exhaustive list of available mappers on your installation can be obtained on the admin console by going to + Server Info -> Providers and looking under 'protocol-mapper'. type: str config: description: - - Dict specifying the configuration options for the protocol mapper; the contents differ depending on the value of - O(protocol_mappers[].protocolMapper) - and are not documented other than by the source of the mappers and its parent class(es). An example is given below. It is easiest - to obtain valid config values by dumping an already-existing protocol mapper configuration through check-mode in the RV(existing) - field. + - Dict specifying the configuration options for the protocol mapper; the contents differ depending on the value + of O(protocol_mappers[].protocolMapper) and are not documented other than by the source of the mappers and its + parent class(es). An example is given below. It is easiest to obtain valid config values by dumping an already-existing + protocol mapper configuration through check-mode in the RV(existing) field. type: dict attributes: description: - - A dict of further attributes for this client. This can contain various configuration settings; an example is given in the examples section. - While an exhaustive list of permissible options is not available; possible options as of Keycloak 3.4 are listed below. 
The Keycloak API
-        does not validate whether a given option is appropriate for the protocol used; if specified anyway, Keycloak will simply not use it.
+      - A dict of further attributes for this client. This can contain various configuration settings; an example is given
+        in the examples section. While an exhaustive list of permissible options is not available; possible options as of
+        Keycloak 3.4 are listed below. The Keycloak API does not validate whether a given option is appropriate for the protocol
+        used; if specified anyway, Keycloak does not use it.
     type: dict
     suboptions:
       saml.authnstatement:
         description:
-          - For SAML clients, boolean specifying whether or not a statement containing method and timestamp should be included in the login response.
+          - For SAML clients, boolean specifying whether or not a statement containing method and timestamp should be included
+            in the login response.
       saml.client.signature:
         description:
          - For SAML clients, boolean specifying whether a client signature is required and validated.
@@ -462,8 +478,8 @@ options:
           - Boolean specifying whether SAML documents should be signed by the realm.
       saml.server.signature.keyinfo.ext:
         description:
-          - For SAML clients, boolean specifying whether REDIRECT signing key lookup should be optimized through inclusion of the signing key
-            id in the SAML Extensions element.
+          - For SAML clients, boolean specifying whether REDIRECT signing key lookup should be optimized through inclusion
+            of the signing key ID in the SAML Extensions element.
       saml.signature.algorithm:
         description:
           - Signature algorithm used to sign SAML documents. One of V(RSA_SHA256), V(RSA_SHA1), V(RSA_SHA512), or V(DSA_SHA1).
@@ -481,28 +497,30 @@ options:
           - SAML Redirect Binding URL for the client's assertion consumer service (login responses).
       saml_force_name_id_format:
         description:
-          - For SAML clients, Boolean specifying whether to ignore requested NameID subject format and using the configured one instead.
+          - For SAML clients, Boolean specifying whether to ignore the requested NameID subject format and use the configured
+            one instead.
       saml_name_id_format:
         description:
           - For SAML clients, the NameID format to use (one of V(username), V(email), V(transient), or V(persistent)).
       saml_signature_canonicalization_method:
         description:
-          - SAML signature canonicalization method. This is one of four values, namely V(http://www.w3.org/2001/10/xml-exc-c14n#) for EXCLUSIVE,
-            V(http://www.w3.org/2001/10/xml-exc-c14n#WithComments) for EXCLUSIVE_WITH_COMMENTS, V(http://www.w3.org/TR/2001/REC-xml-c14n-20010315)
+          - SAML signature canonicalization method. This is one of four values, namely V(http://www.w3.org/2001/10/xml-exc-c14n#)
+            for EXCLUSIVE, V(http://www.w3.org/2001/10/xml-exc-c14n#WithComments) for EXCLUSIVE_WITH_COMMENTS,
+            V(http://www.w3.org/TR/2001/REC-xml-c14n-20010315)
             for INCLUSIVE, and V(http://www.w3.org/TR/2001/REC-xml-c14n-20010315#WithComments) for INCLUSIVE_WITH_COMMENTS.
       saml_single_logout_service_url_post:
         description:
-          - SAML POST binding url for the client's single logout service.
+          - SAML POST binding URL for the client's single logout service.
       saml_single_logout_service_url_redirect:
         description:
-          - SAML redirect binding url for the client's single logout service.
+          - SAML redirect binding URL for the client's single logout service.
       user.info.response.signature.alg:
         description:
          - For OpenID-Connect clients, JWA algorithm for signed UserInfo-endpoint responses. One of V(RS256) or V(unsigned).
request.object.signature.alg: description: - - For OpenID-Connect clients, JWA algorithm which the client needs to use when sending OIDC request object. One of V(any), V(none), - V(RS256). + - For OpenID-Connect clients, JWA algorithm which the client needs to use when sending OIDC request object. One + of V(any), V(none), V(RS256). use.jwks.url: description: - For OpenID-Connect clients, boolean specifying whether to use a JWKS URL to obtain client public keys. @@ -514,7 +532,7 @@ options: - For OpenID-Connect clients, client certificate for validating JWT issued by client and signed by its key, base64-encoded. x509.subjectdn: description: - - For OpenID-Connect clients, subject which will be used to authenticate the client. + - For OpenID-Connect clients, subject which is used to authenticate the client. type: str version_added: 9.5.0 @@ -686,23 +704,35 @@ proposed: description: Representation of proposed client. returned: always type: dict - sample: {clientId: "test"} + sample: {"clientId": "test"} existing: description: Representation of existing client (sample is truncated). returned: always type: dict - sample: {"adminUrl": "http://www.example.com/admin_url", "attributes": {"request.object.signature.alg": "RS256"}} + sample: + { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256" + } + } end_state: description: Representation of client after module execution (sample is truncated). returned: on success type: dict - sample: {"adminUrl": "http://www.example.com/admin_url", "attributes": {"request.object.signature.alg": "RS256"}} + sample: + { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256" + } + } """ from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ - keycloak_argument_spec, get_token, KeycloakError, is_struct_included + keycloak_argument_spec, get_token, KeycloakError from ansible.module_utils.basic import AnsibleModule import copy @@ -740,12 +770,30 @@ def normalise_cr(clientrep, remove_ids=False): if remove_ids: mapper.pop('id', None) + # Convert bool to string + if 'config' in mapper: + for key, value in mapper['config'].items(): + if isinstance(value, bool): + mapper['config'][key] = str(value).lower() + # Set to a default value. mapper['consentRequired'] = mapper.get('consentRequired', False) + if 'attributes' in clientrep: + for key, value in clientrep['attributes'].items(): + if isinstance(value, bool): + clientrep['attributes'][key] = str(value).lower() + clientrep['attributes'].pop('client.secret.creation.time', None) return clientrep +def normalize_kc_resp(clientrep): + # kc drops the variable 'authorizationServicesEnabled' if set to false + # to minimize diff/changes we set it to false if not set by kc + if clientrep and 'authorizationServicesEnabled' not in clientrep: + clientrep['authorizationServicesEnabled'] = False + + def sanitize_cr(clientrep): """ Removes probably sensitive details from a client representation. 
@@ -757,8 +805,11 @@ def sanitize_cr(clientrep): result['secret'] = 'no_log' if 'attributes' in result: attributes = result['attributes'] - if isinstance(attributes, dict) and 'saml.signing.private.key' in attributes: - attributes['saml.signing.private.key'] = 'no_log' + if isinstance(attributes, dict): + if 'saml.signing.private.key' in attributes: + attributes['saml.signing.private.key'] = 'no_log' + if 'saml.encryption.private.key' in attributes: + attributes['saml.encryption.private.key'] = 'no_log' return normalise_cr(result) @@ -902,8 +953,10 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_one_of=([['client_id', 'id'], - ['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) + ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) @@ -932,6 +985,8 @@ def main(): else: before_client = kc.get_client_by_id(cid, realm=realm) + normalize_kc_resp(before_client) + if before_client is None: before_client = {} @@ -1003,7 +1058,7 @@ def main(): if module._diff: result['diff'] = dict(before=sanitize_cr(before_norm), after=sanitize_cr(desired_norm)) - result['changed'] = not is_struct_included(desired_norm, before_norm, CLIENT_META_DATA) + result['changed'] = desired_norm != before_norm module.exit_json(**result) @@ -1011,6 +1066,8 @@ def main(): kc.update_client(cid, desired_client, realm=realm) after_client = kc.get_client_by_id(cid, realm=realm) + normalize_kc_resp(after_client) + if before_client == after_client: result['changed'] = False if module._diff: diff --git a/plugins/modules/keycloak_client_rolemapping.py b/plugins/modules/keycloak_client_rolemapping.py index 23dad803d7..1700c99cc1 100644 --- a/plugins/modules/keycloak_client_rolemapping.py +++ b/plugins/modules/keycloak_client_rolemapping.py @@ -16,16 +16,17 @@ short_description: Allows administration of Keycloak client_rolemapping with the version_added: 3.5.0 description: - - This module allows you to add, remove or modify Keycloak client_rolemapping with the Keycloak REST API. It requires access to the REST API - using OpenID Connect; the user connecting and the client being used must have the requisite access rights. In a default Keycloak installation, - admin-cli and an admin user would work, as would a separate client definition with the scope tailored to your needs and a user having the - expected roles. - - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation at - U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). - - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will be returned that way by this module. - You may pass single values for attributes when calling the module, and this will be translated into a list suitable for the API. - - When updating a client_rolemapping, where possible provide the role ID to the module. This removes a lookup to the API to translate the name - into the role ID. + - This module allows you to add, remove or modify Keycloak client_rolemapping with the Keycloak REST API. 
It requires access + to the REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. + In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with + the scope tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). + - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way + by this module. You may pass single values for attributes when calling the module, and this is translated into a list + suitable for the API. + - When updating a client_rolemapping, where possible provide the role ID to the module. This removes a lookup to the API + to translate the name into the role ID. attributes: check_mode: support: full @@ -38,8 +39,9 @@ options: state: description: - State of the client_rolemapping. - - On V(present), the client_rolemapping will be created if it does not yet exist, or updated with the parameters you provide. - - On V(absent), the client_rolemapping will be removed if it exists. + - On V(present), the client_rolemapping is created if it does not yet exist, or updated with the parameters + you provide. + - On V(absent), the client_rolemapping is removed if it exists. default: 'present' type: str choices: @@ -71,21 +73,22 @@ options: - Identify parent by ID. - Needs less API calls than using O(parents[].name). - A deep parent chain can be started at any point when first given parent is given as ID. - - Note that in principle both ID and name can be specified at the same time but current implementation only always use just one of them, - with ID being preferred. + - Note that in principle both ID and name can be specified at the same time but current implementation only always + use just one of them, with ID being preferred. name: type: str description: - Identify parent by name. - Needs more internal API calls than using O(parents[].id) to map names to ID's under the hood. - When giving a parent chain with only names it must be complete up to the top. - - Note that in principle both ID and name can be specified at the same time but current implementation only always use just one of them, - with ID being preferred. + - Note that in principle both ID and name can be specified at the same time but current implementation only always + use just one of them, with ID being preferred. gid: type: str description: - - Id of the group to be mapped. - - This parameter is not required for updating or deleting the rolemapping but providing it will reduce the number of API calls required. + - ID of the group to be mapped. + - This parameter is not required for updating or deleting the rolemapping but providing it reduces the number of API + calls required. client_id: type: str description: @@ -94,8 +97,9 @@ options: cid: type: str description: - - Id of the client to be mapped. - - This parameter is not required for updating or deleting the rolemapping but providing it will reduce the number of API calls required. + - ID of the client to be mapped. + - This parameter is not required for updating or deleting the rolemapping but providing it reduces the number of API + calls required. roles: description: - Roles to be mapped to the group. 
@@ -111,8 +115,8 @@ options:
         type: str
         description:
           - The unique identifier for this role_representation.
-          - This parameter is not required for updating or deleting a role_representation but providing it will reduce the number of API calls
-            required.
+          - This parameter is not required for updating or deleting a role_representation but providing it reduces the number
+            of API calls required.
 extends_documentation_fragment:
   - community.general.keycloak
   - community.general.keycloak.actiongroup_keycloak
@@ -205,7 +209,7 @@ proposed:
   description: Representation of proposed client role mapping.
   returned: always
   type: dict
-  sample: {clientId: "test"}
+  sample: {"clientId": "test"}
 
 existing:
   description:
@@ -213,7 +217,13 @@ existing:
     - The sample is truncated.
   returned: always
   type: dict
-  sample: {"adminUrl": "http://www.example.com/admin_url", "attributes": {"request.object.signature.alg": "RS256"}}
+  sample:
+    {
+      "adminUrl": "http://www.example.com/admin_url",
+      "attributes": {
+        "request.object.signature.alg": "RS256"
+      }
+    }
 
 end_state:
   description:
@@ -221,7 +231,13 @@ end_state:
     - The sample is truncated.
   returned: on success
   type: dict
-  sample: {"adminUrl": "http://www.example.com/admin_url", "attributes": {"request.object.signature.alg": "RS256"}}
+  sample:
+    {
+      "adminUrl": "http://www.example.com/admin_url",
+      "attributes": {
+        "request.object.signature.alg": "RS256"
+      }
+    }
 """
 
 from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import (
@@ -264,8 +280,10 @@ def main():
 
     module = AnsibleModule(argument_spec=argument_spec,
                            supports_check_mode=True,
-                           required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
-                           required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+                           required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+                           required_together=([['auth_username', 'auth_password']]),
+                           required_by={'refresh_token': 'auth_realm'},
+                           )
 
     result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
 
diff --git a/plugins/modules/keycloak_client_rolescope.py b/plugins/modules/keycloak_client_rolescope.py
index ff91a0fe78..fcf57c2e4a 100644
--- a/plugins/modules/keycloak_client_rolescope.py
+++ b/plugins/modules/keycloak_client_rolescope.py
@@ -11,17 +11,20 @@ __metaclass__ = type
 
 DOCUMENTATION = r"""
 module: keycloak_client_rolescope
 
-short_description: Allows administration of Keycloak client roles scope to restrict the usage of certain roles to a other specific client applications
+short_description: Allows administration of Keycloak client roles scope to restrict the usage of certain roles to other
+  specific client applications
 
 version_added: 8.6.0
 
 description:
-  - This module allows you to add or remove Keycloak roles from clients scope using the Keycloak REST API. It requires access to the REST API using
-    OpenID Connect; the user connecting and the client being used must have the requisite access rights. In a default Keycloak installation, admin-cli
-    and an admin user would work, as would a separate client definition with the scope tailored to your needs and a user having the expected roles.
+  - This module allows you to add or remove Keycloak roles from clients scope using the Keycloak REST API. It requires access
+    to the REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights.
+ In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with + the scope tailored to your needs and a user having the expected roles. - Client O(client_id) must have O(community.general.keycloak_client#module:full_scope_allowed) set to V(false). - - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will be returned that way by this module. - You may pass single values for attributes when calling the module, and this will be translated into a list suitable for the API. + - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way + by this module. You may pass single values for attributes when calling the module, and this is translated into a list + suitable for the API. attributes: check_mode: support: full @@ -34,8 +37,8 @@ options: state: description: - State of the role mapping. - - On V(present), all roles in O(role_names) will be mapped if not exists yet. - - On V(absent), all roles mapping in O(role_names) will be removed if it exists. + - On V(present), all roles in O(role_names) are mapped if they are not mapped yet. + - On V(absent), all role mappings in O(role_names) are removed if they exist. default: 'present' type: str choices: @@ -123,11 +126,12 @@ msg: sample: "Client role scope for frontend-client-public has been updated" end_state: - description: Representation of role role scope after module execution. - returned: on success - type: list - elements: dict - sample: [ + description: Representation of role scope after module execution. + returned: on success + type: list + elements: dict + sample: + [ { "clientRole": false, "composite": false, diff --git a/plugins/modules/keycloak_clientscope.py b/plugins/modules/keycloak_clientscope.py index ed82e0c0f7..ddb4e1b04b 100644 --- a/plugins/modules/keycloak_clientscope.py +++ b/plugins/modules/keycloak_clientscope.py @@ -16,15 +16,17 @@ short_description: Allows administration of Keycloak client_scopes using Keycloak API version_added: 3.4.0 description: - - This module allows you to add, remove or modify Keycloak client_scopes using the Keycloak REST API. It requires access to the REST API using OpenID - Connect; the user connecting and the client being used must have the requisite access rights. In a default Keycloak installation, admin-cli - and an admin user would work, as would a separate client definition with the scope tailored to your needs and a user having the expected roles. - - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation at - U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). - - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will be returned that way by this module. - You may pass single values for attributes when calling the module, and this will be translated into a list suitable for the API. - - When updating a client_scope, where possible provide the client_scope ID to the module. This removes a lookup to the API to translate the - name into the client_scope ID. + - This module allows you to add, remove or modify Keycloak client_scopes using the Keycloak REST API. It requires access + to the REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights.
+ In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with + the scope tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). + - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way + by this module. You may pass single values for attributes when calling the module, and this is translated into a list + suitable for the API. + - When updating a client_scope, where possible provide the client_scope ID to the module. This removes a lookup to the API + to translate the name into the client_scope ID. attributes: check_mode: support: full @@ -37,8 +39,8 @@ options: state: description: - State of the client_scope. - - On V(present), the client_scope will be created if it does not yet exist, or updated with the parameters you provide. - - On V(absent), the client_scope will be removed if it exists. + - On V(present), the client_scope is created if it does not yet exist, or updated with the parameters you provide. + - On V(absent), the client_scope is removed if it exists. default: 'present' type: str choices: @@ -60,7 +62,8 @@ options: type: str description: - The unique identifier for this client_scope. - - This parameter is not required for updating or deleting a client_scope but providing it will reduce the number of API calls required. + - This parameter is not required for updating or deleting a client_scope but providing it reduces the number of API + calls required. description: type: str description: @@ -91,8 +94,8 @@ options: protocolMapper: description: - - 'The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is impossible to provide since this may - be extended through SPIs by the user of Keycloak, by default Keycloak as of 3.4 ships with at least:' + - 'The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is impossible to provide + since this may be extended through SPIs by the user of Keycloak, by default Keycloak as of 3.4 ships with at least:' - V(docker-v2-allow-all-mapper). - V(oidc-address-mapper). - V(oidc-full-name-mapper). @@ -115,8 +118,8 @@ options: - V(saml-user-attribute-mapper). - V(saml-user-property-mapper). - V(saml-user-session-note-mapper). - - An exhaustive list of available mappers on your installation can be obtained on the admin console by going to Server Info -> Providers - and looking under 'protocol-mapper'. + - An exhaustive list of available mappers on your installation can be obtained on the admin console by going to + Server Info -> Providers and looking under 'protocol-mapper'. type: str name: @@ -131,11 +134,10 @@ options: config: description: - - Dict specifying the configuration options for the protocol mapper; the contents differ depending on the value of - O(protocol_mappers[].protocolMapper) - and are not documented other than by the source of the mappers and its parent class(es). An example is given below. It is easiest - to obtain valid config values by dumping an already-existing protocol mapper configuration through check-mode in the RV(existing) - return value. 
+ - Dict specifying the configuration options for the protocol mapper; the contents differ depending on the value + of O(protocol_mappers[].protocolMapper) and are not documented other than by the source of the mappers and its + parent class(es). An example is given below. It is easiest to obtain valid config values by dumping an already-existing + protocol mapper configuration through check-mode in the RV(existing) return value. type: dict attributes: @@ -261,19 +263,31 @@ proposed: description: Representation of proposed client scope. returned: always type: dict - sample: {clientId: "test"} + sample: {"clientId": "test"} existing: description: Representation of existing client scope (sample is truncated). returned: always type: dict - sample: {"adminUrl": "http://www.example.com/admin_url", "attributes": {"request.object.signature.alg": "RS256"}} + sample: + { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256" + } + } end_state: description: Representation of client scope after module execution (sample is truncated). returned: on success type: dict - sample: {"adminUrl": "http://www.example.com/admin_url", "attributes": {"request.object.signature.alg": "RS256"}} + sample: + { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256" + } + } """ from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ @@ -352,8 +366,10 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_one_of=([['id', 'name'], - ['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) + ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) diff --git a/plugins/modules/keycloak_clientscope_type.py b/plugins/modules/keycloak_clientscope_type.py index 48d57bcc46..85308f1a22 100644 --- a/plugins/modules/keycloak_clientscope_type.py +++ b/plugins/modules/keycloak_clientscope_type.py @@ -17,9 +17,10 @@ short_description: Set the type of a clientscope in realm or client using Keycloak API version_added: 6.6.0 description: - - This module allows you to set the type (optional, default) of clientscopes using the Keycloak REST API. It requires access to the REST API using - OpenID Connect; the user connecting and the client being used must have the requisite access rights. In a default Keycloak installation, admin-cli - and an admin user would work, as would a separate client definition with the scope tailored to your needs and a user having the expected roles. + - This module allows you to set the type (optional, default) of clientscopes using the Keycloak REST API. It requires access + to the REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. + In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with + the scope tailored to your needs and a user having the expected roles. attributes: check_mode: support: full @@ -98,20 +99,43 @@ proposed: description: Representation of proposed client-scope types mapping.
returned: always type: dict - sample: {default_clientscopes: ["profile", "role"], optional_clientscopes: []} + sample: + { + "default_clientscopes": [ + "profile", + "role" + ], + "optional_clientscopes": [] + } existing: description: - Representation of client scopes before module execution. returned: always type: dict - sample: {default_clientscopes: ["profile", "role"], optional_clientscopes: ["phone"]} + sample: + { + "default_clientscopes": [ + "profile", + "role" + ], + "optional_clientscopes": [ + "phone" + ] + } end_state: description: - Representation of client scopes after module execution. - The sample is truncated. returned: on success type: dict - sample: {default_clientscopes: ["profile", "role"], optional_clientscopes: []} + sample: + { + "default_clientscopes": [ + "profile", + "role" + ], + "optional_clientscopes": [] + } """ from ansible.module_utils.basic import AnsibleModule @@ -144,15 +168,17 @@ def keycloak_clientscope_type_module(): argument_spec=argument_spec, supports_check_mode=True, required_one_of=([ - ['token', 'auth_realm', 'auth_username', 'auth_password'], + ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret'], ['default_clientscopes', 'optional_clientscopes'] ]), - required_together=([['auth_realm', 'auth_username', 'auth_password']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, mutually_exclusive=[ ['token', 'auth_realm'], ['token', 'auth_username'], ['token', 'auth_password'] - ]) + ], + ) return module diff --git a/plugins/modules/keycloak_clientsecret_info.py b/plugins/modules/keycloak_clientsecret_info.py index d0f6297037..0ea48f6a33 100644 --- a/plugins/modules/keycloak_clientsecret_info.py +++ b/plugins/modules/keycloak_clientsecret_info.py @@ -17,12 +17,14 @@ short_description: Retrieve client secret using Keycloak API version_added: 6.1.0 description: - - This module allows you to get a Keycloak client secret using the Keycloak REST API. It requires access to the REST API using OpenID Connect; the - user connecting and the client being used must have the requisite access rights. In a default Keycloak installation, admin-cli and an admin - user would work, as would a separate client definition with the scope tailored to your needs and a user having the expected roles. - - When retrieving a new client secret, where possible provide the client's O(id) (not O(client_id)) to the module. This removes a lookup to - the API to translate the O(client_id) into the client ID. - - 'Note that this module returns the client secret. To avoid this showing up in the logs, please add C(no_log: true) to the task.' + - This module allows you to get a Keycloak client secret using the Keycloak REST API. It requires access to the REST API + using OpenID Connect; the user connecting and the client being used must have the requisite access rights. In a default + Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with the scope tailored + to your needs and a user having the expected roles. + - When retrieving a new client secret, where possible provide the client's O(id) (not O(client_id)) to the module. This + removes a lookup to the API to translate the O(client_id) into the client ID. + - 'Note that this module returns the client secret. To avoid this showing up in the logs, please add C(no_log: true) to + the task.' 
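As a usage sketch for the C(no_log) advice above: the server URL, realm, and client UUID here are placeholders, and the registered variable name is arbitrary:

```yaml
- name: Read a client secret without writing it to the logs
  community.general.keycloak_clientsecret_info:
    auth_keycloak_url: https://keycloak.example.com/auth
    auth_realm: master
    auth_username: admin
    auth_password: "{{ keycloak_admin_password }}"
    realm: myrealm
    id: 9c3712ab-decd-481e-954f-76da7b006e5f  # client UUID; avoids a client_id lookup
  register: secret_result
  no_log: true
```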
attributes: action_group: version_added: 10.2.0 @@ -37,7 +39,8 @@ options: id: description: - The unique identifier for this client. - - This parameter is not required for getting or generating a client secret but providing it will reduce the number of API calls required. + - This parameter is not required for getting or generating a client secret but providing it reduces the number of API + calls required. type: str client_id: diff --git a/plugins/modules/keycloak_clientsecret_regenerate.py b/plugins/modules/keycloak_clientsecret_regenerate.py index 4bd48e90ad..2bcaeb3705 100644 --- a/plugins/modules/keycloak_clientsecret_regenerate.py +++ b/plugins/modules/keycloak_clientsecret_regenerate.py @@ -17,12 +17,14 @@ short_description: Regenerate Keycloak client secret using Keycloak API version_added: 6.1.0 description: - - This module allows you to regenerate a Keycloak client secret using the Keycloak REST API. It requires access to the REST API using OpenID Connect; - the user connecting and the client being used must have the requisite access rights. In a default Keycloak installation, admin-cli and an - admin user would work, as would a separate client definition with the scope tailored to your needs and a user having the expected roles. - - When regenerating a client secret, where possible provide the client's id (not client_id) to the module. This removes a lookup to the API - to translate the client_id into the client ID. - - 'Note that this module returns the client secret. To avoid this showing up in the logs, please add C(no_log: true) to the task.' + - This module allows you to regenerate a Keycloak client secret using the Keycloak REST API. It requires access to the REST + API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. In a default + Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with the scope tailored + to your needs and a user having the expected roles. + - When regenerating a client secret, where possible provide the client's ID (not client_id) to the module. This removes + a lookup to the API to translate the client_id into the client ID. + - 'Note that this module returns the client secret. To avoid this showing up in the logs, please add C(no_log: true) to + the task.' attributes: check_mode: support: full @@ -41,12 +43,13 @@ options: id: description: - The unique identifier for this client. - - This parameter is not required for getting or generating a client secret but providing it will reduce the number of API calls required. + - This parameter is not required for getting or generating a client secret but providing it reduces the number of API + calls required. type: str client_id: description: - - The client_id of the client. Passing this instead of id results in an extra API call. + - The client_id of the client. Passing this instead of ID results in an extra API call. aliases: - clientId type: str diff --git a/plugins/modules/keycloak_clienttemplate.py b/plugins/modules/keycloak_clienttemplate.py index bfd138c3f2..ee357605f1 100644 --- a/plugins/modules/keycloak_clienttemplate.py +++ b/plugins/modules/keycloak_clienttemplate.py @@ -14,13 +14,15 @@ module: keycloak_clienttemplate short_description: Allows administration of Keycloak client templates using Keycloak API description: - - This module allows the administration of Keycloak client templates using the Keycloak REST API. 
It requires access to the REST API using OpenID - Connect; the user connecting and the client being used must have the requisite access rights. In a default Keycloak installation, admin-cli - and an admin user would work, as would a separate client definition with the scope tailored to your needs and a user having the expected roles. - - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation at - U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). - - The Keycloak API does not always enforce for only sensible settings to be used -- you can set SAML-specific settings on an OpenID Connect - client for instance and the other way around. Be careful. If you do not specify a setting, usually a sensible default is chosen. + - This module allows the administration of Keycloak client templates using the Keycloak REST API. It requires access to + the REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. + In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with + the scope tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). + - The Keycloak API does not always enforce that only sensible settings are used -- you can set SAML-specific settings on + an OpenID Connect client for instance and the other way around. Be careful. If you do not specify a setting, usually a + sensible default is chosen. attributes: check_mode: support: full @@ -33,15 +35,15 @@ options: state: description: - State of the client template. - - On V(present), the client template will be created (or updated if it exists already). - - On V(absent), the client template will be removed if it exists. + - On V(present), the client template is created (or updated if it exists already). + - On V(absent), the client template is removed if it exists. choices: ['present', 'absent'] default: 'present' type: str id: description: - - Id of client template to be worked on. This is usually a UUID. + - ID of client template to be worked on. This is usually a UUID. type: str realm: @@ -69,12 +71,14 @@ options: full_scope_allowed: description: - - Is the "Full Scope Allowed" feature set for this client template or not. This is C(fullScopeAllowed) in the Keycloak REST API. + - Is the "Full Scope Allowed" feature set for this client template or not. This is C(fullScopeAllowed) in the Keycloak + REST API. type: bool protocol_mappers: description: - - A list of dicts defining protocol mappers for this client template. This is C(protocolMappers) in the Keycloak REST API. + - A list of dicts defining protocol mappers for this client template. This is C(protocolMappers) in the Keycloak REST + API. type: list elements: dict suboptions: @@ -106,8 +110,8 @@ options: protocolMapper: description: - - 'The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is impossible to provide since this may - be extended through SPIs by the user of Keycloak, by default Keycloak as of 3.4 ships with at least:' + - 'The Keycloak-internal name of the type of this protocol-mapper.
While an exhaustive list is impossible to provide + since this may be extended through SPIs by the user of Keycloak, by default Keycloak as of 3.4 ships with at least:' - V(docker-v2-allow-all-mapper). - V(oidc-address-mapper). - V(oidc-full-name-mapper). @@ -130,31 +134,29 @@ options: - V(saml-user-attribute-mapper). - V(saml-user-property-mapper). - V(saml-user-session-note-mapper). - - An exhaustive list of available mappers on your installation can be obtained on the admin console by going to Server Info -> Providers - and looking under 'protocol-mapper'. + - An exhaustive list of available mappers on your installation can be obtained on the admin console by going to + Server Info -> Providers and looking under 'protocol-mapper'. type: str config: description: - - Dict specifying the configuration options for the protocol mapper; the contents differ depending on the value of - O(protocol_mappers[].protocolMapper) - and are not documented other than by the source of the mappers and its parent class(es). An example is given below. It is easiest - to obtain valid config values by dumping an already-existing protocol mapper configuration through check-mode in the RV(existing) - field. + - Dict specifying the configuration options for the protocol mapper; the contents differ depending on the value + of O(protocol_mappers[].protocolMapper) and are not documented other than by the source of the mappers and its + parent class(es). An example is given below. It is easiest to obtain valid config values by dumping an already-existing + protocol mapper configuration through check-mode in the RV(existing) field. type: dict attributes: description: - - A dict of further attributes for this client template. This can contain various configuration settings, though in the default installation - of Keycloak as of 3.4, none are documented or known, so this is usually empty. + - A dict of further attributes for this client template. This can contain various configuration settings, though in + the default installation of Keycloak as of 3.4, none are documented or known, so this is usually empty. type: dict notes: - The Keycloak REST API defines further fields (namely C(bearerOnly), C(consentRequired), C(standardFlowEnabled), C(implicitFlowEnabled), - C(directAccessGrantsEnabled), - C(serviceAccountsEnabled), C(publicClient), and C(frontchannelLogout)) which, while available with keycloak_client, do not have any effect - on Keycloak client-templates and are discarded if supplied with an API request changing client-templates. As such, they are not available - through this module. + C(directAccessGrantsEnabled), C(serviceAccountsEnabled), C(publicClient), and C(frontchannelLogout)) which, while available + with keycloak_client, do not have any effect on Keycloak client-templates and are discarded if supplied with an API request + changing client-templates. As such, they are not available through this module. extends_documentation_fragment: - community.general.keycloak - community.general.keycloak.actiongroup_keycloak @@ -236,19 +238,33 @@ proposed: description: Representation of proposed client template. returned: always type: dict - sample: {name: "test01"} + sample: {"name": "test01"} existing: description: Representation of existing client template (sample is truncated). 
returned: always type: dict - sample: {"description": "test01", "fullScopeAllowed": false, "id": "9c3712ab-decd-481e-954f-76da7b006e5f", "name": "test01", "protocol": "saml"} + sample: + { + "description": "test01", + "fullScopeAllowed": false, + "id": "9c3712ab-decd-481e-954f-76da7b006e5f", + "name": "test01", + "protocol": "saml" + } end_state: description: Representation of client template after module execution (sample is truncated). returned: on success type: dict - sample: {"description": "test01", "fullScopeAllowed": false, "id": "9c3712ab-decd-481e-954f-76da7b006e5f", "name": "test01", "protocol": "saml"} + sample: + { + "description": "test01", + "fullScopeAllowed": false, + "id": "9c3712ab-decd-481e-954f-76da7b006e5f", + "name": "test01", + "protocol": "saml" + } """ from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ @@ -292,8 +308,10 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_one_of=([['id', 'name'], - ['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) + ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) diff --git a/plugins/modules/keycloak_component.py b/plugins/modules/keycloak_component.py index 2b402ddb62..8b0c67b321 100644 --- a/plugins/modules/keycloak_component.py +++ b/plugins/modules/keycloak_component.py @@ -16,12 +16,13 @@ short_description: Allows administration of Keycloak components using Keycloak API version_added: 10.0.0 description: - - This module allows the administration of Keycloak components using the Keycloak REST API. It requires access to the REST API using OpenID Connect; - the user connecting and the realm being used must have the requisite access rights. In a default Keycloak installation, C(admin-cli) and an - C(admin) user would work, as would a separate realm definition with the scope tailored to your needs and a user having the expected roles. - - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation at - U(https://www.keycloak.org/docs-api/latest/rest-api/index.html). - Aliases are provided so camelCased versions can be used as well. + - This module allows the administration of Keycloak components using the Keycloak REST API. It requires access to the REST + API using OpenID Connect; the user connecting and the realm being used must have the requisite access rights. In a default + Keycloak installation, C(admin-cli) and an C(admin) user would work, as would a separate realm definition with the scope + tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/latest/rest-api/index.html). Aliases are provided so camelCased versions can be + used as well. attributes: check_mode: support: full @@ -34,8 +35,8 @@ options: state: description: - State of the Keycloak component. - - On V(present), the component will be created (or updated if it exists already). - - On V(absent), the component will be removed if it exists.
+ - On V(present), the component is created (or updated if it exists already). + - On V(absent), the component is removed if it exists. choices: ['present', 'absent'] default: 'present' type: str @@ -154,8 +155,10 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, - required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) result = dict(changed=False, msg='', end_state={}, diff=dict(before={}, after={})) diff --git a/plugins/modules/keycloak_component_info.py b/plugins/modules/keycloak_component_info.py index a1f2c1f0ef..79a6d58720 100644 --- a/plugins/modules/keycloak_component_info.py +++ b/plugins/modules/keycloak_component_info.py @@ -34,9 +34,8 @@ options: provider_type: description: - Provider type of components. - - 'Examples: V(org.keycloak.storage.UserStorageProvider), - V(org.keycloak.services.clientregistration.policy.ClientRegistrationPolicy), V(org.keycloak.keys.KeyProvider), - V(org.keycloak.userprofile.UserProfileProvider), V(org.keycloak.storage.ldap.mappers.LDAPStorageMapper).' + - 'Examples: V(org.keycloak.storage.UserStorageProvider), V(org.keycloak.services.clientregistration.policy.ClientRegistrationPolicy), + V(org.keycloak.keys.KeyProvider), V(org.keycloak.userprofile.UserProfileProvider), V(org.keycloak.storage.ldap.mappers.LDAPStorageMapper).' type: str parent_id: description: @@ -92,7 +91,6 @@ EXAMPLES = r""" realm: myrealm parent_id: "075ef2fa-19fc-4a6d-bf4c-249f57365fd2" provider_type: "org.keycloak.storage.ldap.mappers.LDAPStorageMapper" - """ RETURN = r""" diff --git a/plugins/modules/keycloak_group.py b/plugins/modules/keycloak_group.py index 796f5fc56f..7053b33a35 100644 --- a/plugins/modules/keycloak_group.py +++ b/plugins/modules/keycloak_group.py @@ -14,15 +14,17 @@ module: keycloak_group short_description: Allows administration of Keycloak groups using Keycloak API description: - - This module allows you to add, remove or modify Keycloak groups using the Keycloak REST API. It requires access to the REST API using OpenID Connect; - the user connecting and the client being used must have the requisite access rights. In a default Keycloak installation, admin-cli and an - admin user would work, as would a separate client definition with the scope tailored to your needs and a user having the expected roles. - - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation at - U(https://www.keycloak.org/docs-api/20.0.2/rest-api/index.html). - - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will be returned that way by this module. - You may pass single values for attributes when calling the module, and this will be translated into a list suitable for the API. - - When updating a group, where possible provide the group ID to the module. This removes a lookup to the API to translate the name into the - group ID. + - This module allows you to add, remove or modify Keycloak groups using the Keycloak REST API. It requires access to the + REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. 
In + a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with the + scope tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/20.0.2/rest-api/index.html). + - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way + by this module. You may pass single values for attributes when calling the module, and this is translated into a list + suitable for the API. + - When updating a group, where possible provide the group ID to the module. This removes a lookup to the API to translate + the name into the group ID. attributes: check_mode: support: full @@ -35,9 +37,9 @@ options: state: description: - State of the group. - - On V(present), the group will be created if it does not yet exist, or updated with the parameters you provide. - - On V(absent), the group will be removed if it exists. Be aware that absenting a group with subgroups will automatically delete all its - subgroups too. + - On V(present), the group is created if it does not yet exist, or updated with the parameters you provide. + - On V(absent), the group is removed if it exists. Be aware that absenting a group with subgroups automatically deletes + all its subgroups too. default: 'present' type: str choices: @@ -59,7 +61,7 @@ options: type: str description: - The unique identifier for this group. - - This parameter is not required for updating or deleting a group but providing it will reduce the number of API calls required. + - This parameter is not required for updating or deleting a group but providing it reduces the number of API calls required. attributes: type: dict description: @@ -70,9 +72,10 @@ options: type: list description: - List of parent groups for the group to handle sorted top to bottom. - - Set this to create a group as a subgroup of another group or groups (parents) or when accessing an existing subgroup by name. - - Not necessary to set when accessing an existing subgroup by its C(ID) because in that case the group can be directly queried without necessarily - knowing its parent(s). + - Set this to create a group as a subgroup of another group or groups (parents) or when accessing an existing subgroup + by name. + - Not necessary to set when accessing an existing subgroup by its C(ID) because in that case the group can be directly + queried without necessarily knowing its parent(s). elements: dict suboptions: id: @@ -81,19 +84,19 @@ options: - Identify parent by ID. - Needs less API calls than using O(parents[].name). - A deep parent chain can be started at any point when first given parent is given as ID. - - Note that in principle both ID and name can be specified at the same time but current implementation only always use just one of them, - with ID being preferred. + - Note that in principle both ID and name can be specified at the same time, but the current implementation always + uses just one of them, with ID being preferred. name: type: str description: - Identify parent by name. - Needs more internal API calls than using O(parents[].id) to map names to ID's under the hood. - When giving a parent chain with only names it must be complete up to the top.
- - Note that in principle both ID and name can be specified at the same time but current implementation only always use just one of them, - with ID being preferred. + - Note that in principle both ID and name can be specified at the same time, but the current implementation always + uses just one of them, with ID being preferred. notes: - - Presently, the RV(end_state.realmRoles), RV(end_state.clientRoles), and RV(end_state.access) attributes returned by the Keycloak API are read-only - for groups. This limitation will be removed in a later version of this module. + - Presently, the RV(end_state.realmRoles), RV(end_state.clientRoles), and RV(end_state.access) attributes returned by the + Keycloak API are read-only for groups. This limitation will be removed in a later version of this module. extends_documentation_fragment: - community.general.keycloak - community.general.keycloak.actiongroup_keycloak @@ -278,7 +281,7 @@ end_state: returned: always sample: [] subGroups: - description: A list of groups that are children of this group. These groups will have the same parameters as documented here. + description: A list of groups that are children of this group. These groups have the same parameters as documented here. type: list returned: always clientRoles: @@ -329,8 +332,10 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_one_of=([['id', 'name'], - ['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) + ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) result = dict(changed=False, msg='', diff={}, group='') diff --git a/plugins/modules/keycloak_identity_provider.py b/plugins/modules/keycloak_identity_provider.py index ee631bf19c..40a06846d6 100644 --- a/plugins/modules/keycloak_identity_provider.py +++ b/plugins/modules/keycloak_identity_provider.py @@ -16,12 +16,12 @@ short_description: Allows administration of Keycloak identity providers using Keycloak API version_added: 3.6.0 description: - - This module allows you to add, remove or modify Keycloak identity providers using the Keycloak REST API. It requires access to the REST API - using OpenID Connect; the user connecting and the client being used must have the requisite access rights. In a default Keycloak installation, - admin-cli and an admin user would work, as would a separate client definition with the scope tailored to your needs and a user having the - expected roles. - - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation at - U(https://www.keycloak.org/docs-api/15.0/rest-api/index.html). + - This module allows you to add, remove or modify Keycloak identity providers using the Keycloak REST API. It requires access + to the REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. + In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with + the scope tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/15.0/rest-api/index.html).
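Because the identity provider description is dense, a usage sketch may help; it mirrors the OIDC RETURN samples shown further below, and every URL, name, and secret here is a placeholder:

```yaml
- name: Create an OpenID Connect identity provider
  community.general.keycloak_identity_provider:
    auth_keycloak_url: https://keycloak.example.com/auth
    auth_realm: master
    auth_username: admin
    auth_password: "{{ keycloak_admin_password }}"
    realm: myrealm
    alias: my-idp
    display_name: OpenID Connect IdP
    provider_id: oidc
    config:
      issuer: https://idp.example.com
      authorizationUrl: https://idp.example.com/auth
      tokenUrl: https://idp.example.com/token
      userInfoUrl: https://idp.example.com/userinfo
      clientId: my-client
      clientSecret: "{{ idp_client_secret }}"
      clientAuthMethod: client_secret_post
    state: present
```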
attributes: check_mode: support: full @@ -34,8 +34,8 @@ options: state: description: - State of the identity provider. - - On V(present), the identity provider will be created if it does not yet exist, or updated with the parameters you provide. - - On V(absent), the identity provider will be removed if it exists. + - On V(present), the identity provider is created if it does not yet exist, or updated with the parameters you provide. + - On V(absent), the identity provider is removed if it exists. default: 'present' type: str choices: @@ -89,8 +89,8 @@ options: link_only: description: - - If true, users cannot log in through this provider. They can only link to this provider. This is useful if you do not want to allow login - from the provider, but want to integrate with a provider. + - If true, users cannot log in through this provider. They can only link to this provider. This is useful if you do + not want to allow login from the provider, but want to integrate with a provider. aliases: - linkOnly type: bool @@ -125,14 +125,15 @@ options: config: description: - - Dict specifying the configuration options for the provider; the contents differ depending on the value of O(provider_id). Examples are - given below for V(oidc) and V(saml). It is easiest to obtain valid config values by dumping an already-existing identity provider configuration - through check-mode in the RV(existing) field. + - Dict specifying the configuration options for the provider; the contents differ depending on the value of O(provider_id). + Examples are given below for V(oidc) and V(saml). It is easiest to obtain valid config values by dumping an already-existing + identity provider configuration through check-mode in the RV(existing) field. type: dict suboptions: hide_on_login_page: description: - - If hidden, login with this provider is possible only if requested explicitly, for example using the C(kc_idp_hint) parameter. + - If hidden, login with this provider is possible only if requested explicitly, for example using the C(kc_idp_hint) + parameter. aliases: - hideOnLoginPage type: bool @@ -146,14 +147,14 @@ options: sync_mode: description: - - Default sync mode for all mappers. The sync mode determines when user data will be synced using the mappers. + - Default sync mode for all mappers. The sync mode determines when user data is synced using the mappers. aliases: - syncMode type: str issuer: description: - - The issuer identifier for the issuer of the response. If not provided, no validation will be performed. + - The issuer identifier for the issuer of the response. If not provided, no validation is performed. type: str authorizationUrl: @@ -203,7 +204,7 @@ options: useJwksUrl: description: - - If the switch is on, identity provider public keys will be downloaded from given JWKS URL. + - If V(true), identity provider public keys are downloaded from the given JWKS URL. type: bool jwksUrl: @@ -213,7 +214,7 @@ options: entityId: description: - - The Entity ID that will be used to uniquely identify this SAML Service Provider. + - The Entity ID that is used to uniquely identify this SAML Service Provider. type: str singleSignOnServiceUrl: @@ -352,76 +353,79 @@ msg: sample: "Identity provider my-idp has been created" proposed: - description: Representation of proposed identity provider.
- returned: always - type: dict - sample: { - "config": { - "authorizationUrl": "https://idp.example.com/auth", - "clientAuthMethod": "client_secret_post", - "clientId": "my-client", - "clientSecret": "secret", - "issuer": "https://idp.example.com", - "tokenUrl": "https://idp.example.com/token", - "userInfoUrl": "https://idp.example.com/userinfo" - }, - "displayName": "OpenID Connect IdP", - "providerId": "oidc" + description: Representation of proposed identity provider. + returned: always + type: dict + sample: + { + "config": { + "authorizationUrl": "https://idp.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "secret", + "issuer": "https://idp.example.com", + "tokenUrl": "https://idp.example.com/token", + "userInfoUrl": "https://idp.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP", + "providerId": "oidc" } existing: - description: Representation of existing identity provider. - returned: always - type: dict - sample: { - "addReadTokenRoleOnCreate": false, - "alias": "my-idp", - "authenticateByDefault": false, - "config": { - "authorizationUrl": "https://old.example.com/auth", - "clientAuthMethod": "client_secret_post", - "clientId": "my-client", - "clientSecret": "**********", - "issuer": "https://old.example.com", - "syncMode": "FORCE", - "tokenUrl": "https://old.example.com/token", - "userInfoUrl": "https://old.example.com/userinfo" - }, - "displayName": "OpenID Connect IdP", - "enabled": true, - "firstBrokerLoginFlowAlias": "first broker login", - "internalId": "4d28d7e3-1b80-45bb-8a30-5822bf55aa1c", - "linkOnly": false, - "providerId": "oidc", - "storeToken": false, - "trustEmail": false, + description: Representation of existing identity provider. + returned: always + type: dict + sample: + { + "addReadTokenRoleOnCreate": false, + "alias": "my-idp", + "authenticateByDefault": false, + "config": { + "authorizationUrl": "https://old.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "**********", + "issuer": "https://old.example.com", + "syncMode": "FORCE", + "tokenUrl": "https://old.example.com/token", + "userInfoUrl": "https://old.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP", + "enabled": true, + "firstBrokerLoginFlowAlias": "first broker login", + "internalId": "4d28d7e3-1b80-45bb-8a30-5822bf55aa1c", + "linkOnly": false, + "providerId": "oidc", + "storeToken": false, + "trustEmail": false } end_state: - description: Representation of identity provider after module execution. - returned: on success - type: dict - sample: { - "addReadTokenRoleOnCreate": false, - "alias": "my-idp", - "authenticateByDefault": false, - "config": { - "authorizationUrl": "https://idp.example.com/auth", - "clientAuthMethod": "client_secret_post", - "clientId": "my-client", - "clientSecret": "**********", - "issuer": "https://idp.example.com", - "tokenUrl": "https://idp.example.com/token", - "userInfoUrl": "https://idp.example.com/userinfo" - }, - "displayName": "OpenID Connect IdP", - "enabled": true, - "firstBrokerLoginFlowAlias": "first broker login", - "internalId": "4d28d7e3-1b80-45bb-8a30-5822bf55aa1c", - "linkOnly": false, - "providerId": "oidc", - "storeToken": false, - "trustEmail": false, + description: Representation of identity provider after module execution. 
+ returned: on success + type: dict + sample: + { + "addReadTokenRoleOnCreate": false, + "alias": "my-idp", + "authenticateByDefault": false, + "config": { + "authorizationUrl": "https://idp.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "**********", + "issuer": "https://idp.example.com", + "tokenUrl": "https://idp.example.com/token", + "userInfoUrl": "https://idp.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP", + "enabled": true, + "firstBrokerLoginFlowAlias": "first broker login", + "internalId": "4d28d7e3-1b80-45bb-8a30-5822bf55aa1c", + "linkOnly": false, + "providerId": "oidc", + "storeToken": false, + "trustEmail": false } """ @@ -495,8 +499,10 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, - required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) diff --git a/plugins/modules/keycloak_realm.py b/plugins/modules/keycloak_realm.py index d2ae4f33c8..c8bc7dc7df 100644 --- a/plugins/modules/keycloak_realm.py +++ b/plugins/modules/keycloak_realm.py @@ -17,14 +17,16 @@ short_description: Allows administration of Keycloak realm using Keycloak API version_added: 3.0.0 description: - - This module allows the administration of Keycloak realm using the Keycloak REST API. It requires access to the REST API using OpenID Connect; - the user connecting and the realm being used must have the requisite access rights. In a default Keycloak installation, admin-cli and an admin - user would work, as would a separate realm definition with the scope tailored to your needs and a user having the expected roles. - - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation at - U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). - Aliases are provided so camelCased versions can be used as well. - - The Keycloak API does not always sanity check inputs, for example you can set SAML-specific settings on an OpenID Connect client for instance and - also the other way around. B(Be careful). If you do not specify a setting, usually a sensible default is chosen. + - This module allows the administration of a Keycloak realm using the Keycloak REST API. It requires access to the REST API + using OpenID Connect; the user connecting and the realm being used must have the requisite access rights. In a default + Keycloak installation, admin-cli and an admin user would work, as would a separate realm definition with the scope tailored + to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). Aliases are provided so camelCased versions can be used + as well. + - The Keycloak API does not always sanity check inputs, for example you can set SAML-specific settings on an OpenID Connect + client and also the other way around. B(Be careful). If you do not specify a setting, usually a sensible + default is chosen.
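The argument-spec change repeated across these modules (C(auth_client_id)/C(auth_client_secret) joining C(required_one_of), C(required_together) reduced to username/password, and C(refresh_token) requiring C(auth_realm)) suggests that client-credentials authentication without an admin username and password is now accepted. A hedged sketch of that combination, with placeholder names and secrets:

```yaml
- name: Ensure a realm exists, authenticating with client credentials
  community.general.keycloak_realm:
    auth_keycloak_url: https://keycloak.example.com/auth
    auth_realm: master
    auth_client_id: my-service-client          # a confidential client with a service account
    auth_client_secret: "{{ service_client_secret }}"
    realm: unique_realm_name
    enabled: true
    state: present
```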
attributes: check_mode: support: full @@ -37,8 +39,8 @@ options: state: description: - State of the realm. - - On V(present), the realm will be created (or updated if it exists already). - - On V(absent), the realm will be removed if it exists. + - On V(present), the realm is created (or updated if it exists already). + - On V(absent), the realm is removed if it exists. choices: ['present', 'absent'] default: 'present' type: str @@ -526,8 +528,7 @@ EXAMPLES = r""" auth_realm: master auth_username: USERNAME auth_password: PASSWORD - id: realm - realm: realm + realm: unique_realm_name state: present - name: Delete a Keycloak realm @@ -537,7 +538,7 @@ EXAMPLES = r""" auth_realm: master auth_username: USERNAME auth_password: PASSWORD - id: test + realm: unique_realm_name state: absent """ @@ -552,19 +553,31 @@ proposed: description: Representation of proposed realm. returned: always type: dict - sample: {id: "test"} + sample: {"realm": "test"} existing: description: Representation of existing realm (sample is truncated). returned: always type: dict - sample: {"adminUrl": "http://www.example.com/admin_url", "attributes": {"request.object.signature.alg": "RS256"}} + sample: + { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256" + } + } end_state: description: Representation of realm after module execution (sample is truncated). returned: on success type: dict - sample: {"adminUrl": "http://www.example.com/admin_url", "attributes": {"request.object.signature.alg": "RS256"}} + sample: + { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256" + } + } """ from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ @@ -704,8 +717,10 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_one_of=([['id', 'realm', 'enabled'], - ['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) + ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) @@ -763,9 +778,6 @@ def main(): # Process a creation result['changed'] = True - if 'id' not in desired_realm: - module.fail_json(msg='id needs to be specified when creating a new realm') - if module._diff: result['diff'] = dict(before='', after=sanitize_cr(desired_realm)) @@ -774,11 +786,11 @@ def main(): # create it kc.create_realm(desired_realm) - after_realm = kc.get_realm_by_id(desired_realm['id']) + after_realm = kc.get_realm_by_id(desired_realm['realm']) result['end_state'] = sanitize_cr(after_realm) - result['msg'] = 'Realm %s has been created.' % desired_realm['id'] + result['msg'] = 'Realm %s has been created.' % desired_realm['realm'] module.exit_json(**result) else: @@ -812,7 +824,7 @@ def main(): result['diff'] = dict(before=before_realm_sanitized, after=sanitize_cr(after_realm)) - result['msg'] = 'Realm %s has been updated.' % desired_realm['id'] + result['msg'] = 'Realm %s has been updated.' % desired_realm['realm'] module.exit_json(**result) else: @@ -831,7 +843,7 @@ def main(): result['proposed'] = {} result['end_state'] = {} - result['msg'] = 'Realm %s has been deleted.' 
% before_realm['id'] + result['msg'] = 'Realm %s has been deleted.' % before_realm['realm'] module.exit_json(**result) diff --git a/plugins/modules/keycloak_realm_info.py b/plugins/modules/keycloak_realm_info.py index e3e4f08248..501ca48c21 100644 --- a/plugins/modules/keycloak_realm_info.py +++ b/plugins/modules/keycloak_realm_info.py @@ -17,10 +17,11 @@ version_added: 4.3.0 description: - This module allows you to get Keycloak realm public information using the Keycloak REST API. - - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation at - U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). - - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will be returned that way by this module. - You may pass single values for attributes when calling the module, and this will be translated into a list suitable for the API. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). + - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way + by this module. You may pass single values for attributes when calling the module, and this is translated into a list + suitable for the API. extends_documentation_fragment: - community.general.attributes - community.general.attributes.info_module diff --git a/plugins/modules/keycloak_realm_key.py b/plugins/modules/keycloak_realm_key.py index 0f7c5ae114..dbb284ec4b 100644 --- a/plugins/modules/keycloak_realm_key.py +++ b/plugins/modules/keycloak_realm_key.py @@ -17,17 +17,17 @@ short_description: Allows administration of Keycloak realm keys using Keycloak API version_added: 7.5.0 description: - - This module allows the administration of Keycloak realm keys using the Keycloak REST API. It requires access to the REST API using OpenID Connect; - the user connecting and the realm being used must have the requisite access rights. In a default Keycloak installation, admin-cli and an admin - user would work, as would a separate realm definition with the scope tailored to your needs and a user having the expected roles. - - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation at - U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). - Aliases are provided so camelCased versions can be used as well. - - This module is unable to detect changes to the actual cryptographic key after importing it. However, if some other property is changed alongside - the cryptographic key, then the key will also get changed as a side-effect, as the JSON payload needs to include the private key. This can - be considered either a bug or a feature, as the alternative would be to always update the realm key whether it has changed or not. - - If certificate is not explicitly provided it will be dynamically created by Keycloak. Therefore comparing the current state of the certificate - to the desired state (which may be empty) is not possible. + - This module allows the administration of Keycloak realm keys using the Keycloak REST API. It requires access to the REST + API using OpenID Connect; the user connecting and the realm being used must have the requisite access rights.
In a default + Keycloak installation, admin-cli and an admin user would work, as would a separate realm definition with the scope tailored + to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). Aliases are provided so camelCased versions can be used + as well. + - This module is unable to detect changes to the actual cryptographic key after importing it. However, if some other property + is changed alongside the cryptographic key, then the key also changes as a side-effect, as the JSON payload needs to include + the private key. This can be considered either a bug or a feature, as the alternative would be to always update the realm + key whether it has changed or not. attributes: check_mode: support: full @@ -40,8 +40,8 @@ options: state: description: - State of the keycloak realm key. - - On V(present), the realm key will be created (or updated if it exists already). - - On V(absent), the realm key will be removed if it exists. + - On V(present), the realm key is created (or updated if it exists already). + - On V(absent), the realm key is removed if it exists. choices: ['present', 'absent'] default: 'present' type: str @@ -52,9 +52,9 @@ options: required: true force: description: - - Enforce the state of the private key and certificate. This is not automatically the case as this module is unable to determine the current - state of the private key and thus cannot trigger an update based on an actual divergence. That said, a private key update may happen even - if force is false as a side-effect of other changes. + - Enforce the state of the private key and certificate. This is not automatically the case as this module is unable + to determine the current state of the private key and thus cannot trigger an update based on an actual divergence. + That said, a private key update may happen even if force is false as a side-effect of other changes. default: false type: bool parent_id: @@ -76,12 +76,14 @@ options: suboptions: active: description: - - Whether they key is active or inactive. Not to be confused with the state of the Ansible resource managed by the O(state) parameter. + - Whether the key is active or inactive. Not to be confused with the state of the Ansible resource managed by the + O(state) parameter. default: true type: bool enabled: description: - - Whether the key is enabled or disabled. Not to be confused with the state of the Ansible resource managed by the O(state) parameter. + - Whether the key is enabled or disabled. Not to be confused with the state of the Ansible resource managed by the + O(state) parameter. default: true type: bool priority: @@ -92,30 +94,33 @@ options: algorithm: description: - Key algorithm. - - The values V(RS384), V(RS512), V(PS256), V(PS384), V(PS512), V(RSA1_5), V(RSA-OAEP), V(RSA-OAEP-256) have been added in community.general - 8.2.0. + - The values V(RS384), V(RS512), V(PS256), V(PS384), V(PS512), V(RSA1_5), V(RSA-OAEP), V(RSA-OAEP-256) have been + added in community.general 8.2.0. default: RS256 choices: ['RS256', 'RS384', 'RS512', 'PS256', 'PS384', 'PS512', 'RSA1_5', 'RSA-OAEP', 'RSA-OAEP-256'] type: str private_key: description: - The private key as an ASCII string. Contents of the key must match O(config.algorithm) and O(provider_id).
-          - Please note that the module cannot detect whether the private key specified differs from the current state's private key. Use O(force=true)
-            to force the module to update the private key if you expect it to be updated.
+          - Please note that the module cannot detect whether the private key specified differs from the current state's private
+            key. Use O(force=true) to force the module to update the private key if you expect it to be updated.
         required: true
         type: str
       certificate:
         description:
-          - A certificate signed with the private key as an ASCII string. Contents of the key must match O(config.algorithm) and O(provider_id).
-          - If you want Keycloak to automatically generate a certificate using your private key then set this to an empty string.
+          - A certificate signed with the private key as an ASCII string. Contents of the key must match O(config.algorithm)
+            and O(provider_id).
+          - If you want Keycloak to automatically generate a certificate using your private key then set this to an empty
+            string.
         required: true
         type: str
 notes:
-  - Current value of the private key cannot be fetched from Keycloak. Therefore comparing its desired state to the current state is not possible.
-  - If certificate is not explicitly provided it will be dynamically created by Keycloak. Therefore comparing the current state of the certificate
-    to the desired state (which may be empty) is not possible.
-  - Due to the private key and certificate options the module is B(not fully idempotent). You can use O(force=true) to force the module to always
-    update if you know that the private key might have changed.
+  - Current value of the private key cannot be fetched from Keycloak. Therefore comparing its desired state to the current
+    state is not possible.
+  - If O(config.certificate) is not explicitly provided it is dynamically created by Keycloak. Therefore comparing the current
+    state of the certificate to the desired state (which may be empty) is not possible.
+  - Due to the private key and certificate options the module is B(not fully idempotent). You can use O(force=true) to force
+    the module to always update if you know that the private key might have changed.
 extends_documentation_fragment:
   - community.general.keycloak
   - community.general.keycloak.actiongroup_keycloak
@@ -201,7 +206,21 @@ end_state:
   description: Realm key configuration.
   type: dict
   returned: when O(state=present)
-  sample: {"active": ["true"], "algorithm": ["RS256"], "enabled": ["true"], "priority": ["140"]}
+  sample:
+    {
+      "active": [
+        "true"
+      ],
+      "algorithm": [
+        "RS256"
+      ],
+      "enabled": [
+        "true"
+      ],
+      "priority": [
+        "140"
+      ]
+    }
 """

 from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
@@ -256,8 +275,10 @@ def main():

     module = AnsibleModule(argument_spec=argument_spec,
                            supports_check_mode=True,
-                           required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
-                           required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+                           required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+                           required_together=([['auth_username', 'auth_password']]),
+                           required_by={'refresh_token': 'auth_realm'},
+                           )

     # Initialize the result object. Only "changed" seems to have special
     # meaning for Ansible.
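The same authentication-option constraint change shown above is applied to every keycloak_* module in this patch. As a rough standalone sketch of what the new AnsibleModule constraints enforce (the option declarations below are illustrative assumptions; the real modules pull these options, their types, and no_log settings from the community.general Keycloak documentation fragment and module utils):

from ansible.module_utils.basic import AnsibleModule


def main():
    # Illustrative subset of the shared Keycloak auth options (hypothetical,
    # simplified declarations for demonstration only).
    argument_spec = dict(
        token=dict(type='str', no_log=True),
        refresh_token=dict(type='str', no_log=True),
        auth_realm=dict(type='str'),
        auth_username=dict(type='str'),
        auth_password=dict(type='str', no_log=True),
        auth_client_id=dict(type='str'),
        auth_client_secret=dict(type='str', no_log=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # Fail unless at least one of these is given: a ready-made token,
        # password credentials, or client credentials.
        required_one_of=[['token', 'auth_realm', 'auth_username', 'auth_password',
                          'auth_client_id', 'auth_client_secret']],
        # Username and password only make sense as a pair; auth_realm is no
        # longer forced in, so client-credential logins work without it.
        required_together=[['auth_username', 'auth_password']],
        # A refresh token can only be exchanged against a specific realm.
        required_by={'refresh_token': 'auth_realm'},
    )
    module.exit_json(changed=False)


if __name__ == '__main__':
    main()

The net effect of dropping auth_realm from required_together while adding required_by is that token- and client-secret-based logins no longer have to carry unused username/password/realm options, while a supplied refresh_token still pins down the realm it belongs to.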
diff --git a/plugins/modules/keycloak_realm_keys_metadata_info.py b/plugins/modules/keycloak_realm_keys_metadata_info.py index d116e3435b..8340c8f2a5 100644 --- a/plugins/modules/keycloak_realm_keys_metadata_info.py +++ b/plugins/modules/keycloak_realm_keys_metadata_info.py @@ -18,8 +18,8 @@ version_added: 9.3.0 description: - This module allows you to get Keycloak realm keys metadata using the Keycloak REST API. - - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation at - U(https://www.keycloak.org/docs-api/latest/rest-api/index.html). + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/latest/rest-api/index.html). attributes: action_group: version_added: 10.2.0 @@ -104,8 +104,9 @@ def main(): module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, - required_one_of=([["token", "auth_realm", "auth_username", "auth_password"]]), - required_together=([["auth_realm", "auth_username", "auth_password"]]), + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, ) result = dict(changed=False, msg="", keys_metadata="") diff --git a/plugins/modules/keycloak_realm_rolemapping.py b/plugins/modules/keycloak_realm_rolemapping.py index bed65057a4..2b6b6a4eda 100644 --- a/plugins/modules/keycloak_realm_rolemapping.py +++ b/plugins/modules/keycloak_realm_rolemapping.py @@ -16,16 +16,17 @@ short_description: Allows administration of Keycloak realm role mappings into gr version_added: 8.2.0 description: - - This module allows you to add, remove or modify Keycloak realm role mappings into groups with the Keycloak REST API. It requires access to - the REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. In a default Keycloak - installation, admin-cli and an admin user would work, as would a separate client definition with the scope tailored to your needs and a user - having the expected roles. - - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation at - U(https://www.keycloak.org/docs-api/18.0/rest-api/index.html). - - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will be returned that way by this module. - You may pass single values for attributes when calling the module, and this will be translated into a list suitable for the API. - - When updating a group_rolemapping, where possible provide the role ID to the module. This removes a lookup to the API to translate the name - into the role ID. + - This module allows you to add, remove or modify Keycloak realm role mappings into groups with the Keycloak REST API. It + requires access to the REST API using OpenID Connect; the user connecting and the client being used must have the requisite + access rights. In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client + definition with the scope tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/18.0/rest-api/index.html). 
+  - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way
+    by this module. You may pass single values for attributes when calling the module, and this is translated into a list
+    suitable for the API.
+  - When updating a group_rolemapping, where possible provide the role ID to the module. This removes a lookup to the API
+    to translate the name into the role ID.
 attributes:
   check_mode:
     support: full
@@ -38,8 +39,8 @@ options:
   state:
     description:
       - State of the realm_rolemapping.
-      - On C(present), the realm_rolemapping will be created if it does not yet exist, or updated with the parameters you provide.
-      - On C(absent), the realm_rolemapping will be removed if it exists.
+      - On C(present), the realm_rolemapping is created if it does not yet exist, or updated with the parameters you provide.
+      - On C(absent), the realm_rolemapping is removed if it exists.
     default: 'present'
     type: str
     choices:
@@ -70,21 +71,22 @@ options:
           - Identify parent by ID.
           - Needs less API calls than using O(parents[].name).
           - A deep parent chain can be started at any point when first given parent is given as ID.
-          - Note that in principle both ID and name can be specified at the same time but current implementation only always use just one of them,
-            with ID being preferred.
+          - Note that in principle both ID and name can be specified at the same time but the current implementation always
+            uses just one of them, with ID being preferred.
       name:
         type: str
         description:
          - Identify parent by name.
          - Needs more internal API calls than using O(parents[].id) to map names to ID's under the hood.
          - When giving a parent chain with only names it must be complete up to the top.
-          - Note that in principle both ID and name can be specified at the same time but current implementation only always use just one of them,
-            with ID being preferred.
+          - Note that in principle both ID and name can be specified at the same time but the current implementation always
+            uses just one of them, with ID being preferred.
  gid:
    type: str
    description:
      - ID of the group to be mapped.
-      - This parameter is not required for updating or deleting the rolemapping but providing it will reduce the number of API calls required.
+      - This parameter is not required for updating or deleting the rolemapping but providing it reduces the number of API
+        calls required.
  roles:
    description:
      - Roles to be mapped to the group.
@@ -100,8 +102,8 @@ options:
      type: str
      description:
        - The unique identifier for this role_representation.
-        - This parameter is not required for updating or deleting a role_representation but providing it will reduce the number of API calls
-          required.
+        - This parameter is not required for updating or deleting a role_representation but providing it reduces the number
+          of API calls required.
 extends_documentation_fragment:
   - community.general.keycloak
   - community.general.keycloak.actiongroup_keycloak
@@ -192,7 +194,7 @@ proposed:
   description: Representation of proposed client role mapping.
   returned: always
   type: dict
-  sample: {clientId: "test"}
+  sample: {"clientId": "test"}

 existing:
   description:
     - The sample is truncated.
returned: always type: dict - sample: {"adminUrl": "http://www.example.com/admin_url", "attributes": {"request.object.signature.alg": "RS256"}} + sample: + { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256" + } + } end_state: description: @@ -208,7 +216,13 @@ end_state: - The sample is truncated. returned: on success type: dict - sample: {"adminUrl": "http://www.example.com/admin_url", "attributes": {"request.object.signature.alg": "RS256"}} + sample: + { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256" + } + } """ from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( @@ -249,8 +263,10 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, - required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) diff --git a/plugins/modules/keycloak_role.py b/plugins/modules/keycloak_role.py index 3e7644bf6c..5b706354ed 100644 --- a/plugins/modules/keycloak_role.py +++ b/plugins/modules/keycloak_role.py @@ -16,13 +16,15 @@ short_description: Allows administration of Keycloak roles using Keycloak API version_added: 3.4.0 description: - - This module allows you to add, remove or modify Keycloak roles using the Keycloak REST API. It requires access to the REST API using OpenID Connect; - the user connecting and the client being used must have the requisite access rights. In a default Keycloak installation, admin-cli and an - admin user would work, as would a separate client definition with the scope tailored to your needs and a user having the expected roles. - - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation at - U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). - - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will be returned that way by this module. - You may pass single values for attributes when calling the module, and this will be translated into a list suitable for the API. + - This module allows you to add, remove or modify Keycloak roles using the Keycloak REST API. It requires access to the + REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. In + a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with the + scope tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). + - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way + by this module. You may pass single values for attributes when calling the module, and this is translated into a list + suitable for the API. 
attributes: check_mode: support: full @@ -35,8 +37,8 @@ options: state: description: - State of the role. - - On V(present), the role will be created if it does not yet exist, or updated with the parameters you provide. - - On V(absent), the role will be removed if it exists. + - On V(present), the role is created if it does not yet exist, or updated with the parameters you provide. + - On V(absent), the role is removed if it exists. default: 'present' type: str choices: @@ -62,7 +64,7 @@ options: client_id: type: str description: - - If the role is a client role, the client id under which it resides. + - If the role is a client role, the client ID under which it resides. - If this parameter is absent, the role is considered a realm role. attributes: type: dict @@ -199,15 +201,31 @@ existing: description: Representation of existing role. returned: always type: dict - sample: {"attributes": {}, "clientRole": true, "composite": false, "containerId": "9f03eb61-a826-4771-a9fd-930e06d2d36a", "description": "My - client test role", "id": "561703dd-0f38-45ff-9a5a-0c978f794547", "name": "myrole"} + sample: + { + "attributes": {}, + "clientRole": true, + "composite": false, + "containerId": "9f03eb61-a826-4771-a9fd-930e06d2d36a", + "description": "My client test role", + "id": "561703dd-0f38-45ff-9a5a-0c978f794547", + "name": "myrole" + } end_state: description: Representation of role after module execution (sample is truncated). returned: on success type: dict - sample: {"attributes": {}, "clientRole": true, "composite": false, "containerId": "9f03eb61-a826-4771-a9fd-930e06d2d36a", "description": "My - updated client test role", "id": "561703dd-0f38-45ff-9a5a-0c978f794547", "name": "myrole"} + sample: + { + "attributes": {}, + "clientRole": true, + "composite": false, + "containerId": "9f03eb61-a826-4771-a9fd-930e06d2d36a", + "description": "My updated client test role", + "id": "561703dd-0f38-45ff-9a5a-0c978f794547", + "name": "myrole" + } """ from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ @@ -226,7 +244,7 @@ def main(): composites_spec = dict( name=dict(type='str', required=True), - client_id=dict(type='str', aliases=['clientId'], required=False), + client_id=dict(type='str', aliases=['clientId']), state=dict(type='str', default='present', choices=['present', 'absent']) ) @@ -245,8 +263,10 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, - required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) diff --git a/plugins/modules/keycloak_user.py b/plugins/modules/keycloak_user.py index 65880548ab..2b3c838483 100644 --- a/plugins/modules/keycloak_user.py +++ b/plugins/modules/keycloak_user.py @@ -101,6 +101,9 @@ options: groups: description: - List of groups for the user. + - Groups can be referenced by their name, like V(staff), or their path, like V(/staff/engineering). The path syntax + allows you to reference subgroups, which is not possible otherwise. + - Using the path is possible since community.general 10.6.0. 
type: list elements: dict default: [] @@ -329,11 +332,6 @@ EXAMPLES = r""" """ RETURN = r""" -msg: - description: Message as to what action was taken. - returned: always - type: str - sample: User f18c709c-03d6-11ee-970b-c74bf2721112 created proposed: description: Representation of the proposed user. returned: on success @@ -346,10 +344,6 @@ end_state: description: Representation of the user after module execution. returned: on success type: dict -changed: - description: Return V(true) if the operation changed the user on the keycloak server, V(false) otherwise. - returned: always - type: bool """ from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ @@ -407,8 +401,10 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, - required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) diff --git a/plugins/modules/keycloak_user_federation.py b/plugins/modules/keycloak_user_federation.py index be8b75fc85..3290ab8dd9 100644 --- a/plugins/modules/keycloak_user_federation.py +++ b/plugins/modules/keycloak_user_federation.py @@ -16,11 +16,12 @@ short_description: Allows administration of Keycloak user federations using Keyc version_added: 3.7.0 description: - - This module allows you to add, remove or modify Keycloak user federations using the Keycloak REST API. It requires access to the REST API using - OpenID Connect; the user connecting and the client being used must have the requisite access rights. In a default Keycloak installation, admin-cli - and an admin user would work, as would a separate client definition with the scope tailored to your needs and a user having the expected roles. - - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation at - U(https://www.keycloak.org/docs-api/20.0.2/rest-api/index.html). + - This module allows you to add, remove or modify Keycloak user federations using the Keycloak REST API. It requires access + to the REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. + In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with + the scope tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/20.0.2/rest-api/index.html). attributes: check_mode: support: full @@ -33,8 +34,8 @@ options: state: description: - State of the user federation. - - On V(present), the user federation will be created if it does not yet exist, or updated with the parameters you provide. - - On V(absent), the user federation will be removed if it exists. + - On V(present), the user federation is created if it does not yet exist, or updated with the parameters you provide. + - On V(absent), the user federation is removed if it exists. 
     default: 'present'
     type: str
     choices:
@@ -49,7 +50,7 @@ options:

   id:
     description:
-      - The unique ID for this user federation. If left empty, the user federation will be searched by its O(name).
+      - The unique ID for this user federation. If left empty, the user federation is searched by its O(name).
     type: str

   name:
@@ -59,8 +60,8 @@ options:

   provider_id:
     description:
-      - Provider for this user federation. Built-in providers are V(ldap), V(kerberos), and V(sssd). Custom user storage providers can also be
-        used.
+      - Provider for this user federation. Built-in providers are V(ldap), V(kerberos), and V(sssd). Custom user storage providers
+        can also be used.
     aliases:
       - providerId
     type: str
@@ -75,7 +76,7 @@ options:

   parent_id:
     description:
-      - Unique ID for the parent of this user federation. Realm ID will be automatically used if left blank.
+      - Unique ID for the parent of this user federation. Realm ID is automatically used if left blank.
     aliases:
       - parentId
     type: str
@@ -90,12 +91,15 @@ options:

   bind_credential_update_mode:
     description:
-      - The value of the config parameter O(config.bindCredential) is redacted in the Keycloak responses. Comparing the redacted value with the
-        desired value always evaluates to not equal. This means the before and desired states are never equal if the parameter is set.
-      - Set to V(always) to include O(config.bindCredential) in the comparison of before and desired state. Because of the redacted value returned
-        by Keycloak the module will always detect a change and make an update if a O(config.bindCredential) value is set.
+      - The value of the config parameter O(config.bindCredential) is redacted in the Keycloak responses. Comparing the redacted
+        value with the desired value always evaluates to not equal. This means the before and desired states are never equal
+        if the parameter is set.
+      - Set to V(always) to include O(config.bindCredential) in the comparison of before and desired state. Because of the
+        redacted value returned by Keycloak the module always detects a change and makes an update if a O(config.bindCredential)
+        value is set.
       - Set to V(only_indirect) to exclude O(config.bindCredential) when comparing the before state with the desired state.
-        The value of O(config.bindCredential) will only be updated if there are other changes to the user federation that require an update.
+        The value of O(config.bindCredential) is only updated if there are other changes to the user federation that require
+        an update.
     type: str
     default: always
     choices:
@@ -105,9 +109,9 @@ options:

   config:
     description:
-      - Dict specifying the configuration options for the provider; the contents differ depending on the value of O(provider_id). Examples are
-        given below for V(ldap), V(kerberos) and V(sssd). It is easiest to obtain valid config values by dumping an already-existing user federation
-        configuration through check-mode in the RV(existing) field.
+      - Dict specifying the configuration options for the provider; the contents differ depending on the value of O(provider_id).
+        Examples are given below for V(ldap), V(kerberos) and V(sssd). It is easiest to obtain valid config values by dumping
+        an already-existing user federation configuration through check-mode in the RV(existing) field.
       - The value V(sssd) has been supported since community.general 4.2.0.
     type: dict
     suboptions:
@@ -125,14 +129,14 @@ options:

       importEnabled:
         description:
-          - If V(true), LDAP users will be imported into Keycloak DB and synced by the configured sync policies.
+          - If V(true), LDAP users are imported into Keycloak DB and synced by the configured sync policies.
         default: true
         type: bool

       editMode:
         description:
-          - V(READ_ONLY) is a read-only LDAP store. V(WRITABLE) means data will be synced back to LDAP on demand. V(UNSYNCED) means user data
-            will be imported, but not synced back to LDAP.
+          - V(READ_ONLY) is a read-only LDAP store. V(WRITABLE) means data is synced back to LDAP on demand. V(UNSYNCED) means
+            user data is imported, but not synced back to LDAP.
         type: str
         choices:
           - READ_ONLY
@@ -141,7 +145,8 @@ options:

       syncRegistrations:
         description:
-          - Should newly created users be created within LDAP store? Priority effects which provider is chosen to sync the new user.
+          - Should newly created users be created within LDAP store? Priority affects which provider is chosen to sync the
+            new user.
         default: false
         type: bool

@@ -153,29 +158,31 @@ options:

       usernameLDAPAttribute:
         description:
-          - Name of LDAP attribute, which is mapped as Keycloak username. For many LDAP server vendors it can be V(uid). For Active directory
-            it can be V(sAMAccountName) or V(cn). The attribute should be filled for all LDAP user records you want to import from LDAP to Keycloak.
+          - Name of LDAP attribute, which is mapped as Keycloak username. For many LDAP server vendors it can be V(uid). For
+            Active directory it can be V(sAMAccountName) or V(cn). The attribute should be filled for all LDAP user records
+            you want to import from LDAP to Keycloak.
         type: str

       rdnLDAPAttribute:
         description:
-          - Name of LDAP attribute, which is used as RDN (top attribute) of typical user DN. Usually it is the same as Username LDAP attribute,
-            however it is not required. For example for Active directory, it is common to use V(cn) as RDN attribute when username attribute might
-            be V(sAMAccountName).
+          - Name of LDAP attribute, which is used as RDN (top attribute) of typical user DN. Usually it is the same as Username
+            LDAP attribute, however it is not required. For example for Active directory, it is common to use V(cn) as RDN
+            attribute when username attribute might be V(sAMAccountName).
         type: str

       uuidLDAPAttribute:
         description:
-          - Name of LDAP attribute, which is used as unique object identifier (UUID) for objects in LDAP. For many LDAP server vendors, it is
-            V(entryUUID); however some are different. For example for Active directory it should be V(objectGUID). If your LDAP server does not
-            support the notion of UUID, you can use any other attribute that is supposed to be unique among LDAP users in tree.
+          - Name of LDAP attribute, which is used as unique object identifier (UUID) for objects in LDAP. For many LDAP server
+            vendors, it is V(entryUUID); however some are different. For example for Active directory it should be V(objectGUID).
+            If your LDAP server does not support the notion of UUID, you can use any other attribute that is supposed to be
+            unique among LDAP users in tree.
         type: str

       userObjectClasses:
         description:
-          - All values of LDAP objectClass attribute for users in LDAP divided by comma. For example V(inetOrgPerson, organizationalPerson). Newly
-            created Keycloak users will be written to LDAP with all those object classes and existing LDAP user records are found just if they
-            contain all those object classes.
+          - All values of LDAP objectClass attribute for users in LDAP divided by comma. For example V(inetOrgPerson, organizationalPerson).
+            Newly created Keycloak users are written to LDAP with all those object classes and existing LDAP user records
+            are found only if they contain all those object classes.
         type: str

       connectionUrl:
@@ -195,8 +202,8 @@ options:

       searchScope:
         description:
-          - For one level, the search applies only for users in the DNs specified by User DNs. For subtree, the search applies to the whole subtree.
-            See LDAP documentation for more details.
+          - For one level, the search applies only for users in the DNs specified by User DNs. For subtree, the search applies
+            to the whole subtree. See LDAP documentation for more details.
         default: '1'
         type: str
         choices:
@@ -205,7 +212,8 @@ options:

       authType:
         description:
-          - Type of the Authentication method used during LDAP Bind operation. It is used in most of the requests sent to the LDAP server.
+          - Type of the Authentication method used during LDAP Bind operation. It is used in most of the requests sent to
+            the LDAP server.
         default: 'none'
         type: str
         choices:
@@ -214,7 +222,7 @@ options:

       bindDn:
         description:
-          - DN of LDAP user which will be used by Keycloak to access LDAP server.
+          - DN of LDAP user which is used by Keycloak to access LDAP server.
         type: str

       bindCredential:
@@ -224,15 +232,15 @@ options:

       startTls:
         description:
-          - Encrypts the connection to LDAP using STARTTLS, which will disable connection pooling.
+          - Encrypts the connection to LDAP using STARTTLS, which disables connection pooling.
         default: false
         type: bool

       usePasswordModifyExtendedOp:
         description:
-          - Use the LDAPv3 Password Modify Extended Operation (RFC-3062). The password modify extended operation usually requires that LDAP user
-            already has password in the LDAP server. So when this is used with 'Sync Registrations', it can be good to add also 'Hardcoded LDAP
-            attribute mapper' with randomly generated initial password.
+          - Use the LDAPv3 Password Modify Extended Operation (RFC-3062). The password modify extended operation usually requires
+            that the LDAP user already has a password in the LDAP server. So when this is used with 'Sync Registrations', it
+            can be good to add also 'Hardcoded LDAP attribute mapper' with randomly generated initial password.
         default: false
         type: bool

@@ -250,11 +258,11 @@ options:

       useTruststoreSpi:
         description:
-          - Specifies whether LDAP connection will use the truststore SPI with the truststore configured in standalone.xml/domain.xml. V(always)
-            means that it will always use it. V(never) means that it will not use it. V(ldapsOnly) means that it will use if your connection URL
-            use ldaps.
-          - Note even if standalone.xml/domain.xml is not configured, the default Java cacerts or certificate specified by C(javax.net.ssl.trustStore)
-            property will be used.
+          - Specifies whether LDAP connection uses the truststore SPI with the truststore configured in standalone.xml/domain.xml.
+            V(always) means that it always uses it. V(never) means that it does not use it. V(ldapsOnly) means that it is used
+            only if your connection URL uses ldaps.
+          - Note even if standalone.xml/domain.xml is not configured, the default Java cacerts or the certificate specified
+            by the C(javax.net.ssl.trustStore) property is used.
         default: ldapsOnly
         type: str
         choices:
@@ -295,8 +303,8 @@ options:

       connectionPoolingDebug:
         description:
-          - A string that indicates the level of debug output to produce. Example valid values are V(fine) (trace connection creation and removal)
-            and V(all) (all debugging information).
+          - A string that indicates the level of debug output to produce.
Example valid values are V(fine) (trace connection
+            creation and removal) and V(all) (all debugging information).
         type: str

       connectionPoolingInitSize:
@@ -321,13 +329,14 @@ options:

       connectionPoolingTimeout:
         description:
-          - The number of milliseconds that an idle connection may remain in the pool without being closed and removed from the pool.
+          - The number of milliseconds that an idle connection may remain in the pool without being closed and removed from
+            the pool.
         type: int

       allowKerberosAuthentication:
         description:
-          - Enable/disable HTTP authentication of users with SPNEGO/Kerberos tokens. The data about authenticated users will be provisioned from
-            this LDAP server.
+          - Enable/disable HTTP authentication of users with SPNEGO/Kerberos tokens. The data about authenticated users is
+            provisioned from this LDAP server.
         default: false
         type: bool

@@ -338,16 +347,17 @@ options:

       krbPrincipalAttribute:
         description:
-          - Name of the LDAP attribute, which refers to Kerberos principal. This is used to lookup appropriate LDAP user after successful Kerberos/SPNEGO
-            authentication in Keycloak. When this is empty, the LDAP user will be looked based on LDAP username corresponding to the first part
-            of his Kerberos principal. For instance, for principal C(john@KEYCLOAK.ORG), it will assume that LDAP username is V(john).
+          - Name of the LDAP attribute, which refers to Kerberos principal. This is used to look up the appropriate LDAP user
+            after successful Kerberos/SPNEGO authentication in Keycloak. When this is empty, the LDAP user is looked up based
+            on the LDAP username corresponding to the first part of his Kerberos principal. For instance, for principal C(john@KEYCLOAK.ORG),
+            it assumes that the LDAP username is V(john).
         type: str
         version_added: 8.1.0

       serverPrincipal:
         description:
-          - Full name of server principal for HTTP service including server and domain name. For example V(HTTP/host.foo.org@FOO.ORG). Use V(*)
-            to accept any service principal in the KeyTab file.
+          - Full name of server principal for HTTP service including server and domain name. For example V(HTTP/host.foo.org@FOO.ORG).
+            Use V(*) to accept any service principal in the KeyTab file.
         type: str

       keyTab:
@@ -362,8 +372,8 @@ options:

       useKerberosForPasswordAuthentication:
         description:
-          - Use Kerberos login module for authenticate username/password against Kerberos server instead of authenticating against LDAP server
-            with Directory Service API.
+          - Use Kerberos login module to authenticate username/password against Kerberos server instead of authenticating
+            against LDAP server with Directory Service API.
         default: false
         type: bool

@@ -409,17 +419,17 @@ options:

       evictionDay:
         description:
-          - Day of the week the entry will become invalid on.
+          - Day of the week the entry is set to become invalid on.
         type: str

       evictionHour:
         description:
-          - Hour of day the entry will become invalid on.
+          - Hour of day the entry is set to become invalid on.
         type: str

       evictionMinute:
         description:
-          - Minute of day the entry will become invalid on.
+          - Minute of day the entry is set to become invalid on.
         type: str

       maxLifespan:
@@ -429,8 +439,9 @@ options:

       referral:
         description:
-          - Specifies if LDAP referrals should be followed or ignored.
Please note that enabling referrals can slow down authentication + as it allows the LDAP server to decide which other LDAP servers to use. This could potentially include untrusted + servers. type: str choices: - ignore @@ -450,12 +461,12 @@ options: name: description: - - Name of the mapper. If no ID is given, the mapper will be searched by name. + - Name of the mapper. If no ID is given, the mapper is searched by name. type: str parentId: description: - - Unique ID for the parent of this mapper. ID of the user federation will automatically be used if left blank. + - Unique ID for the parent of this mapper. ID of the user federation is automatically used if left blank. type: str providerId: @@ -587,122 +598,125 @@ msg: sample: "No changes required to user federation 164bb483-c613-482e-80fe-7f1431308799." proposed: - description: Representation of proposed user federation. - returned: always - type: dict - sample: { - "config": { - "allowKerberosAuthentication": "false", - "authType": "simple", - "batchSizeForSync": "1000", - "bindCredential": "**********", - "bindDn": "cn=directory reader", - "cachePolicy": "DEFAULT", - "connectionPooling": "true", - "connectionUrl": "ldaps://ldap.example.com:636", - "debug": "false", - "editMode": "READ_ONLY", - "enabled": "true", - "importEnabled": "true", - "pagination": "true", - "priority": "0", - "rdnLDAPAttribute": "uid", - "searchScope": "1", - "syncRegistrations": "false", - "trustEmail": "false", - "useKerberosForPasswordAuthentication": "false", - "useTruststoreSpi": "ldapsOnly", - "userObjectClasses": "inetOrgPerson, organizationalPerson", - "usernameLDAPAttribute": "uid", - "usersDn": "ou=Users,dc=example,dc=com", - "uuidLDAPAttribute": "entryUUID", - "validatePasswordPolicy": "false", - "vendor": "other" - }, - "name": "ldap", - "providerId": "ldap", - "providerType": "org.keycloak.storage.UserStorageProvider" + description: Representation of proposed user federation. + returned: always + type: dict + sample: + { + "config": { + "allowKerberosAuthentication": "false", + "authType": "simple", + "batchSizeForSync": "1000", + "bindCredential": "**********", + "bindDn": "cn=directory reader", + "cachePolicy": "DEFAULT", + "connectionPooling": "true", + "connectionUrl": "ldaps://ldap.example.com:636", + "debug": "false", + "editMode": "READ_ONLY", + "enabled": "true", + "importEnabled": "true", + "pagination": "true", + "priority": "0", + "rdnLDAPAttribute": "uid", + "searchScope": "1", + "syncRegistrations": "false", + "trustEmail": "false", + "useKerberosForPasswordAuthentication": "false", + "useTruststoreSpi": "ldapsOnly", + "userObjectClasses": "inetOrgPerson, organizationalPerson", + "usernameLDAPAttribute": "uid", + "usersDn": "ou=Users,dc=example,dc=com", + "uuidLDAPAttribute": "entryUUID", + "validatePasswordPolicy": "false", + "vendor": "other" + }, + "name": "ldap", + "providerId": "ldap", + "providerType": "org.keycloak.storage.UserStorageProvider" } existing: - description: Representation of existing user federation. 
- returned: always - type: dict - sample: { - "config": { - "allowKerberosAuthentication": "false", - "authType": "simple", - "batchSizeForSync": "1000", - "bindCredential": "**********", - "bindDn": "cn=directory reader", - "cachePolicy": "DEFAULT", - "changedSyncPeriod": "-1", - "connectionPooling": "true", - "connectionUrl": "ldaps://ldap.example.com:636", - "debug": "false", - "editMode": "READ_ONLY", - "enabled": "true", - "fullSyncPeriod": "-1", - "importEnabled": "true", - "pagination": "true", - "priority": "0", - "rdnLDAPAttribute": "uid", - "searchScope": "1", - "syncRegistrations": "false", - "trustEmail": "false", - "useKerberosForPasswordAuthentication": "false", - "useTruststoreSpi": "ldapsOnly", - "userObjectClasses": "inetOrgPerson, organizationalPerson", - "usernameLDAPAttribute": "uid", - "usersDn": "ou=Users,dc=example,dc=com", - "uuidLDAPAttribute": "entryUUID", - "validatePasswordPolicy": "false", - "vendor": "other" - }, - "id": "01122837-9047-4ae4-8ca0-6e2e891a765f", - "mappers": [ - { - "config": { - "always.read.value.from.ldap": "false", - "is.mandatory.in.ldap": "false", - "ldap.attribute": "mail", - "read.only": "true", - "user.model.attribute": "email" - }, - "id": "17d60ce2-2d44-4c2c-8b1f-1fba601b9a9f", - "name": "email", - "parentId": "01122837-9047-4ae4-8ca0-6e2e891a765f", - "providerId": "user-attribute-ldap-mapper", - "providerType": "org.keycloak.storage.ldap.mappers.LDAPStorageMapper" - } - ], - "name": "myfed", - "parentId": "myrealm", - "providerId": "ldap", - "providerType": "org.keycloak.storage.UserStorageProvider" + description: Representation of existing user federation. + returned: always + type: dict + sample: + { + "config": { + "allowKerberosAuthentication": "false", + "authType": "simple", + "batchSizeForSync": "1000", + "bindCredential": "**********", + "bindDn": "cn=directory reader", + "cachePolicy": "DEFAULT", + "changedSyncPeriod": "-1", + "connectionPooling": "true", + "connectionUrl": "ldaps://ldap.example.com:636", + "debug": "false", + "editMode": "READ_ONLY", + "enabled": "true", + "fullSyncPeriod": "-1", + "importEnabled": "true", + "pagination": "true", + "priority": "0", + "rdnLDAPAttribute": "uid", + "searchScope": "1", + "syncRegistrations": "false", + "trustEmail": "false", + "useKerberosForPasswordAuthentication": "false", + "useTruststoreSpi": "ldapsOnly", + "userObjectClasses": "inetOrgPerson, organizationalPerson", + "usernameLDAPAttribute": "uid", + "usersDn": "ou=Users,dc=example,dc=com", + "uuidLDAPAttribute": "entryUUID", + "validatePasswordPolicy": "false", + "vendor": "other" + }, + "id": "01122837-9047-4ae4-8ca0-6e2e891a765f", + "mappers": [ + { + "config": { + "always.read.value.from.ldap": "false", + "is.mandatory.in.ldap": "false", + "ldap.attribute": "mail", + "read.only": "true", + "user.model.attribute": "email" + }, + "id": "17d60ce2-2d44-4c2c-8b1f-1fba601b9a9f", + "name": "email", + "parentId": "01122837-9047-4ae4-8ca0-6e2e891a765f", + "providerId": "user-attribute-ldap-mapper", + "providerType": "org.keycloak.storage.ldap.mappers.LDAPStorageMapper" + } + ], + "name": "myfed", + "parentId": "myrealm", + "providerId": "ldap", + "providerType": "org.keycloak.storage.UserStorageProvider" } end_state: - description: Representation of user federation after module execution. 
- returned: on success - type: dict - sample: { - "config": { - "allowPasswordAuthentication": "false", - "cachePolicy": "DEFAULT", - "enabled": "true", - "kerberosRealm": "EXAMPLE.COM", - "keyTab": "/etc/krb5.keytab", - "priority": "0", - "serverPrincipal": "HTTP/host.example.com@EXAMPLE.COM", - "updateProfileFirstLogin": "false" - }, - "id": "cf52ae4f-4471-4435-a0cf-bb620cadc122", - "mappers": [], - "name": "kerberos", - "parentId": "myrealm", - "providerId": "kerberos", - "providerType": "org.keycloak.storage.UserStorageProvider" + description: Representation of user federation after module execution. + returned: on success + type: dict + sample: + { + "config": { + "allowPasswordAuthentication": "false", + "cachePolicy": "DEFAULT", + "enabled": "true", + "kerberosRealm": "EXAMPLE.COM", + "keyTab": "/etc/krb5.keytab", + "priority": "0", + "serverPrincipal": "HTTP/host.example.com@EXAMPLE.COM", + "updateProfileFirstLogin": "false" + }, + "id": "cf52ae4f-4471-4435-a0cf-bb620cadc122", + "mappers": [], + "name": "kerberos", + "parentId": "myrealm", + "providerId": "kerberos", + "providerType": "org.keycloak.storage.UserStorageProvider" } """ @@ -827,8 +841,10 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_one_of=([['id', 'name'], - ['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) + ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) diff --git a/plugins/modules/keycloak_user_rolemapping.py b/plugins/modules/keycloak_user_rolemapping.py index 319aa5350b..49d71e2ca9 100644 --- a/plugins/modules/keycloak_user_rolemapping.py +++ b/plugins/modules/keycloak_user_rolemapping.py @@ -15,15 +15,17 @@ short_description: Allows administration of Keycloak user_rolemapping with the K version_added: 5.7.0 description: - - This module allows you to add, remove or modify Keycloak user_rolemapping with the Keycloak REST API. It requires access to the REST API using - OpenID Connect; the user connecting and the client being used must have the requisite access rights. In a default Keycloak installation, admin-cli - and an admin user would work, as would a separate client definition with the scope tailored to your needs and a user having the expected roles. - - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation at - U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). - - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will be returned that way by this module. - You may pass single values for attributes when calling the module, and this will be translated into a list suitable for the API. - - When updating a user_rolemapping, where possible provide the role ID to the module. This removes a lookup to the API to translate the name - into the role ID. + - This module allows you to add, remove or modify Keycloak user_rolemapping with the Keycloak REST API. It requires access + to the REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. 
+ In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with + the scope tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). + - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way + by this module. You may pass single values for attributes when calling the module, and this is translated into a list + suitable for the API. + - When updating a user_rolemapping, where possible provide the role ID to the module. This removes a lookup to the API to + translate the name into the role ID. attributes: check_mode: support: full @@ -36,8 +38,8 @@ options: state: description: - State of the user_rolemapping. - - On V(present), the user_rolemapping will be created if it does not yet exist, or updated with the parameters you provide. - - On V(absent), the user_rolemapping will be removed if it exists. + - On V(present), the user_rolemapping is created if it does not yet exist, or updated with the parameters you provide. + - On V(absent), the user_rolemapping is removed if it exists. default: 'present' type: str choices: @@ -59,22 +61,26 @@ options: type: str description: - ID of the user to be mapped. - - This parameter is not required for updating or deleting the rolemapping but providing it will reduce the number of API calls required. + - This parameter is not required for updating or deleting the rolemapping but providing it reduces the number of API + calls required. service_account_user_client_id: type: str description: - Client ID of the service-account-user to be mapped. - - This parameter is not required for updating or deleting the rolemapping but providing it will reduce the number of API calls required. + - This parameter is not required for updating or deleting the rolemapping but providing it reduces the number of API + calls required. client_id: type: str description: - Name of the client to be mapped (different than O(cid)). - - This parameter is required if O(cid) is not provided (can be replaced by O(cid) to reduce the number of API calls that must be made). + - This parameter is required if O(cid) is not provided (can be replaced by O(cid) to reduce the number of API calls + that must be made). cid: type: str description: - ID of the client to be mapped. - - This parameter is not required for updating or deleting the rolemapping but providing it will reduce the number of API calls required. + - This parameter is not required for updating or deleting the rolemapping but providing it reduces the number of API + calls required. roles: description: - Roles to be mapped to the user. @@ -90,8 +96,8 @@ options: type: str description: - The unique identifier for this role_representation. - - This parameter is not required for updating or deleting a role_representation but providing it will reduce the number of API calls - required. + - This parameter is not required for updating or deleting a role_representation but providing it reduces the number + of API calls required. extends_documentation_fragment: - community.general.keycloak - community.general.keycloak.actiongroup_keycloak @@ -184,7 +190,7 @@ proposed: description: Representation of proposed client role mapping. 
returned: always type: dict - sample: {clientId: "test"} + sample: {"clientId": "test"} existing: description: @@ -192,7 +198,13 @@ existing: - The sample is truncated. returned: always type: dict - sample: {"adminUrl": "http://www.example.com/admin_url", "attributes": {"request.object.signature.alg": "RS256"}} + sample: + { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256" + } + } end_state: description: @@ -200,7 +212,13 @@ end_state: - The sample is truncated. returned: on success type: dict - sample: {"adminUrl": "http://www.example.com/admin_url", "attributes": {"request.object.signature.alg": "RS256"}} + sample: + { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256" + } + } """ from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \ @@ -236,9 +254,11 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, - required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password'], + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret'], ['uid', 'target_username', 'service_account_user_client_id']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) @@ -342,7 +362,7 @@ def main(): # Assign roles result['changed'] = True if module._diff: - result['diff'] = dict(before=assigned_roles_before, after=update_roles) + result['diff'] = dict(before={"roles": assigned_roles_before}, after={"roles": update_roles}) if module.check_mode: module.exit_json(**result) kc.add_user_rolemapping(uid=uid, cid=cid, role_rep=update_roles, realm=realm) @@ -357,7 +377,7 @@ def main(): # Remove mapping of role result['changed'] = True if module._diff: - result['diff'] = dict(before=assigned_roles_before, after=update_roles) + result['diff'] = dict(before={"roles": assigned_roles_before}, after={"roles": update_roles}) if module.check_mode: module.exit_json(**result) kc.delete_user_rolemapping(uid=uid, cid=cid, role_rep=update_roles, realm=realm) diff --git a/plugins/modules/keycloak_userprofile.py b/plugins/modules/keycloak_userprofile.py index 49b52c4521..9760a17ecf 100644 --- a/plugins/modules/keycloak_userprofile.py +++ b/plugins/modules/keycloak_userprofile.py @@ -14,11 +14,11 @@ module: keycloak_userprofile short_description: Allows managing Keycloak User Profiles description: - - This module allows you to create, update, or delete Keycloak User Profiles using the Keycloak API. You can also customize the "Unmanaged Attributes" - with it. - - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation at - U(https://www.keycloak.org/docs-api/24.0.5/rest-api/index.html). - For compatibility reasons, the module also accepts the camelCase versions of the options. + - This module allows you to create, update, or delete Keycloak User Profiles using the Keycloak API. You can also customize + the "Unmanaged Attributes" with it. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/24.0.5/rest-api/index.html). 
For compatibility reasons, the module also accepts
+    the camelCase versions of the options.
 version_added: "9.4.0"

 attributes:
@@ -33,8 +33,8 @@ options:
   state:
     description:
       - State of the User Profile provider.
-      - On V(present), the User Profile provider will be created if it does not yet exist, or updated with the parameters you provide.
-      - On V(absent), the User Profile provider will be removed if it exists.
+      - On V(present), the User Profile provider is created if it does not yet exist, or updated with the parameters you provide.
+      - On V(absent), the User Profile provider is removed if it exists.
     default: 'present'
     type: str
     choices:
@@ -170,7 +170,7 @@ options:

           group:
             description:
-              - Specifies the User Profile group where this attribute will be added.
+              - Specifies the User Profile group where this attribute is added.
             type: str

           permissions:
@@ -264,8 +264,8 @@ options:
           - ADMIN_VIEW

 notes:
-  - Currently, only a single V(declarative-user-profile) entry is supported for O(provider_id) (design of the Keyckoak API). However, there can
-    be multiple O(config.kc_user_profile_config[].attributes[]) entries.
+  - Currently, only a single V(declarative-user-profile) entry is supported for O(provider_id) (design of the Keycloak API).
+    However, there can be multiple O(config.kc_user_profile_config[].attributes[]) entries.
 extends_documentation_fragment:
   - community.general.keycloak
   - community.general.keycloak.actiongroup_keycloak
@@ -405,7 +405,6 @@ data:
   description: The data returned by the Keycloak API.
   returned: when state is present
   type: dict
-  sample: {'...': '...'}
 """

 from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
@@ -455,7 +454,6 @@ def main():
         ),
         config=dict(
             type='dict',
-            required=False,
             options={
                 'kc_user_profile_config': dict(
                     type='list',
@@ -465,7 +463,6 @@ def main():
                         'attributes': dict(
                             type='list',
                             elements='dict',
-                            required=False,
                             options={
                                 'name': dict(type='str', required=True),
                                 'display_name': dict(type='str', aliases=['displayName'], required=True),
@@ -475,17 +472,17 @@ def main():
                                 'length': dict(
                                     type='dict',
                                     options={
-                                        'min': dict(type='int', required=False),
+                                        'min': dict(type='int'),
                                         'max': dict(type='int', required=True)
                                     }
                                 ),
-                                'email': dict(type='dict', required=False),
-                                'username_prohibited_characters': dict(type='dict', aliases=['usernameProhibitedCharacters'], required=False),
-                                'up_username_not_idn_homograph': dict(type='dict', aliases=['upUsernameNotIdnHomograph'], required=False),
-                                'person_name_prohibited_characters': dict(type='dict', aliases=['personNameProhibitedCharacters'], required=False),
-                                'uri': dict(type='dict', required=False),
-                                'pattern': dict(type='dict', required=False),
-                                'options': dict(type='dict', required=False)
+                                'email': dict(type='dict'),
+                                'username_prohibited_characters': dict(type='dict', aliases=['usernameProhibitedCharacters']),
+                                'up_username_not_idn_homograph': dict(type='dict', aliases=['upUsernameNotIdnHomograph']),
+                                'person_name_prohibited_characters': dict(type='dict', aliases=['personNameProhibitedCharacters']),
+                                'uri': dict(type='dict'),
+                                'pattern': dict(type='dict'),
+                                'options': dict(type='dict')
                             }
                         ),
                         'annotations': dict(type='dict'),
@@ -512,15 +509,15 @@ def main():
                     options={
                         'name': dict(type='str', required=True),
                         'display_header': dict(type='str', aliases=['displayHeader'], required=True),
-                        'display_description': dict(type='str', aliases=['displayDescription'], required=False),
-                        'annotations': dict(type='dict', required=False)
+
'display_description': dict(type='str', aliases=['displayDescription']),
+                        'annotations': dict(type='dict')
                     }
                 ),
                 'unmanaged_attribute_policy': dict(
                     type='str',
                     aliases=['unmanagedAttributePolicy'],
                     choices=['ENABLED', 'ADMIN_EDIT', 'ADMIN_VIEW'],
-                    required=False
+
                 )
             }
         )
@@ -532,8 +529,10 @@ def main():

     module = AnsibleModule(argument_spec=argument_spec,
                            supports_check_mode=True,
-                           required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
-                           required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+                           required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+                           required_together=([['auth_username', 'auth_password']]),
+                           required_by={'refresh_token': 'auth_realm'},
+                           )

     # Initialize the result object. Only "changed" seems to have special
     # meaning for Ansible.
diff --git a/plugins/modules/keyring.py b/plugins/modules/keyring.py
index e96d01ac44..eef59dd10a 100644
--- a/plugins/modules/keyring.py
+++ b/plugins/modules/keyring.py
@@ -19,8 +19,8 @@ author:
   - Alexander Hussey (@ahussey-redhat)
 short_description: Set or delete a passphrase using the Operating System's native keyring
 description: >-
-  This module uses the L(keyring Python library, https://pypi.org/project/keyring/) to set or delete passphrases for a given service and username
-  from the OS' native keyring.
+  This module uses the L(keyring Python library, https://pypi.org/project/keyring/) to set or delete passphrases for a given
+  service and username from the OS' native keyring.
 requirements:
   - keyring (Python library)
   - gnome-keyring (application - required for headless Gnome keyring access)
@@ -206,10 +206,10 @@ def run_module():
         username=dict(type="str", required=True),
         keyring_password=dict(type="str", required=True, no_log=True),
         user_password=dict(
-            type="str", required=False, no_log=True, aliases=["password"]
+            type="str", no_log=True, aliases=["password"]
         ),
         state=dict(
-            type="str", required=False, default="present", choices=["absent", "present"]
+            type="str", default="present", choices=["absent", "present"]
         ),
     )
diff --git a/plugins/modules/keyring_info.py b/plugins/modules/keyring_info.py
index 9fcd4d5d92..836ecafdde 100644
--- a/plugins/modules/keyring_info.py
+++ b/plugins/modules/keyring_info.py
@@ -19,8 +19,8 @@ author:
   - Alexander Hussey (@ahussey-redhat)
 short_description: Get a passphrase using the Operating System's native keyring
 description: >-
-  This module uses the L(keyring Python library, https://pypi.org/project/keyring/) to retrieve passphrases for a given service and username from
-  the OS' native keyring.
+  This module uses the L(keyring Python library, https://pypi.org/project/keyring/) to retrieve passphrases for a given service
+  and username from the OS' native keyring.
 requirements:
   - keyring (Python library)
   - gnome-keyring (application - required for headless Linux keyring access)
diff --git a/plugins/modules/kibana_plugin.py b/plugins/modules/kibana_plugin.py
index 953d3518a6..b975e2dcea 100644
--- a/plugins/modules/kibana_plugin.py
+++ b/plugins/modules/kibana_plugin.py
@@ -59,11 +59,11 @@ options:
   version:
     description:
       - Version of the plugin to be installed.
-      - If plugin exists with previous version, plugin will B(not) be updated unless O(force) is set to V(true).
+      - If the plugin is installed in a previous version, it is B(not) updated unless O(force=true).
     type: str
   force:
     description:
-      - Delete and re-install the plugin. Can be useful for plugins update.
+      - Delete and re-install the plugin. It can be useful for plugin updates.
     type: bool
     default: false
   allow_root:
@@ -102,21 +102,13 @@ name:
   returned: success
   type: str
 url:
-  description: The url from where the plugin is installed from.
+  description: The URL the plugin is installed from.
   returned: success
   type: str
 timeout:
   description: The timeout for plugin download.
   returned: success
   type: str
-stdout:
-  description: The command stdout.
-  returned: success
-  type: str
-stderr:
-  description: The command stderr.
-  returned: success
-  type: str
 state:
   description: The state for the managed plugin.
   returned: success
@@ -236,11 +228,11 @@ def main():
         argument_spec=dict(
             name=dict(required=True),
             state=dict(default="present", choices=list(PACKAGE_STATE_MAP.keys())),
-            url=dict(default=None),
+            url=dict(),
             timeout=dict(default="1m"),
             plugin_bin=dict(default="/opt/kibana/bin/kibana", type="path"),
             plugin_dir=dict(default="/opt/kibana/installedPlugins/", type="path"),
-            version=dict(default=None),
+            version=dict(),
             force=dict(default=False, type="bool"),
             allow_root=dict(default=False, type="bool"),
         ),
diff --git a/plugins/modules/krb_ticket.py b/plugins/modules/krb_ticket.py
index d93966e501..3a01944535 100644
--- a/plugins/modules/krb_ticket.py
+++ b/plugins/modules/krb_ticket.py
@@ -30,7 +30,7 @@ options:
   principal:
     description:
       - The principal name.
-      - If not set, the user running this module will be used.
+      - If not set, the user running this module is used.
     type: str
   state:
     description:
@@ -50,21 +50,24 @@ options:
       - Use O(cache_name) as the ticket cache name and location.
       - If this option is not used, the default cache name and location are used.
       - The default credentials cache may vary between systems.
-      - If not set the the value of E(KRB5CCNAME) environment variable will be used instead, its value is used to name the default ticket cache.
+      - If not set, the value of the E(KRB5CCNAME) environment variable is used instead; its value is used to name the default
+        ticket cache.
     type: str
   lifetime:
     description:
       - Requests a ticket with the lifetime, if the O(lifetime) is not specified, the default ticket lifetime is used.
-      - Specifying a ticket lifetime longer than the maximum ticket lifetime (configured by each site) will not override the configured maximum
-        ticket lifetime.
-      - 'The value for O(lifetime) must be followed by one of the following suffixes: V(s) - seconds, V(m) - minutes, V(h) - hours, V(d) - days.'
-      - You cannot mix units; a value of V(3h30m) will result in an error.
+      - Specifying a ticket lifetime longer than the maximum ticket lifetime (configured by each site) does not override the
+        configured maximum ticket lifetime.
+      - 'The value for O(lifetime) must be followed by one of the following suffixes: V(s) - seconds, V(m) - minutes, V(h)
+        - hours, V(d) - days.'
+      - You cannot mix units; a value of V(3h30m) results in an error.
       - See U(https://web.mit.edu/kerberos/krb5-1.12/doc/basic/date_format.html) for reference.
     type: str
   start_time:
     description:
       - Requests a postdated ticket.
-      - Postdated tickets are issued with the invalid flag set, and need to be resubmitted to the KDC for validation before use.
+      - Postdated tickets are issued with the invalid flag set, and need to be resubmitted to the KDC for validation before
+        use.
       - O(start_time) specifies the duration of the delay before the ticket can become valid.
       - You can use absolute time formats, for example V(July 27, 2012 at 20:30) you would neet to set O(start_time=20120727203000).
      - You can also use time duration format similar to O(lifetime) or O(renewable).
@@ -73,9 +76,9 @@ options:
   renewable:
     description:
       - Requests renewable tickets, with a total lifetime equal to O(renewable).
-      - 'The value for O(renewable) must be followed by one of the following delimiters: V(s) - seconds, V(m) - minutes, V(h) - hours, V(d)
-        - days.'
-      - You cannot mix units; a value of V(3h30m) will result in an error.
+      - 'The value for O(renewable) must be followed by one of the following delimiters: V(s) - seconds, V(m) - minutes, V(h)
+        - hours, V(d) - days.'
+      - You cannot mix units; a value of V(3h30m) results in an error.
       - See U(https://web.mit.edu/kerberos/krb5-1.12/doc/basic/date_format.html) for reference.
     type: str
   forwardable:
@@ -96,7 +99,8 @@ options:
     type: bool
   canonicalization:
     description:
-      - Requests canonicalization of the principal name, and allows the KDC to reply with a different client principal from the one requested.
+      - Requests canonicalization of the principal name, and allows the KDC to reply with a different client principal from
+        the one requested.
     type: bool
   enterprise:
     description:
@@ -115,7 +119,7 @@ options:
   keytab:
     description:
       - Requests a ticket, obtained from a key in the local host's keytab.
-      - If O(keytab_path) is not specified will try to use default client keytab path (C(-i) option).
+      - If O(keytab_path) is not specified, it tries to use the default client keytab path (C(-i) option).
     type: bool
   keytab_path:
     description:
diff --git a/plugins/modules/launchd.py b/plugins/modules/launchd.py
index ea2163964b..310e1af9b1 100644
--- a/plugins/modules/launchd.py
+++ b/plugins/modules/launchd.py
@@ -37,11 +37,12 @@ options:
     version_added: 10.1.0
   state:
     description:
-      - V(started)/V(stopped) are idempotent actions that will not run commands unless necessary.
-      - Launchd does not support V(restarted) nor V(reloaded) natively. These will trigger a stop/start (restarted) or an unload/load (reloaded).
+      - V(started)/V(stopped) are idempotent actions that do not run commands unless necessary.
+      - C(launchd) does not support V(restarted) nor V(reloaded) natively. These states trigger a stop/start (restarted) or
+        an unload/load (reloaded).
       - V(restarted) unloads and loads the service before start to ensure that the latest job definition (plist) is used.
-      - V(reloaded) unloads and loads the service to ensure that the latest job definition (plist) is used. Whether a service is started or stopped
-        depends on the content of the definition file.
+      - V(reloaded) unloads and loads the service to ensure that the latest job definition (plist) is used. Whether a service
+        is started or stopped depends on the content of the definition file.
     type: str
     choices: [reloaded, restarted, started, stopped, unloaded]
   enabled:
@@ -52,8 +53,8 @@ options:
   force_stop:
     description:
       - Whether the service should not be restarted automatically by launchd.
-      - Services might have the 'KeepAlive' attribute set to true in a launchd configuration. In case this is set to true, stopping a service
-        will cause that launchd starts the service again.
+      - Services might have the C(KeepAlive) attribute set to V(true) in a launchd configuration. In case this is set to V(true),
+        stopping a service causes C(launchd) to start the service again.
       - Set this option to V(true) to let this module change the C(KeepAlive) attribute to V(false).
     type: bool
     default: false
@@ -110,16 +111,16 @@ EXAMPLES = r"""

 RETURN = r"""
 status:
-  description: Metadata about service status.
-  returned: always
-  type: dict
-  sample:
-    {
-      "current_pid": "-",
-      "current_state": "stopped",
-      "previous_pid": "82636",
-      "previous_state": "running"
-    }
+    description: Metadata about service status.
+    returned: always
+    type: dict
+    sample:
+      {
+        "current_pid": "-",
+        "current_state": "stopped",
+        "previous_pid": "82636",
+        "previous_state": "running"
+      }
 """

 import os
diff --git a/plugins/modules/layman.py b/plugins/modules/layman.py
index 21b4eba9a4..b19428d9f9 100644
--- a/plugins/modules/layman.py
+++ b/plugins/modules/layman.py
@@ -15,8 +15,8 @@ module: layman
 author: "Jakub Jirutka (@jirutka)"
 short_description: Manage Gentoo overlays
 description:
-  - Uses Layman to manage an additional repositories for the Portage package manager on Gentoo Linux. Please note that Layman must be installed
-    on a managed node prior using this module.
+  - Uses Layman to manage additional repositories for the Portage package manager on Gentoo Linux. Please note that Layman
+    must be installed on a managed node prior to using this module.
 requirements:
   - layman python module
 extends_documentation_fragment:
@@ -29,13 +29,14 @@ attributes:
 options:
   name:
     description:
-      - The overlay id to install, synchronize, or uninstall. Use V(ALL) to sync all of the installed overlays (can be used only when O(state=updated)).
+      - The overlay ID to install, synchronize, or uninstall. Use V(ALL) to sync all of the installed overlays (can be used
+        only when O(state=updated)).
     required: true
     type: str
   list_url:
     description:
-      - An URL of the alternative overlays list that defines the overlay to install. This list will be fetched and saved under C(${overlay_defs}/${name}.xml),
-        where C(overlay_defs) is read from the Layman's configuration.
+      - A URL of the alternative overlays list that defines the overlay to install. This list is fetched and saved under
+        C(${overlay_defs}/${name}.xml), where C(overlay_defs) is read from Layman's configuration.
     aliases: [url]
     type: str
   state:
@@ -46,8 +47,7 @@ options:
     type: str
   validate_certs:
     description:
-      - If V(false), SSL certificates will not be validated. This should only be set to V(false) when no other option exists. Prior to 1.9.3 the
-        code defaulted to V(false).
+      - If V(false), SSL certificates are not validated. This should only be set to V(false) when no other option exists.
     type: bool
     default: true
 """
@@ -236,7 +236,7 @@ def main():
             name=dict(required=True),
             list_url=dict(aliases=['url']),
             state=dict(default="present", choices=['present', 'absent', 'updated']),
-            validate_certs=dict(required=False, default=True, type='bool'),
+            validate_certs=dict(default=True, type='bool'),
         ),
         supports_check_mode=True
     )
diff --git a/plugins/modules/ldap_attrs.py b/plugins/modules/ldap_attrs.py
index 144aff712f..592da93a63 100644
--- a/plugins/modules/ldap_attrs.py
+++ b/plugins/modules/ldap_attrs.py
@@ -19,9 +19,10 @@ description:
   - Add or remove multiple LDAP attribute values.
 notes:
   - This only deals with attributes on existing entries. To add or remove whole entries, see M(community.general.ldap_entry).
-  - For O(state=present) and O(state=absent), all value comparisons are performed on the server for maximum accuracy. For O(state=exact), values
-    have to be compared in Python, which obviously ignores LDAP matching rules. This should work out in most cases, but it is theoretically possible
-    to see spurious changes when target and actual values are semantically identical but lexically distinct.
+  - For O(state=present) and O(state=absent), all value comparisons are performed on the server for maximum accuracy. For
+    O(state=exact), values have to be compared in Python, which obviously ignores LDAP matching rules. This should work out
+    in most cases, but it is theoretically possible to see spurious changes when target and actual values are semantically
+    identical but lexically distinct.
 version_added: '0.2.0'
 author:
   - Jiri Tyr (@jtyr)
@@ -42,26 +43,26 @@ options:
     choices: [present, absent, exact]
     default: present
     description:
-      - The state of the attribute values. If V(present), all given attribute values will be added if they are missing. If V(absent), all given
-        attribute values will be removed if present. If V(exact), the set of attribute values will be forced to exactly those provided and no
-        others. If O(state=exact) and the attribute value is empty, all values for this attribute will be removed.
+      - The state of the attribute values. If V(present), all given attribute values are added if they are missing. If V(absent),
+        all given attribute values are removed if present. If V(exact), the set of attribute values is forced to exactly those
+        provided and no others. If O(state=exact) and the attribute value is empty, all values for this attribute are removed.
   attributes:
     required: true
     type: dict
     description:
      - The attribute(s) and value(s) to add or remove.
      - Each attribute value can be a string for single-valued attributes or a list of strings for multi-valued attributes.
-      - If you specify values for this option in YAML, please note that you can improve readability for long string values by using YAML block
-        modifiers as seen in the examples for this module.
-      - Note that when using values that YAML/ansible-core interprets as other types, like V(yes), V(no) (booleans), or V(2.10) (float), make
-        sure to quote them if these are meant to be strings. Otherwise the wrong values may be sent to LDAP.
+      - If you specify values for this option in YAML, please note that you can improve readability for long string values
+        by using YAML block modifiers as seen in the examples for this module.
+      - Note that when using values that YAML/ansible-core interprets as other types, like V(yes), V(no) (booleans), or V(2.10)
+        (float), make sure to quote them if these are meant to be strings. Otherwise the wrong values may be sent to LDAP.
   ordered:
     required: false
    type: bool
    default: false
    description:
-      - If V(true), prepend list values with X-ORDERED index numbers in all attributes specified in the current task. This is useful mostly with
-        C(olcAccess) attribute to easily manage LDAP Access Control Lists.
+      - If V(true), prepend list values with X-ORDERED index numbers in all attributes specified in the current task. This
+        is useful mostly with the C(olcAccess) attribute to easily manage LDAP Access Control Lists.
 extends_documentation_fragment:
   - community.general.ldap.documentation
   - community.general.attributes
@@ -295,7 +296,7 @@ def main():
     module = AnsibleModule(
         argument_spec=gen_specs(
             attributes=dict(type='dict', required=True),
-            ordered=dict(type='bool', default=False, required=False),
+            ordered=dict(type='bool', default=False),
             state=dict(type='str', default='present', choices=['absent', 'exact', 'present']),
         ),
         supports_check_mode=True,
diff --git a/plugins/modules/ldap_entry.py b/plugins/modules/ldap_entry.py
index 17d2267243..230f6337ab 100644
--- a/plugins/modules/ldap_entry.py
+++ b/plugins/modules/ldap_entry.py
@@ -15,8 +15,8 @@ DOCUMENTATION = r"""
 module: ldap_entry
 short_description: Add or remove LDAP entries
 description:
-  - Add or remove LDAP entries. This module only asserts the existence or non-existence of an LDAP entry, not its attributes. To assert the attribute
-    values of an entry, see M(community.general.ldap_attrs).
+  - Add or remove LDAP entries. This module only asserts the existence or non-existence of an LDAP entry, not its attributes.
+    To assert the attribute values of an entry, see M(community.general.ldap_attrs).
 author:
   - Jiri Tyr (@jtyr)
 requirements:
@@ -29,18 +29,19 @@ attributes:
 options:
   attributes:
     description:
-      - If O(state=present), attributes necessary to create an entry. Existing entries are never modified. To assert specific attribute values
-        on an existing entry, use M(community.general.ldap_attrs) module instead.
+      - If O(state=present), attributes necessary to create an entry. Existing entries are never modified. To assert specific
+        attribute values on an existing entry, use M(community.general.ldap_attrs) module instead.
       - Each attribute value can be a string for single-valued attributes or a list of strings for multi-valued attributes.
-      - If you specify values for this option in YAML, please note that you can improve readability for long string values by using YAML block
-        modifiers as seen in the examples for this module.
-      - Note that when using values that YAML/ansible-core interprets as other types, like V(yes), V(no) (booleans), or V(2.10) (float), make
-        sure to quote them if these are meant to be strings. Otherwise the wrong values may be sent to LDAP.
+      - If you specify values for this option in YAML, please note that you can improve readability for long string values
+        by using YAML block modifiers as seen in the examples for this module.
+      - Note that when using values that YAML/ansible-core interprets as other types, like V(yes), V(no) (booleans), or V(2.10)
+        (float), make sure to quote them if these are meant to be strings. Otherwise the wrong values may be sent to LDAP.
     type: dict
     default: {}
   objectClass:
     description:
-      - If O(state=present), value or list of values to use when creating the entry. It can either be a string or an actual list of strings.
+      - If O(state=present), value or list of values to use when creating the entry. It can either be a string or an actual
+        list of strings.
     type: list
     elements: str
   state:
diff --git a/plugins/modules/ldap_inc.py b/plugins/modules/ldap_inc.py
index 2715ac7ce6..224027f666 100644
--- a/plugins/modules/ldap_inc.py
+++ b/plugins/modules/ldap_inc.py
@@ -15,25 +15,21 @@ from __future__ import absolute_import, division, print_function
 __metaclass__ = type

-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
 module: ldap_inc
 short_description: Use the Modify-Increment LDAP V3 feature to increment an attribute value
 version_added: 10.2.0
 description:
   - Atomically increments the value of an attribute and return its new value.
 notes:
-  - When implemented by the directory server, the module uses the ModifyIncrement extension
-    defined in L(RFC4525, https://www.rfc-editor.org/rfc/rfc4525.html) and the control PostRead. This extension and the control are
-    implemented in OpenLdap but not all directory servers implement them. In this case, the
-    module automatically uses a more classic method based on two phases, first the current
-    value is read then the modify operation remove the old value and add the new one in a
-    single request. If the value has changed by a concurrent call then the remove action will
-    fail. Then the sequence is retried 3 times before raising an error to the playbook. In an
-    heavy modification environment, the module does not guarante to be systematically successful.
-  - This only deals with integer attribute of an existing entry. To modify attributes
-    of an entry, see M(community.general.ldap_attrs) or to add or remove whole entries,
-    see M(community.general.ldap_entry).
+  - When implemented by the directory server, the module uses the ModifyIncrement extension defined in L(RFC4525, https://www.rfc-editor.org/rfc/rfc4525.html)
+    and the control PostRead. This extension and the control are implemented in OpenLDAP, but not all directory servers implement
+    them. In that case, the module automatically falls back to a more classic two-phase method: first the current value is
+    read, then the modify operation removes the old value and adds the new one in a single request. If the value has been
+    changed by a concurrent call, the remove action fails; the sequence is then retried 3 times before raising an error to
+    the playbook. In a heavy modification environment, the module does not guarantee to be systematically successful.
+  - This only deals with integer attributes of an existing entry. To modify attributes of an entry, see M(community.general.ldap_attrs)
+    or to add or remove whole entries, see M(community.general.ldap_entry).
 author:
   - Philippe Duveau (@pduveau)
 requirements:
@@ -71,11 +67,10 @@ options:
 extends_documentation_fragment:
   - community.general.ldap.documentation
   - community.general.attributes
-
-'''
+"""

-EXAMPLES = r'''
+EXAMPLES = r"""
 - name: Increments uidNumber 1 Number for example.com
   community.general.ldap_inc:
     dn: "cn=uidNext,ou=unix-management,dc=example,dc=com"
@@ -90,10 +85,10 @@
     attributes:
       - uidNumber: "{{ ldap_uidNumber_sequence.value }}"
   when: ldap_uidNumber_sequence.incremented
-'''
+"""

-RETURN = r'''
+RETURN = r"""
 incremented:
   description:
     - It is set to V(true) if the attribute value has changed.
@@ -121,7 +116,7 @@ rfc4525:
   returned: success
   type: bool
   sample: true
-'''
+"""

 from ansible.module_utils.basic import AnsibleModule, missing_required_lib
 from ansible.module_utils.common.text.converters import to_native, to_bytes
@@ -158,7 +153,7 @@ def main():
     module = AnsibleModule(
         argument_spec=gen_specs(
             attribute=dict(type='str', required=True),
-            increment=dict(type='int', default=1, required=False),
+            increment=dict(type='int', default=1),
             method=dict(type='str', default='auto', choices=['auto', 'rfc4525', 'legacy']),
         ),
         supports_check_mode=True,
diff --git a/plugins/modules/ldap_passwd.py b/plugins/modules/ldap_passwd.py
index 791a11dca9..b29254f8c6 100644
--- a/plugins/modules/ldap_passwd.py
+++ b/plugins/modules/ldap_passwd.py
@@ -13,8 +13,8 @@ DOCUMENTATION = r"""
 module: ldap_passwd
 short_description: Set passwords in LDAP
 description:
-  - Set a password for an LDAP entry. This module only asserts that a given password is valid for a given entry. To assert the existence of an
-    entry, see M(community.general.ldap_entry).
+  - Set a password for an LDAP entry. This module only asserts that a given password is valid for a given entry. To assert
+    the existence of an entry, see M(community.general.ldap_entry).
 author:
   - Keller Fuchs (@KellerFuchs)
 requirements:
diff --git a/plugins/modules/ldap_search.py b/plugins/modules/ldap_search.py
index b06b9e8094..47c4d8d64d 100644
--- a/plugins/modules/ldap_search.py
+++ b/plugins/modules/ldap_search.py
@@ -52,21 +52,22 @@ options:
     default: false
     type: bool
     description:
-      - Set to V(true) to return the full attribute schema of entries, not their attribute values. Overrides O(attrs) when provided.
+      - Set to V(true) to return the full attribute schema of entries, not their attribute values. Overrides O(attrs) when
+        provided.
   page_size:
     default: 0
     type: int
     description:
-      - The page size when performing a simple paged result search (RFC 2696). This setting can be tuned to reduce issues with timeouts and server
-        limits.
+      - The page size when performing a simple paged result search (RFC 2696). This setting can be tuned to reduce issues
+        with timeouts and server limits.
       - Setting the page size to V(0) (default) disables paged searching.
     version_added: 7.1.0
   base64_attributes:
     description:
-      - If provided, all attribute values returned that are listed in this option will be Base64 encoded.
-      - If the special value V(*) appears in this list, all attributes will be Base64 encoded.
-      - All other attribute values will be converted to UTF-8 strings. If they contain binary data, please note that invalid UTF-8 bytes will
-        be omitted.
+      - If provided, all attribute values returned that are listed in this option are Base64 encoded.
+      - If the special value V(*) appears in this list, all attributes are Base64 encoded.
+      - All other attribute values are converted to UTF-8 strings. If they contain binary data, please note that invalid UTF-8
+        bytes are omitted.
     type: list
     elements: str
     version_added: 7.0.0
@@ -90,17 +91,16 @@ EXAMPLES = r"""
   register: ldap_group_gids
 """

-RESULTS = """
+# @FIXME RV 'results' is meant to be used when 'loop:' was used with the module.
+RESULTS = r"""
 results:
   description:
-    - For every entry found, one dictionary will be returned.
+    - For every entry found, one dictionary is returned.
     - Every dictionary contains a key C(dn) with the entry's DN as a value.
-    - Every attribute of the entry found is added to the dictionary. If the key
-      has precisely one value, that value is taken directly, otherwise the key's
-      value is a list.
-    - Note that all values (for single-element lists) and list elements (for multi-valued
-      lists) will be UTF-8 strings. Some might contain Base64-encoded binary data; which
-      ones is determined by the O(base64_attributes) option.
+    - Every attribute of the entry found is added to the dictionary. If the key has precisely one value, that value is taken
+      directly, otherwise the key's value is a list.
+    - Note that all values (for single-element lists) and list elements (for multi-valued lists) are UTF-8 strings. Some might
+      contain Base64-encoded binary data; which ones is determined by the O(base64_attributes) option.
   type: list
   elements: dict
 """
diff --git a/plugins/modules/librato_annotation.py b/plugins/modules/librato_annotation.py
index 0f7e9d794c..1087cb426c 100644
--- a/plugins/modules/librato_annotation.py
+++ b/plugins/modules/librato_annotation.py
@@ -13,7 +13,8 @@ DOCUMENTATION = r"""
 module: librato_annotation
 short_description: Create an annotation in Librato
 description:
-  - Create an annotation event on the given annotation stream :name. If the annotation stream does not exist, it will be created automatically.
+  - Create an annotation event on the given annotation stream O(name). If the annotation stream does not exist, it is created
+    automatically.
 author: "Seth Edwards (@Sedward)"
 requirements: []
 extends_documentation_fragment:
@@ -38,7 +39,7 @@ options:
     type: str
     description:
      - The annotation stream name.
-      - If the annotation stream does not exist, it will be created automatically.
+      - If the annotation stream does not exist, it is created automatically.
    required: false
  title:
    type: str
@@ -49,13 +50,15 @@ options:
  source:
    type: str
    description:
-      - A string which describes the originating source of an annotation when that annotation is tracked across multiple members of a population.
+      - A string which describes the originating source of an annotation when that annotation is tracked across multiple members
+        of a population.
    required: false
  description:
    type: str
    description:
      - The description contains extra metadata about a particular annotation.
-      - The description should contain specifics on the individual annotation for example V(Deployed 9b562b2 shipped new feature foo!).
+      - The description should contain specifics on the individual annotation, for example V(Deployed 9b562b2 shipped new
+        feature foo!).
    required: false
  start_time:
    type: int
@@ -157,12 +160,12 @@ def main():
         argument_spec=dict(
             user=dict(required=True),
             api_key=dict(required=True, no_log=True),
-            name=dict(required=False),
+            name=dict(),
             title=dict(required=True),
-            source=dict(required=False),
-            description=dict(required=False),
-            start_time=dict(required=False, default=None, type='int'),
-            end_time=dict(required=False, default=None, type='int'),
+            source=dict(),
+            description=dict(),
+            start_time=dict(type='int'),
+            end_time=dict(type='int'),
             links=dict(type='list', elements='dict')
         )
     )
diff --git a/plugins/modules/linode.py b/plugins/modules/linode.py
index 9d907c898b..d2c5714d47 100644
--- a/plugins/modules/linode.py
+++ b/plugins/modules/linode.py
@@ -46,8 +46,9 @@ options:
     default: ''
   linode_id:
     description:
-      - Unique ID of a linode server. This value is read-only in the sense that if you specify it on creation of a Linode it will not be used.
-        The Linode API generates these IDs and we can those generated value here to reference a Linode more specifically. This is useful for idempotence.
+      - Unique ID of a Linode server. This value is read-only in the sense that if you specify it on creation of a Linode
+        it is not used. The Linode API generates these IDs and we can use those generated values here to reference a Linode
+        more specifically. This is useful for idempotency.
     aliases: [lid]
     type: int
   additional_disks:
@@ -102,7 +103,7 @@ options:
     type: int
   backupwindow:
     description:
-      - The time window in which backups will be taken.
+      - The time window in which backups are taken.
     type: int
   plan:
     description:
diff --git a/plugins/modules/linode_v4.py b/plugins/modules/linode_v4.py
index cac890f79b..0095cb9002 100644
--- a/plugins/modules/linode_v4.py
+++ b/plugins/modules/linode_v4.py
@@ -17,8 +17,8 @@ requirements:
 author:
   - Luke Murphy (@decentral1se)
 notes:
-  - No Linode resizing is currently implemented. This module will, in time, replace the current Linode module which uses deprecated API bindings
-    on the Linode side.
+  - No Linode resizing is currently implemented. This module aims to replace the current Linode module which uses deprecated
+    API bindings on the Linode side.
 extends_documentation_fragment:
   - community.general.attributes
 attributes:
@@ -43,17 +43,17 @@ options:
     type: str
   label:
     description:
-      - The instance label. This label is used as the main determiner for idempotence for the module and is therefore mandatory.
+      - The instance label. This label is used as the main determiner for idempotency for the module and is therefore mandatory.
     type: str
     required: true
   group:
     description:
-      - The group that the instance should be marked under. Please note, that group labelling is deprecated but still supported. The encouraged
-        method for marking instances is to use tags.
+      - The group that the instance should be marked under. Please note that group labelling is deprecated but still supported.
+        The encouraged method for marking instances is to use tags.
     type: str
   private_ip:
     description:
-      - If V(true), the created Linode will have private networking enabled and assigned a private IPv4 address.
+      - If V(true), the created Linode instance has private networking enabled and a private IPv4 address assigned.
     type: bool
     default: false
     version_added: 3.0.0
@@ -65,8 +65,8 @@ options:
     elements: str
   root_pass:
     description:
-      - The password for the root user. If not specified, one will be generated. This generated password will be available in the task success
-        JSON.
+      - The password for the root user. If not specified, one is generated. This generated password is available in the task
+        success JSON.
     type: str
   authorized_keys:
     description:
@@ -95,8 +95,8 @@ options:
     version_added: 1.3.0
   stackscript_data:
     description:
-      - An object containing arguments to any User Defined Fields present in the StackScript used when creating the instance. Only valid when
-        a O(stackscript_id) is provided.
+      - An object containing arguments to any User Defined Fields present in the StackScript used when creating the instance.
+        Only valid when a O(stackscript_id) is provided.
       - See U(https://www.linode.com/docs/api/stackscripts/).
     type: dict
     version_added: 1.3.0
@@ -128,45 +128,46 @@ instance:
   description: The instance description in JSON serialized form.
   returned: Always.
   type: dict
-  sample: {
-      "root_pass": "foobar",  # if auto-generated
-      "alerts": {
-        "cpu": 90,
-        "io": 10000,
-        "network_in": 10,
-        "network_out": 10,
-        "transfer_quota": 80
-      },
-      "backups": {
-        "enabled": false,
-        "schedule": {
-          "day": null,
-          "window": null
-        }
-      },
-      "created": "2018-09-26T08:12:33",
-      "group": "Foobar Group",
-      "hypervisor": "kvm",
-      "id": 10480444,
-      "image": "linode/centos7",
-      "ipv4": [
-        "130.132.285.233"
-      ],
-      "ipv6": "2a82:7e00::h03c:46ff:fe04:5cd2/64",
-      "label": "lin-foo",
-      "region": "eu-west",
-      "specs": {
-        "disk": 25600,
-        "memory": 1024,
-        "transfer": 1000,
-        "vcpus": 1
-      },
-      "status": "running",
-      "tags": [],
-      "type": "g6-nanode-1",
-      "updated": "2018-09-26T10:10:14",
-      "watchdog_enabled": true
-    }
+  sample:
+    {
+      "root_pass": "foobar",  # if auto-generated
+      "alerts": {
+        "cpu": 90,
+        "io": 10000,
+        "network_in": 10,
+        "network_out": 10,
+        "transfer_quota": 80
+      },
+      "backups": {
+        "enabled": false,
+        "schedule": {
+          "day": null,
+          "window": null
+        }
+      },
+      "created": "2018-09-26T08:12:33",
+      "group": "Foobar Group",
+      "hypervisor": "kvm",
+      "id": 10480444,
+      "image": "linode/centos7",
+      "ipv4": [
+        "130.132.285.233"
+      ],
+      "ipv6": "2a82:7e00::h03c:46ff:fe04:5cd2/64",
+      "label": "lin-foo",
+      "region": "eu-west",
+      "specs": {
+        "disk": 25600,
+        "memory": 1024,
+        "transfer": 1000,
+        "vcpus": 1
+      },
+      "status": "running",
+      "tags": [],
+      "type": "g6-nanode-1",
+      "updated": "2018-09-26T10:10:14",
+      "watchdog_enabled": true
+    }
 """

 import traceback
diff --git a/plugins/modules/listen_ports_facts.py b/plugins/modules/listen_ports_facts.py
index 9f9eb66481..a33c78be3c 100644
--- a/plugins/modules/listen_ports_facts.py
+++ b/plugins/modules/listen_ports_facts.py
@@ -20,7 +20,7 @@ requirements:
 short_description: Gather facts on processes listening on TCP and UDP ports
 notes:
   - C(ss) returns all processes for each listen address and port.
-  - This plugin will return each of them, so multiple entries for the same listen address and port are likely in results.
+  - This plugin returns each of them, so multiple entries for the same listen address and port are likely in results.
 extends_documentation_fragment:
   - community.general.attributes
   - community.general.attributes.facts
@@ -29,7 +29,7 @@ options:
   command:
     description:
       - Override which command to use for fetching listen ports.
-      - By default module will use first found supported command on the system (in alphanumerical order).
+      - By default, the module uses the first supported command found on the system (in alphanumerical order).
     type: str
     choices:
       - netstat
@@ -397,7 +397,7 @@ def main():
             break

     if bin_path is None:
-        raise EnvironmentError(msg='Unable to find any of the supported commands in PATH: {0}'.format(", ".join(sorted(commands_map))))
+        raise EnvironmentError('Unable to find any of the supported commands in PATH: {0}'.format(", ".join(sorted(commands_map))))

     # which ports are listening for connections?
     args = commands_map[command]['args']
diff --git a/plugins/modules/lldp.py b/plugins/modules/lldp.py
index baefb09d91..018d9fc307 100644
--- a/plugins/modules/lldp.py
+++ b/plugins/modules/lldp.py
@@ -22,7 +22,12 @@ attributes:
     support: none
   diff_mode:
     support: none
-options: {}
+options:
+  multivalues:
+    description: If C(lldpctl) outputs an attribute multiple times, represent all values as a list.
+    required: false
+    type: bool
+    default: false
 author: "Andy Hill (@andyhky)"
 notes:
   - Requires C(lldpd) running and LLDP enabled on switches.
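The gather_lldp() hunk below folds dotted lldpctl keys (such as C(lldp.eth0.vlan.vlan-id=100)) into nested dicts and, with the new O(multivalues) option, promotes repeated leaf keys from strings to lists. A minimal standalone sketch of that folding idea (the fold() helper and its sample input are illustrative only, not the module's code):

    # Illustrative sketch of the key-folding strategy; not the module's actual code.
    def fold(lines, multivalues=False):
        tree = {}
        for line in lines:
            path, value = line.split("=", 1)
            *parents, leaf = path.split(".")
            node = tree
            for part in parents:
                node = node.setdefault(part, {})
            if leaf not in node or not multivalues:
                node[leaf] = value                # single value, or last one wins
            elif isinstance(node[leaf], list):
                node[leaf].append(value)          # already promoted to a list
            else:
                node[leaf] = [node[leaf], value]  # promote str -> list
        return tree

    print(fold(["lldp.eth0.vlan.vlan-id=100", "lldp.eth0.vlan.vlan-id=200"], multivalues=True))
    # {'lldp': {'eth0': {'vlan': {'vlan-id': ['100', '200']}}}}

With multivalues left at its default of false, the second assignment simply overwrites the first, which matches the module's previous behavior.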
@@ -53,26 +58,49 @@ def gather_lldp(module):
     if output:
         output_dict = {}
         current_dict = {}
-        lldp_entries = output.split("\n")
+        lldp_entries = output.strip().split("\n")
+        final = ""

         for entry in lldp_entries:
             if entry.startswith('lldp'):
                 path, value = entry.strip().split("=", 1)
                 path = path.split(".")
                 path_components, final = path[:-1], path[-1]
+            elif final in current_dict and isinstance(current_dict[final], str):
+                current_dict[final] += '\n' + entry
+                continue
+            elif final in current_dict and isinstance(current_dict[final], list):
+                current_dict[final][-1] += '\n' + entry
+                continue
             else:
-                value = current_dict[final] + '\n' + entry
+                continue

             current_dict = output_dict
             for path_component in path_components:
                 current_dict[path_component] = current_dict.get(path_component, {})
+                if not isinstance(current_dict[path_component], dict):
+                    current_dict[path_component] = {'value': current_dict[path_component]}
                 current_dict = current_dict[path_component]
-            current_dict[final] = value
+
+            if final in current_dict and isinstance(current_dict[final], dict) and module.params['multivalues']:
+                current_dict = current_dict[final]
+                final = 'value'
+
+            if final not in current_dict or not module.params['multivalues']:
+                current_dict[final] = value
+            elif isinstance(current_dict[final], str):
+                current_dict[final] = [current_dict[final], value]
+            elif isinstance(current_dict[final], list):
+                current_dict[final].append(value)
+
     return output_dict


 def main():
-    module = AnsibleModule({})
+    module_args = dict(
+        multivalues=dict(type='bool', default=False)
+    )
+    module = AnsibleModule(module_args)

     lldp_output = gather_lldp(module)
     try:
diff --git a/plugins/modules/locale_gen.py b/plugins/modules/locale_gen.py
index b7bf35266b..2e1932c204 100644
--- a/plugins/modules/locale_gen.py
+++ b/plugins/modules/locale_gen.py
@@ -37,14 +37,14 @@ options:
     choices: [absent, present]
     default: present
 notes:
-  - If C(/etc/locale.gen) exists, the module will assume to be using the B(glibc) mechanism, else if C(/var/lib/locales/supported.d/)
-    exists it will assume to be using the B(ubuntu_legacy) mechanism, else it will raise an error.
-  - When using glibc mechanism, it will manage locales by editing C(/etc/locale.gen) and running C(locale-gen).
-  - When using ubuntu_legacy mechanism, it will manage locales by editing C(/var/lib/locales/supported.d/local) and then running
+  - If C(/etc/locale.gen) exists, the module assumes it is using the B(glibc) mechanism, else if C(/var/lib/locales/supported.d/)
+    exists it assumes it is using the B(ubuntu_legacy) mechanism, else it raises an error.
+  - When using the glibc mechanism, it manages locales by editing C(/etc/locale.gen) and running C(locale-gen).
+  - When using the ubuntu_legacy mechanism, it manages locales by editing C(/var/lib/locales/supported.d/local) and then running
     C(locale-gen).
-  - Please note that the code path that uses ubuntu_legacy mechanism has not been tested for a while, because Ubuntu is already using
-    the glibc mechanism. There is no support for that, given our inability to test it. Therefore, that mechanism is B(deprecated)
-    and will be removed in community.general 13.0.0.
+  - Please note that the code path that uses the ubuntu_legacy mechanism has not been tested for a while, because Ubuntu is
+    already using the glibc mechanism. There is no support for that, given our inability to test it. Therefore, that mechanism
+    is B(deprecated) and will be removed in community.general 13.0.0.
   - Currently the module is B(only supported for Debian and Ubuntu) systems.
   - This module requires the package C(locales) installed in Debian and Ubuntu systems.
 """
@@ -111,7 +111,6 @@ class LocaleGen(StateModuleHelper):
         ),
         supports_check_mode=True,
     )
-    use_old_vardict = False

     def __init_module__(self):
         self.MECHANISMS = dict(
diff --git a/plugins/modules/logentries.py b/plugins/modules/logentries.py
index 420f054fac..69e83f5e49 100644
--- a/plugins/modules/logentries.py
+++ b/plugins/modules/logentries.py
@@ -139,8 +139,8 @@ def main():
         argument_spec=dict(
             path=dict(required=True),
             state=dict(default="present", choices=["present", "followed", "absent", "unfollowed"]),
-            name=dict(required=False, default=None, type='str'),
-            logtype=dict(required=False, default=None, type='str', aliases=['type'])
+            name=dict(type='str'),
+            logtype=dict(type='str', aliases=['type'])
         ),
         supports_check_mode=True
     )
diff --git a/plugins/modules/logentries_msg.py b/plugins/modules/logentries_msg.py
index dd3b88d624..8b2a7c5155 100644
--- a/plugins/modules/logentries_msg.py
+++ b/plugins/modules/logentries_msg.py
@@ -45,7 +45,7 @@ options:
 author: "Jimmy Tang (@jcftang) "
 """

-RETURN = """# """
+RETURN = """#"""

 EXAMPLES = r"""
 - name: Send a message to logentries
diff --git a/plugins/modules/logstash_plugin.py b/plugins/modules/logstash_plugin.py
index ba7bdc2cc5..afacf7767f 100644
--- a/plugins/modules/logstash_plugin.py
+++ b/plugins/modules/logstash_plugin.py
@@ -49,7 +49,7 @@ options:
   version:
     type: str
     description:
-      - Specify plugin Version of the plugin to install. If plugin exists with previous version, it will NOT be updated.
+      - Specify the version of the plugin to install. If the plugin exists with a previous version, it is B(not) updated.
 """

 EXAMPLES = r"""
diff --git a/plugins/modules/lvg.py b/plugins/modules/lvg.py
index 01eca7b49c..42d4c9182e 100644
--- a/plugins/modules/lvg.py
+++ b/plugins/modules/lvg.py
@@ -33,13 +33,14 @@ options:
     description:
       - List of comma-separated devices to use as physical devices in this volume group.
       - Required when creating or resizing volume group.
-      - The module will take care of running pvcreate if needed.
+      - The module runs C(pvcreate) if needed.
+      - O(remove_extra_pvs) controls whether or not unspecified physical devices are removed from the volume group.
     type: list
     elements: str
   pesize:
     description:
-      - The size of the physical extent. O(pesize) must be a power of 2 of at least 1 sector (where the sector size is the largest sector size
-        of the PVs currently used in the VG), or at least 128KiB.
+      - The size of the physical extent. O(pesize) must be a power of 2 of at least 1 sector (where the sector size is the
+        largest sector size of the PVs currently used in the VG), or at least 128KiB.
       - O(pesize) can be optionally suffixed by a UNIT (k/K/m/M/g/G), default unit is megabyte.
     type: str
     default: "4"
@@ -63,8 +64,9 @@ options:
     description:
       - Control if the volume group exists and its state.
      - The states V(active) and V(inactive) implies V(present) state. Added in 7.1.0.
-      - If V(active) or V(inactive), the module manages the VG's logical volumes current state. The module also handles the VG's autoactivation
-        state if supported unless when creating a volume group and the autoactivation option specified in O(vg_options).
+      - If V(active) or V(inactive), the module manages the VG's logical volumes current state. The module also handles the
+        VG's autoactivation state if supported, except when creating a volume group with the autoactivation option specified
+        in O(vg_options).
     type: str
     choices: [absent, present, active, inactive]
     default: present
@@ -87,6 +89,12 @@ options:
     type: bool
     default: false
     version_added: 7.1.0
+  remove_extra_pvs:
+    description:
+      - Remove physical volumes from the volume group which are not in O(pvs).
+    type: bool
+    default: true
+    version_added: 10.4.0
 seealso:
   - module: community.general.filesystem
   - module: community.general.lvol
@@ -115,7 +123,9 @@ EXAMPLES = r"""
 - name: Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5.
   community.general.lvg:
     vg: vg.services
-    pvs: /dev/sdb1,/dev/sdc5
+    pvs:
+      - /dev/sdb1
+      - /dev/sdc5

 - name: Remove a volume group with name vg.services
   community.general.lvg:
@@ -138,6 +148,13 @@ EXAMPLES = r"""
     state: active
     vg: vg.services

+- name: Add new PVs to volume group without removing existing ones
+  community.general.lvg:
+    vg: vg.services
+    pvs: /dev/sdb1,/dev/sdc1
+    remove_extra_pvs: false
+    state: present
+
 - name: Reset a volume group UUID
   community.general.lvg:
     state: inactive
@@ -148,7 +165,9 @@ EXAMPLES = r"""
   community.general.lvg:
     state: inactive
     vg: vg.services
-    pvs: /dev/sdb1,/dev/sdc5
+    pvs:
+      - /dev/sdb1
+      - /dev/sdc5
     reset_vg_uuid: true
     reset_pv_uuid: true
 """
@@ -382,6 +401,7 @@ def main():
             force=dict(type='bool', default=False),
             reset_vg_uuid=dict(type='bool', default=False),
             reset_pv_uuid=dict(type='bool', default=False),
+            remove_extra_pvs=dict(type="bool", default=True),
         ),
         required_if=[
             ['reset_pv_uuid', True, ['pvs']],
@@ -398,6 +418,7 @@ def main():
     vgoptions = module.params['vg_options'].split()
     reset_vg_uuid = module.boolean(module.params['reset_vg_uuid'])
     reset_pv_uuid = module.boolean(module.params['reset_pv_uuid'])
+    remove_extra_pvs = module.boolean(module.params["remove_extra_pvs"])

     this_vg = find_vg(module=module, vg=vg)
     present_state = state in ['present', 'active', 'inactive']
@@ -493,6 +514,9 @@ def main():
         devs_to_remove = list(set(current_devs) - set(dev_list))
         devs_to_add = list(set(dev_list) - set(current_devs))

+        if not remove_extra_pvs:
+            devs_to_remove = []
+
         if current_devs:
             if present_state:
                 for device in current_devs:
diff --git a/plugins/modules/lvm_pv.py b/plugins/modules/lvm_pv.py
new file mode 100644
index 0000000000..15740db8c1
--- /dev/null
+++ b/plugins/modules/lvm_pv.py
@@ -0,0 +1,191 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2025, Klention Mali
+# Based on lvol module by Jeroen Hoekx
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r"""
+module: lvm_pv
+short_description: Manage LVM Physical Volumes
+version_added: "11.0.0"
+description:
+  - Creates, resizes or removes LVM Physical Volumes.
+author:
+  - Klention Mali (@klention)
+options:
+  device:
+    description:
+      - Path to the block device to manage.
+    type: path
+    required: true
+  state:
+    description:
+      - Control if the physical volume exists.
+    type: str
+    choices: [present, absent]
+    default: present
+  force:
+    description:
+      - Force the operation.
+      - When O(state=present) (creating a PV), this uses C(pvcreate -f) to force creation.
+      - When O(state=absent) (removing a PV), this uses C(pvremove -ff) to force removal even if part of a volume group.
+    type: bool
+    default: false
+  resize:
+    description:
+      - Resize PV to device size when O(state=present).
+    type: bool
+    default: false
+notes:
+  - Requires LVM2 utilities installed on the target system.
+  - Device path must exist when creating a PV.
+"""
+
+EXAMPLES = r"""
+- name: Creating physical volume on /dev/sdb
+  community.general.lvm_pv:
+    device: /dev/sdb
+
+- name: Creating and resizing (if needed) physical volume
+  community.general.lvm_pv:
+    device: /dev/sdb
+    resize: true
+
+- name: Removing physical volume that is not part of any volume group
+  community.general.lvm_pv:
+    device: /dev/sdb
+    state: absent
+
+- name: Force removing physical volume that is already part of a volume group
+  community.general.lvm_pv:
+    device: /dev/sdb
+    force: true
+    state: absent
+"""
+
+RETURN = r"""
+"""
+
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_pv_status(module, device):
+    """Check if the device is already a PV."""
+    cmd = ['pvs', '--noheadings', '--readonly', device]
+    return module.run_command(cmd)[0] == 0
+
+
+def get_pv_size(module, device):
+    """Get current PV size in bytes."""
+    cmd = ['pvs', '--noheadings', '--nosuffix', '--units', 'b', '-o', 'pv_size', device]
+    rc, out, err = module.run_command(cmd, check_rc=True)
+    return int(out.strip())
+
+
+def rescan_device(module, device):
+    """Perform storage rescan for the device."""
+    # Extract the base device name (e.g., /dev/sdb -> sdb)
+    base_device = os.path.basename(device)
+    rescan_path = "/sys/block/{0}/device/rescan".format(base_device)
+
+    if os.path.exists(rescan_path):
+        try:
+            with open(rescan_path, 'w') as f:
+                f.write('1')
+            return True
+        except IOError as e:
+            module.warn("Failed to rescan device {0}: {1}".format(device, str(e)))
+            return False
+    else:
+        module.warn("Rescan path not found for device {0}".format(device))
+        return False
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            device=dict(type='path', required=True),
+            state=dict(type='str', default='present', choices=['present', 'absent']),
+            force=dict(type='bool', default=False),
+            resize=dict(type='bool', default=False),
+        ),
+        supports_check_mode=True,
+    )
+
+    device = module.params['device']
+    state = module.params['state']
+    force = module.params['force']
+    resize = module.params['resize']
+    changed = False
+    actions = []
+
+    # Validate device existence for present state
+    if state == 'present' and not os.path.exists(device):
+        module.fail_json(msg="Device %s not found" % device)
+
+    is_pv = get_pv_status(module, device)
+
+    if state == 'present':
+        # Create PV if needed
+        if not is_pv:
+            if module.check_mode:
+                changed = True
+                actions.append('would be created')
+            else:
+                cmd = ['pvcreate']
+                if force:
+                    cmd.append('-f')
+                cmd.append(device)
+                rc, out, err = module.run_command(cmd, check_rc=True)
+                changed = True
+                actions.append('created')
+                is_pv = True
+
+        # Handle resizing
+        elif resize and is_pv:
+            if module.check_mode:
+                # In check mode, assume resize would change
+                changed = True
+                actions.append('would be resized')
+            else:
+                # Perform a device rescan each time before resizing
+                if rescan_device(module, device):
+                    actions.append('rescanned')
+                original_size = get_pv_size(module, device)
+                rc, out, err = module.run_command(['pvresize', device], check_rc=True)
+                new_size = get_pv_size(module, device)
+                if new_size != original_size:
+                    changed = True
+                    actions.append('resized')
+
+    elif state == 'absent':
+        if is_pv:
+            if module.check_mode:
+                changed = True
+                actions.append('would be removed')
+            else:
+                cmd = ['pvremove', '-y']
+                if force:
+                    cmd.append('-ff')
+                cmd.append(device)
+                rc, out, err = module.run_command(cmd, check_rc=True)
+                changed = True
+                actions.append('removed')
+
+    # Generate final message
+    if actions:
+        msg = "PV %s: %s" % (device, ', '.join(actions))
+    else:
+        msg = "No changes needed for PV %s" % device
+    module.exit_json(changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/lvol.py b/plugins/modules/lvol.py
index 34b24f7570..6166e437f2 100644
--- a/plugins/modules/lvol.py
+++ b/plugins/modules/lvol.py
@@ -38,16 +38,19 @@ options:
   size:
     type: str
     description:
-      - The size of the logical volume, according to lvcreate(8) C(--size), by default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE]
-        units; or according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE|ORIGIN]; Float values must begin with a digit.
-      - When resizing, apart from specifying an absolute size you may, according to lvextend(8)|lvreduce(8) C(--size), specify the amount to extend
-        the logical volume with the prefix V(+) or the amount to reduce the logical volume by with prefix V(-).
+      - The size of the logical volume, according to lvcreate(8) C(--size), by default in megabytes or optionally with one
+        of [bBsSkKmMgGtTpPeE] units; or according to lvcreate(8) C(--extents) as a percentage of [VG|PVS|FREE|ORIGIN]; Float
+        values must begin with a digit.
+      - When resizing, apart from specifying an absolute size you may, according to lvextend(8)|lvreduce(8) C(--size), specify
+        the amount to extend the logical volume with the prefix V(+) or the amount to reduce the logical volume by with prefix
+        V(-).
       - Resizing using V(+) or V(-) was not supported prior to community.general 3.0.0.
       - Please note that when using V(+), V(-), or percentage of FREE, the module is B(not idempotent).
   state:
     type: str
     description:
-      - Control if the logical volume exists. If V(present) and the volume does not already exist then the O(size) option is required.
+      - Control if the logical volume exists. If V(present) and the volume does not already exist then the O(size) option
+        is required.
     choices: [absent, present]
     default: present
   active:
@@ -57,7 +60,8 @@ options:
     default: true
   force:
     description:
-      - Shrink or remove operations of volumes requires this switch. Ensures that that filesystems get never corrupted/destroyed by mistake.
+      - Shrink or remove operations of volumes require this switch. Ensures that filesystems never get corrupted/destroyed
+        by mistake.
     type: bool
     default: false
   opts:
@@ -67,7 +71,8 @@ options:
   snapshot:
     type: str
     description:
-      - The name of a snapshot volume to be configured. When creating a snapshot volume, the O(lv) parameter specifies the origin volume.
+      - The name of a snapshot volume to be configured. When creating a snapshot volume, the O(lv) parameter specifies the
+        origin volume.
   pvs:
     type: list
     elements: str
@@ -85,7 +90,8 @@ options:
   resizefs:
     description:
       - Resize the underlying filesystem together with the logical volume.
-      - Supported for C(ext2), C(ext3), C(ext4), C(reiserfs) and C(XFS) filesystems. Attempts to resize other filesystem types will fail.
+      - Supported for C(ext2), C(ext3), C(ext4), C(reiserfs) and C(XFS) filesystems. Attempts to resize other filesystem types
+        result in failure.
     type: bool
     default: false
 notes:
diff --git a/plugins/modules/lxc_container.py b/plugins/modules/lxc_container.py
index 3285c96c9d..cbd643efdb 100644
--- a/plugins/modules/lxc_container.py
+++ b/plugins/modules/lxc_container.py
@@ -129,13 +129,13 @@ options:
   archive:
     description:
       - Create an archive of a container.
-      - This will create a tarball of the running container.
+      - This creates a tarball of the running container.
     type: bool
     default: false
   archive_path:
     description:
       - Path the save the archived container.
-      - If the path does not exist the archive method will attempt to create it.
+      - If the path does not exist, the archive method attempts to create it.
     type: path
   archive_compression:
     choices:
@@ -157,8 +157,8 @@ options:
     description:
       - Define the state of a container.
       - If you clone a container using O(clone_name) the newly cloned container created in a stopped state.
-      - The running container will be stopped while the clone operation is happening and upon completion of the clone the original container state
-        will be restored.
+      - The running container is stopped while the clone operation is happening and upon completion of the clone the original
+        container state is restored.
     type: str
     default: started
   container_config:
@@ -171,17 +171,18 @@ requirements:
   - 'python3 >= 3.5  # OS Package'
   - 'python3-lxc  # OS Package'
 notes:
-  - Containers must have a unique name. If you attempt to create a container with a name that already exists in the users namespace the module
-    will simply return as "unchanged".
-  - The O(container_command) can be used with any state except V(absent). If used with state V(stopped) the container will be V(started), the
-    command executed, and then the container V(stopped) again. Likewise if O(state=stopped) and the container does not exist it will be first
-    created, V(started), the command executed, and then V(stopped). If you use a C(|) in the variable you can use common script formatting within
-    the variable itself. The O(container_command) option will always execute as C(bash). When using O(container_command), a log file is created in
-    the C(/tmp/) directory which contains both C(stdout) and C(stderr) of any command executed.
-  - If O(archive=true) the system will attempt to create a compressed tarball of the running container. The O(archive) option supports LVM backed
-    containers and will create a snapshot of the running container when creating the archive.
-  - If your distro does not have a package for C(python3-lxc), which is a requirement for this module, it can be installed from source at
-    U(https://github.com/lxc/python3-lxc) or installed using C(pip install lxc).
+  - Containers must have a unique name. If you attempt to create a container with a name that already exists in the user's
+    namespace, the module simply returns as "unchanged".
+  - The O(container_command) can be used with any state except V(absent). If used with state V(stopped), the container is V(started),
+    the command executed, and then the container V(stopped) again. Likewise if O(state=stopped) and the container does not
+    exist, it is first created, V(started), the command executed, and then V(stopped). If you use a C(|) in the variable you
+    can use common script formatting within the variable itself. The O(container_command) option always executes as C(bash).
+    When using O(container_command), a log file is created in the C(/tmp/) directory which contains both C(stdout) and C(stderr)
+    of any command executed.
+  - If O(archive=true) the system attempts to create a compressed tarball of the running container. The O(archive) option
+    supports LVM backed containers and creates a snapshot of the running container when creating the archive.
+ - If your distro does not have a package for C(python3-lxc), which is a requirement for this module, it can be installed + from source at U(https://github.com/lxc/python3-lxc) or installed using C(pip install lxc). """ EXAMPLES = r""" diff --git a/plugins/modules/lxca_cmms.py b/plugins/modules/lxca_cmms.py index 8ece67470b..87b0e2e125 100644 --- a/plugins/modules/lxca_cmms.py +++ b/plugins/modules/lxca_cmms.py @@ -144,8 +144,8 @@ FUNC_DICT = { INPUT_ARG_SPEC = dict( command_options=dict(default='cmms', choices=['cmms', 'cmms_by_uuid', 'cmms_by_chassis_uuid']), - uuid=dict(default=None), - chassis=dict(default=None) + uuid=dict(), + chassis=dict() ) diff --git a/plugins/modules/lxca_nodes.py b/plugins/modules/lxca_nodes.py index f133671114..91d3337b27 100644 --- a/plugins/modules/lxca_nodes.py +++ b/plugins/modules/lxca_nodes.py @@ -175,7 +175,7 @@ INPUT_ARG_SPEC = dict( 'nodes_by_chassis_uuid', 'nodes_status_managed', 'nodes_status_unmanaged']), - uuid=dict(default=None), chassis=dict(default=None) + uuid=dict(), chassis=dict() ) diff --git a/plugins/modules/lxd_container.py b/plugins/modules/lxd_container.py index 3f0ab2607e..4abec5acaa 100644 --- a/plugins/modules/lxd_container.py +++ b/plugins/modules/lxd_container.py @@ -45,8 +45,8 @@ options: required: false config: description: - - 'The config for the instance (for example V({"limits.cpu": "2"})). - - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get).' + - 'The config for the instance (for example V({"limits.cpu": "2"})).' + - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get). - If the instance already exists and its "config" values in metadata obtained from the LXD API U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get) are different, then this module tries to apply the configurations U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_put). @@ -69,8 +69,8 @@ options: elements: str devices: description: - - 'The devices for the instance (for example V({ "rootfs": { "path": "/dev/kvm", "type": "unix-char" }})). - - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get).' + - 'The devices for the instance (for example V({ "rootfs": { "path": "/dev/kvm", "type": "unix-char" }})).' + - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get). type: dict required: false ephemeral: @@ -101,16 +101,17 @@ options: type: str target: description: - - For cluster deployments. Will attempt to create an instance on a target node. If the instance exists elsewhere in a cluster, then it will - not be replaced or moved. The name should respond to same name of the node you see in C(lxc cluster list). + - For cluster deployments. It attempts to create an instance on a target node. If the instance exists elsewhere in a + cluster, then it is not replaced nor moved. The name should respond to same name of the node you see in C(lxc cluster + list). type: str required: false version_added: 1.0.0 timeout: description: - A timeout for changing the state of the instance. - - This is also used as a timeout for waiting until IPv4 addresses are set to the all network interfaces in the instance after starting or - restarting. + - This is also used as a timeout for waiting until IPv4 addresses are set to the all network interfaces in the instance + after starting or restarting. 
required: false default: 30 type: int @@ -126,14 +127,14 @@ options: version_added: 4.1.0 wait_for_ipv4_addresses: description: - - If this is V(true), the C(lxd_container) waits until IPv4 addresses are set to the all network interfaces in the instance after starting - or restarting. + - If this is V(true), the C(lxd_container) waits until IPv4 addresses are set on all network interfaces in the instance + after starting or restarting. required: false default: false type: bool wait_for_container: description: - - If set to V(true), the tasks will wait till the task reports a success status when performing container operations. + - If set to V(true), the module waits until the task reports a success status when performing container operations. default: false type: bool version_added: 4.4.0 @@ -172,20 +173,21 @@ options: trust_password: description: - The client trusted password. - - 'You need to set this password on the LXD server before running this module using the following command: C(lxc config set core.trust_password - ). See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).' + - 'You need to set this password on the LXD server before running this module using the following command: C(lxc config + set core.trust_password ). See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).' - If trust_password is set, this module sends a request for authentication before sending any requests. required: false type: str notes: - - Instances can be a container or a virtual machine, both of them must have unique name. If you attempt to create an instance with a name that - already existed in the users namespace the module will simply return as "unchanged". - - There are two ways to run commands inside a container or virtual machine, using the command module or using the ansible lxd connection plugin - bundled in Ansible >= 2.1, the later requires python to be installed in the instance which can be done with the command module. - - You can copy a file from the host to the instance with the Ansible M(ansible.builtin.copy) and M(ansible.builtin.template) module and the - P(community.general.lxd#connection) connection plugin. See the example below. - - You can copy a file in the created instance to the localhost with C(command=lxc file pull instance_name/dir/filename filename). See the first - example below. + - Instances can be a container or a virtual machine, both of which must have a unique name. If you attempt to create an instance + with a name that already exists in the user's namespace, the module simply returns as "unchanged". + - There are two ways to run commands inside a container or virtual machine, using the command module or using the Ansible + lxd connection plugin bundled in Ansible >= 2.1; the latter requires Python to be installed in the instance, which can be + done with the command module. + - You can copy a file from the host to the instance with the Ansible M(ansible.builtin.copy) and M(ansible.builtin.template) + module and the P(community.general.lxd#connection) connection plugin. See the example below. + - You can copy a file from the created instance to the localhost with C(command=lxc file pull instance_name/dir/filename filename). + See the first example below. - Linuxcontainers.org has phased out LXC/LXD support with March 2024 (U(https://discuss.linuxcontainers.org/t/important-notice-for-lxd-users-image-server/18479)). Currently only Ubuntu is still providing images.
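The two file-transfer patterns described in the notes above can be sketched as a short playbook. This is a hedged illustration only, not part of the diff: the instance name mycontainer and the file paths are assumed placeholders, while the connection plugin and the lxc command come straight from the notes.

- hosts: mycontainer
  connection: community.general.lxd
  tasks:
    # Push a file from the controller into the instance over the lxd connection plugin.
    - name: Copy a file into the instance
      ansible.builtin.copy:
        src: files/app.conf
        dest: /etc/app.conf

- hosts: localhost
  tasks:
    # Pull a file back out of the instance with the lxc client, as the last note describes.
    - name: Fetch a file from the instance
      ansible.builtin.command: lxc file pull mycontainer/etc/app.conf /tmp/app.conf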
@@ -260,7 +262,7 @@ EXAMPLES = r""" source: type: image mode: pull - # Provides Ubuntu minimal images + # Provides Ubuntu minimal images server: https://cloud-images.ubuntu.com/minimal/releases/ protocol: simplestreams alias: "22.04" @@ -316,8 +318,8 @@ EXAMPLES = r""" community.general.lxd_container: url: https://127.0.0.1:8443 # These client_cert and client_key values are equal to the default values. - #client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt" - #client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key" + # client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt" + # client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key" trust_password: mypassword name: mycontainer state: restarted @@ -340,7 +342,7 @@ EXAMPLES = r""" # nodes - 'node01' and 'node02'. In 'target:', 'node01' and 'node02' are names of LXD cluster # members that LXD cluster recognizes, not ansible inventory names, see: 'lxc cluster list'. # LXD API calls can be made to any LXD member, in this example, we send API requests to -#'node01.example.com', which matches ansible inventory name. +# 'node01.example.com', which matches ansible inventory name. - hosts: node01.example.com tasks: - name: Create LXD container @@ -391,7 +393,12 @@ addresses: description: Mapping from the network device name to a list of IPv4 addresses in the instance. returned: when state is started or restarted type: dict - sample: {"eth0": ["10.155.92.191"]} + sample: + { + "eth0": [ + "10.155.92.191" + ] + } old_state: description: The old state of the instance. returned: when state is started or restarted diff --git a/plugins/modules/lxd_profile.py b/plugins/modules/lxd_profile.py index c46559298b..2525889968 100644 --- a/plugins/modules/lxd_profile.py +++ b/plugins/modules/lxd_profile.py @@ -40,8 +40,8 @@ options: type: str config: description: - - 'The config for the instance (for example V({"limits.memory": "4GB"})). - - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_get).' + - 'The config for the profile (for example V({"limits.memory": "4GB"})).' + - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_get). - If the profile already exists and its C(config) value in metadata obtained from GET /1.0/profiles/ U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_get) are different, then this module tries to apply the configurations U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_put). @@ -50,14 +50,14 @@ options: devices: description: - - 'The devices for the profile (for example V({"rootfs": {"path": "/dev/kvm", "type": "unix-char"})). - - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_get).' + - 'The devices for the profile (for example V({"rootfs": {"path": "/dev/kvm", "type": "unix-char"})).' + - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_get). required: false type: dict new_name: description: - A new name of a profile. - - If this parameter is specified a profile will be renamed to this name. + - If this parameter is specified, the profile is renamed to this name. - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_post). required: false type: str @@ -106,14 +106,14 @@ options: trust_password: description: - The client trusted password. - - You need to set this password on the LXD server before running this module using the following command.
lxc config set core.trust_password - See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/). - - If trust_password is set, this module send a request for authentication before sending any requests. + - 'You need to set this password on the LXD server before running this module using the following command: C(lxc config + set core.trust_password ). See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).' + - If O(trust_password) is set, this module sends a request for authentication before sending any requests. required: false type: str notes: - - Profiles must have a unique name. If you attempt to create a profile with a name that already existed in the users namespace the module will - simply return as "unchanged". + - Profiles must have a unique name. If you attempt to create a profile with a name that already exists in the user's namespace, + the module simply returns as "unchanged". """ EXAMPLES = r""" @@ -154,8 +154,8 @@ EXAMPLES = r""" community.general.lxd_profile: url: https://127.0.0.1:8443 # These client_cert and client_key values are equal to the default values. - #client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt" - #client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key" + # client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt" + # client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key" trust_password: mypassword name: macvlan state: present diff --git a/plugins/modules/lxd_project.py b/plugins/modules/lxd_project.py index ee90b88168..20804f8b38 100644 --- a/plugins/modules/lxd_project.py +++ b/plugins/modules/lxd_project.py @@ -33,8 +33,8 @@ options: type: str config: description: - - 'The config for the project (for example V({"features.profiles": "true"})). - - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_get).' + - 'The config for the project (for example V({"features.profiles": "true"})).' + - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_get). - If the project already exists and its "config" value in metadata obtained from C(GET /1.0/projects/) U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_get) are different, then this module tries to apply the configurations U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_put). @@ -42,14 +42,14 @@ options: new_name: description: - A new name of a project. - - If this parameter is specified a project will be renamed to this name. + - If this parameter is specified, the project is renamed to this name. - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_post). required: false type: str merge_project: description: - - Merge the configuration of the present project with the new desired configuration, instead of replacing it. If configuration is the same - after merged, no change will be made. + - Merge the configuration of the present project with the new desired configuration, instead of replacing it. If the configuration + is the same after merging, no change is made. required: false default: false type: bool @@ -91,14 +91,14 @@ options: trust_password: description: - The client trusted password. - - 'You need to set this password on the LXD server before running this module using the following command: C(lxc config set core.trust_password - ) See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).'
+ - 'You need to set this password on the LXD server before running this module using the following command: C(lxc config + set core.trust_password ) See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).' - If O(trust_password) is set, this module sends a request for authentication before sending any requests. required: false type: str notes: - - Projects must have a unique name. If you attempt to create a project with a name that already existed in the users namespace the module will - simply return as "unchanged". + - Projects must have a unique name. If you attempt to create a project with a name that already exists in the user's namespace, + the module simply returns as "unchanged". """ EXAMPLES = r""" diff --git a/plugins/modules/macports.py b/plugins/modules/macports.py index 3f02eeb411..c328e45904 100644 --- a/plugins/modules/macports.py +++ b/plugins/modules/macports.py @@ -280,7 +280,7 @@ def main(): selfupdate=dict(aliases=["update_cache", "update_ports"], default=False, type='bool'), state=dict(default="present", choices=["present", "installed", "absent", "removed", "active", "inactive"]), upgrade=dict(default=False, type='bool'), - variant=dict(aliases=["variants"], default=None, type='str') + variant=dict(aliases=["variants"], type='str') ) ) diff --git a/plugins/modules/mail.py b/plugins/modules/mail.py index 0cca8646ca..7c8bdb69b3 100644 --- a/plugins/modules/mail.py +++ b/plugins/modules/mail.py @@ -16,12 +16,12 @@ module: mail short_description: Send an email description: - This module is useful for sending emails from playbooks. - - One may wonder why automate sending emails? In complex environments there are from time to time processes that cannot be automated, either - because you lack the authority to make it so, or because not everyone agrees to a common approach. - - If you cannot automate a specific step, but the step is non-blocking, sending out an email to the responsible party to make them perform their - part of the bargain is an elegant way to put the responsibility in someone else's lap. - - Of course sending out a mail can be equally useful as a way to notify one or more people in a team that a specific action has been (successfully) - taken. + - One may wonder why automate sending emails? In complex environments there are from time to time processes that cannot + be automated, either because you lack the authority to make it so, or because not everyone agrees to a common approach. + - If you cannot automate a specific step, but the step is non-blocking, sending out an email to the responsible party to + make them perform their part of the bargain is an elegant way to put the responsibility in someone else's lap. + - Of course sending out a mail can be equally useful as a way to notify one or more people in a team that a specific action + has been (successfully) taken. extends_documentation_fragment: - community.general.attributes attributes: @@ -90,7 +90,7 @@ options: attach: description: - A list of pathnames of files to attach to the message. - - Attached files will have their content-type set to C(application/octet-stream). + - Attached files have their content-type set to C(application/octet-stream). type: list elements: path default: [] @@ -115,11 +115,12 @@ options: default: plain secure: description: - - If V(always), the connection will only send email if the connection is Encrypted. If the server does not accept the encrypted connection - it will fail.
- - If V(try), the connection will attempt to setup a secure SSL/TLS session, before trying to send. - - If V(never), the connection will not attempt to setup a secure SSL/TLS session, before sending. - - If V(starttls), the connection will try to upgrade to a secure SSL/TLS connection, before sending. If it is unable to do so it will fail. + - If V(always), the connection only sends email if the connection is encrypted. If the server does not accept the encrypted + connection, it fails. + - If V(try), the connection attempts to set up a secure SSL/TLS session before trying to send. + - If V(never), the connection does not attempt to set up a secure SSL/TLS session before sending. + - If V(starttls), the connection tries to upgrade to a secure SSL/TLS connection before sending. If it is unable to + do so, it fails. type: str choices: [always, never, starttls, try] default: try @@ -136,7 +137,7 @@ options: message_id_domain: description: - The domain name to use for the L(Message-ID header, https://en.wikipedia.org/wiki/Message-ID). - - Note that this is only available on Python 3+. On Python 2, this value will be ignored. + - Note that this is only available on Python 3+. On Python 2, this value is ignored. type: str default: ansible version_added: 8.2.0 @@ -241,7 +242,7 @@ def main(): password=dict(type='str', no_log=True), host=dict(type='str', default='localhost'), port=dict(type='int', default=25), - ehlohost=dict(type='str', default=None), + ehlohost=dict(type='str'), sender=dict(type='str', default='root', aliases=['from']), to=dict(type='list', elements='str', default=['root'], aliases=['recipients']), cc=dict(type='list', elements='str', default=[]), diff --git a/plugins/modules/make.py b/plugins/modules/make.py index a574560f7f..57ee525db5 100644 --- a/plugins/modules/make.py +++ b/plugins/modules/make.py @@ -48,7 +48,7 @@ options: params: description: - Any extra parameters to pass to make. - - If the value is empty, only the key will be used. For example, V(FOO:) will produce V(FOO), not V(FOO=). + - If the value is empty, only the key is used. For example, V(FOO:) produces V(FOO), not V(FOO=). type: dict target: description: diff --git a/plugins/modules/manageiq_alerts.py b/plugins/modules/manageiq_alerts.py index 87fafcf10b..d1b3fdba69 100644 --- a/plugins/modules/manageiq_alerts.py +++ b/plugins/modules/manageiq_alerts.py @@ -300,7 +300,7 @@ def main(): expression=dict(type='dict'), options=dict(type='dict'), enabled=dict(type='bool'), - state=dict(required=False, default='present', + state=dict(default='present', choices=['present', 'absent']), ) # add the manageiq connection arguments to the arguments diff --git a/plugins/modules/manageiq_group.py b/plugins/modules/manageiq_group.py index 9858dd5947..68170ea733 100644 --- a/plugins/modules/manageiq_group.py +++ b/plugins/modules/manageiq_group.py @@ -44,7 +44,7 @@ options: role_id: type: int description: - - The the group role id. + - The group role ID. required: false role: type: str @@ -56,7 +56,7 @@ options: tenant_id: type: int description: - - The tenant for the group identified by the tenant id. + - The tenant for the group identified by the tenant ID. required: false default: tenant: @@ -75,8 +75,8 @@ options: managed_filters_merge_mode: type: str description: - - In merge mode existing categories are kept or updated, new categories are added. - - In replace mode all categories will be replaced with the supplied O(managed_filters).
+ - In V(merge) mode existing categories are kept or updated, new categories are added. + - In V(replace) mode all categories are replaced with the supplied O(managed_filters). choices: [merge, replace] default: replace belongsto_filters: @@ -172,7 +172,7 @@ group: returned: success type: str id: - description: The group id. + description: The group ID. returned: success type: int group_type: @@ -570,14 +570,14 @@ def main(): argument_spec = dict( description=dict(required=True, type='str'), state=dict(choices=['absent', 'present'], default='present'), - role_id=dict(required=False, type='int'), - role=dict(required=False, type='str'), - tenant_id=dict(required=False, type='int'), - tenant=dict(required=False, type='str'), - managed_filters=dict(required=False, type='dict'), - managed_filters_merge_mode=dict(required=False, choices=['merge', 'replace'], default='replace'), - belongsto_filters=dict(required=False, type='list', elements='str'), - belongsto_filters_merge_mode=dict(required=False, choices=['merge', 'replace'], default='replace'), + role_id=dict(type='int'), + role=dict(type='str'), + tenant_id=dict(type='int'), + tenant=dict(type='str'), + managed_filters=dict(type='dict'), + managed_filters_merge_mode=dict(choices=['merge', 'replace'], default='replace'), + belongsto_filters=dict(type='list', elements='str'), + belongsto_filters_merge_mode=dict(choices=['merge', 'replace'], default='replace'), ) # add the manageiq connection arguments to the arguments argument_spec.update(manageiq_argument_spec()) diff --git a/plugins/modules/manageiq_policies.py b/plugins/modules/manageiq_policies.py index e53388f293..247e2dc94c 100644 --- a/plugins/modules/manageiq_policies.py +++ b/plugins/modules/manageiq_policies.py @@ -45,8 +45,21 @@ options: description: - The type of the resource to which the profile should be [un]assigned. required: true - choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster', 'data store', 'group', 'resource pool', 'service', 'service template', - 'template', 'tenant', 'user'] + choices: + - provider + - host + - vm + - blueprint + - category + - cluster + - data store + - group + - resource pool + - service + - service template + - template + - tenant + - user resource_name: type: str description: @@ -89,36 +102,37 @@ EXAMPLES = r""" RETURN = r""" manageiq_policies: - description: - - List current policy_profile and policies for a provider in ManageIQ. - returned: always - type: dict - sample: '{ - "changed": false, - "profiles": [ + description: + - List current policy_profile and policies for a provider in ManageIQ. 
+ returned: always + type: dict + sample: + { + "changed": false, + "profiles": [ + { + "policies": [ { - "policies": [ - { - "active": true, - "description": "OpenSCAP", - "name": "openscap policy" - }, - { - "active": true, - "description": "Analyse incoming container images", - "name": "analyse incoming container images" - }, - { - "active": true, - "description": "Schedule compliance after smart state analysis", - "name": "schedule compliance after smart state analysis" - } - ], - "profile_description": "OpenSCAP profile", - "profile_name": "openscap profile" + "active": true, + "description": "OpenSCAP", + "name": "openscap policy" + }, + { + "active": true, + "description": "Analyse incoming container images", + "name": "analyse incoming container images" + }, + { + "active": true, + "description": "Schedule compliance after smart state analysis", + "name": "schedule compliance after smart state analysis" } - ] - }' + ], + "profile_description": "OpenSCAP profile", + "profile_name": "openscap profile" + } + ] + } """ from ansible.module_utils.basic import AnsibleModule @@ -133,7 +147,7 @@ def main(): resource_name=dict(type='str'), resource_type=dict(required=True, type='str', choices=list(manageiq_entities().keys())), - state=dict(required=False, type='str', + state=dict(type='str', choices=['present', 'absent'], default='present'), ) # add the manageiq connection arguments to the arguments diff --git a/plugins/modules/manageiq_policies_info.py b/plugins/modules/manageiq_policies_info.py index f4235203ab..2db694f11c 100644 --- a/plugins/modules/manageiq_policies_info.py +++ b/plugins/modules/manageiq_policies_info.py @@ -29,8 +29,21 @@ options: description: - The type of the resource to obtain the profile for. required: true - choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster', 'data store', 'group', 'resource pool', 'service', 'service template', - 'template', 'tenant', 'user'] + choices: + - provider + - host + - vm + - blueprint + - category + - cluster + - data store + - group + - resource pool + - service + - service template + - template + - tenant + - user resource_name: type: str description: @@ -83,8 +96,8 @@ from ansible_collections.community.general.plugins.module_utils.manageiq import def main(): argument_spec = dict( - resource_id=dict(required=False, type='int'), - resource_name=dict(required=False, type='str'), + resource_id=dict(type='int'), + resource_name=dict(type='str'), resource_type=dict(required=True, type='str', choices=list(manageiq_entities().keys())), ) diff --git a/plugins/modules/manageiq_provider.py b/plugins/modules/manageiq_provider.py index 98677c7beb..334555c29a 100644 --- a/plugins/modules/manageiq_provider.py +++ b/plugins/modules/manageiq_provider.py @@ -31,7 +31,7 @@ options: description: - V(absent) - provider should not exist, - V(present) - provider should be present, - - V(refresh) - provider will be refreshed. + - V(refresh) - provider is refreshed. choices: ['absent', 'present', 'refresh'] default: 'present' name: @@ -44,7 +44,7 @@ options: choices: ['Openshift', 'Amazon', 'oVirt', 'VMware', 'Azure', 'Director', 'OpenStack', 'GCE'] zone: type: str - description: The ManageIQ zone name that will manage the provider. + description: The ManageIQ zone name that manages the provider. default: 'default' provider_region: type: str @@ -63,7 +63,7 @@ options: description: Google Compute Engine Project ID. azure_tenant_id: type: str - description: Tenant ID. defaults to None. + description: Tenant ID. Defaults to V(null). 
aliases: [keystone_v3_domain_id] tenant_mapping_enabled: type: bool @@ -190,25 +190,25 @@ options: description: The provider's API port. userid: type: str - description: Provider's API endpoint authentication userid. defaults to None. + description: Provider's API endpoint authentication userid. Defaults to V(null). password: type: str - description: Provider's API endpoint authentication password. defaults to None. + description: Provider's API endpoint authentication password. Defaults to V(null). auth_key: type: str - description: Provider's API endpoint authentication bearer token. defaults to None. + description: Provider's API endpoint authentication bearer token. Defaults to V(null). validate_certs: type: bool - description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True. + description: Whether SSL certificates should be verified for HTTPS requests (deprecated). Defaults to V(true). default: true aliases: [verify_ssl] security_protocol: type: str choices: ['ssl-with-validation', 'ssl-with-validation-custom-ca', 'ssl-without-validation', 'non-ssl'] - description: How SSL certificates should be used for HTTPS requests. defaults to None. + description: How SSL certificates should be used for HTTPS requests. Defaults to V(null). certificate_authority: type: str - description: The CA bundle string with custom certificates. defaults to None. + description: The CA bundle string with custom certificates. Defaults to V(null). path: type: str description: diff --git a/plugins/modules/manageiq_tags.py b/plugins/modules/manageiq_tags.py index bae59353cf..efd135393d 100644 --- a/plugins/modules/manageiq_tags.py +++ b/plugins/modules/manageiq_tags.py @@ -45,16 +45,29 @@ options: description: - The relevant resource type in manageiq. required: true - choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster', 'data store', 'group', 'resource pool', 'service', 'service template', - 'template', 'tenant', 'user'] + choices: + - provider + - host + - vm + - blueprint + - category + - cluster + - data store + - group + - resource pool + - service + - service template + - template + - tenant + - user resource_name: type: str description: - - The name of the resource at which tags will be controlled. + - The name of the resource at which tags are controlled. - Must be specified if O(resource_id) is not set. Both options are mutually exclusive. resource_id: description: - - The ID of the resource at which tags will be controlled. + - The ID of the resource at which tags are controlled. - Must be specified if O(resource_name) is not set. Both options are mutually exclusive. type: int version_added: 2.2.0 @@ -125,7 +138,7 @@ def main(): resource_name=dict(type='str'), resource_type=dict(required=True, type='str', choices=list(manageiq_entities().keys())), - state=dict(required=False, type='str', + state=dict(type='str', choices=['present', 'absent'], default='present'), ) # add the manageiq connection arguments to the arguments diff --git a/plugins/modules/manageiq_tags_info.py b/plugins/modules/manageiq_tags_info.py index 5d32104e7a..2a742f69c5 100644 --- a/plugins/modules/manageiq_tags_info.py +++ b/plugins/modules/manageiq_tags_info.py @@ -27,16 +27,29 @@ options: description: - The relevant resource type in ManageIQ.
required: true - choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster', 'data store', 'group', 'resource pool', 'service', 'service template', - 'template', 'tenant', 'user'] + choices: + - provider + - host + - vm + - blueprint + - category + - cluster + - data store + - group + - resource pool + - service + - service template + - template + - tenant + - user resource_name: type: str description: - - The name of the resource at which tags will be controlled. + - The name of the resource at which tags are controlled. - Must be specified if O(resource_id) is not set. Both options are mutually exclusive. resource_id: description: - - The ID of the resource at which tags will be controlled. + - The ID of the resource at which tags are controlled. - Must be specified if O(resource_name) is not set. Both options are mutually exclusive. type: int """ diff --git a/plugins/modules/manageiq_tenant.py b/plugins/modules/manageiq_tenant.py index 4700e46356..fda97509ce 100644 --- a/plugins/modules/manageiq_tenant.py +++ b/plugins/modules/manageiq_tenant.py @@ -49,7 +49,7 @@ options: parent_id: type: int description: - - The id of the parent tenant. If not supplied the root tenant is used. + - The ID of the parent tenant. If not supplied the root tenant is used. - The O(parent_id) takes precedence over O(parent) when supplied. required: false default: @@ -140,7 +140,7 @@ tenant: type: complex contains: id: - description: The tenant id. + description: The tenant ID. returned: success type: int name: @@ -152,7 +152,7 @@ tenant: returned: success type: str parent_id: - description: The id of the parent tenant. + description: The ID of the parent tenant. returned: success type: int quotas: @@ -482,8 +482,8 @@ def main(): argument_spec = dict( name=dict(required=True, type='str'), description=dict(required=True, type='str'), - parent_id=dict(required=False, type='int'), - parent=dict(required=False, type='str'), + parent_id=dict(type='int'), + parent=dict(type='str'), state=dict(choices=['absent', 'present'], default='present'), quotas=dict(type='dict', default={}) ) diff --git a/plugins/modules/manageiq_user.py b/plugins/modules/manageiq_user.py index a4d5c21dfc..475086c823 100644 --- a/plugins/modules/manageiq_user.py +++ b/plugins/modules/manageiq_user.py @@ -59,8 +59,8 @@ options: default: always choices: ['always', 'on_create'] description: - - V(always) will update passwords unconditionally. - - V(on_create) will only set the password for a newly created user. + - V(always) updates passwords unconditionally. + - V(on_create) only sets the password for a newly created user. """ EXAMPLES = r""" diff --git a/plugins/modules/matrix.py b/plugins/modules/matrix.py index 8a9fcf175c..fb6c797bff 100644 --- a/plugins/modules/matrix.py +++ b/plugins/modules/matrix.py @@ -13,7 +13,7 @@ author: "Jan Christian Grünhage (@jcgruenhage)" module: matrix short_description: Send notifications to matrix description: - - This module sends html formatted notifications to matrix rooms. + - This module sends HTML formatted notifications to matrix rooms. extends_documentation_fragment: - community.general.attributes attributes: @@ -49,7 +49,7 @@ options: user_id: type: str description: - - The user id of the user. + - The user ID of the user.
password: type: str description: @@ -99,9 +99,9 @@ def run_module(): msg_html=dict(type='str', required=True), room_id=dict(type='str', required=True), hs_url=dict(type='str', required=True), - token=dict(type='str', required=False, no_log=True), - user_id=dict(type='str', required=False), - password=dict(type='str', required=False, no_log=True), + token=dict(type='str', no_log=True), + user_id=dict(type='str'), + password=dict(type='str', no_log=True), ) result = dict( diff --git a/plugins/modules/mattermost.py b/plugins/modules/mattermost.py index 47a16f0fe4..4cb32c1f3b 100644 --- a/plugins/modules/mattermost.py +++ b/plugins/modules/mattermost.py @@ -32,13 +32,13 @@ options: url: type: str description: - - Mattermost url (for example V(http://mattermost.yourcompany.com)). + - Mattermost URL (for example V(http://mattermost.yourcompany.com)). required: true api_key: type: str description: - - Mattermost webhook API key. Log into your Mattermost site, go to Menu -> Integration -> Incoming Webhook -> Add Incoming Webhook. This - will give you full URL. O(api_key) is the last part. U(http://mattermost.example.com/hooks/API_KEY). + - Mattermost webhook API key. Log into your Mattermost site, go to Menu -> Integration -> Incoming Webhook -> Add Incoming + Webhook. This gives you a full URL. O(api_key) is the last part. U(http://mattermost.example.com/hooks/API_KEY). required: true text: type: str @@ -75,7 +75,8 @@ options: version_added: 10.0.0 validate_certs: description: - - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed + certificates. default: true type: bool """ @@ -136,10 +137,10 @@ def main(): url=dict(type='str', required=True), api_key=dict(type='str', required=True, no_log=True), text=dict(type='str'), - channel=dict(type='str', default=None), + channel=dict(type='str'), username=dict(type='str', default='Ansible'), icon_url=dict(type='str', default='https://docs.ansible.com/favicon.ico'), - priority=dict(type='str', default=None, choices=['important', 'urgent']), + priority=dict(type='str', choices=['important', 'urgent']), validate_certs=dict(default=True, type='bool'), attachments=dict(type='list', elements='dict'), ), diff --git a/plugins/modules/maven_artifact.py b/plugins/modules/maven_artifact.py index 7193626999..af3be70f39 100644 --- a/plugins/modules/maven_artifact.py +++ b/plugins/modules/maven_artifact.py @@ -16,7 +16,7 @@ module: maven_artifact short_description: Downloads an Artifact from a Maven Repository description: - Downloads an artifact from a maven repository given the maven coordinates provided to the module. - - Can retrieve snapshots or release versions of the artifact and will resolve the latest available version if one is not available. + - Can retrieve snapshots or release versions of the artifact and resolves the latest available version if one is not specified. author: "Chris Schmidt (@chrisisbeef)" requirements: - lxml @@ -75,7 +75,8 @@ options: password: type: str description: - - The password to authenticate with to the Maven Repository. Use AWS secret access key of the repository is hosted on S3. + - The password to authenticate with to the Maven Repository. Use the AWS secret access key if the repository is hosted on + S3.
aliases: ["aws_secret_access_key"] headers: description: @@ -83,9 +84,9 @@ options: type: dict force_basic_auth: description: - - Httplib2, the library used by the uri module only sends authentication information when a webservice responds to an initial request with - a 401 status. Since some basic auth services do not properly send a 401, logins will fail. This option forces the sending of the Basic - authentication header upon initial request. + - C(httplib2), the library used by the URI module only sends authentication information when a webservice responds to + an initial request with a 401 status. Since some basic auth services do not properly send a 401, logins fail. This + option forces the sending of the Basic authentication header upon initial request. default: false type: bool version_added: '0.2.0' @@ -108,7 +109,7 @@ options: default: 10 validate_certs: description: - - If V(false), SSL certificates will not be validated. This should only be set to V(false) when no other option exists. + - If V(false), SSL certificates are not validated. This should only be set to V(false) when no other option exists. type: bool default: true client_cert: @@ -126,18 +127,20 @@ options: keep_name: description: - If V(true), the downloaded artifact's name is preserved, in other words the version number remains part of it. - - This option only has effect when O(dest) is a directory and O(version) is set to V(latest) or O(version_by_spec) is defined. + - This option only has effect when O(dest) is a directory and O(version) is set to V(latest) or O(version_by_spec) is + defined. type: bool default: false verify_checksum: type: str description: - - If V(never), the MD5/SHA1 checksum will never be downloaded and verified. - - If V(download), the MD5/SHA1 checksum will be downloaded and verified only after artifact download. This is the default. - - If V(change), the MD5/SHA1 checksum will be downloaded and verified if the destination already exist, to verify if they are identical. - This was the behaviour before 2.6. Since it downloads the checksum before (maybe) downloading the artifact, and since some repository - software, when acting as a proxy/cache, return a 404 error if the artifact has not been cached yet, it may fail unexpectedly. If you still - need it, you should consider using V(always) instead - if you deal with a checksum, it is better to use it to verify integrity after download. + - If V(never), the MD5/SHA1 checksum is never downloaded and verified. + - If V(download), the MD5/SHA1 checksum is downloaded and verified only after artifact download. This is the default. + - If V(change), the MD5/SHA1 checksum is downloaded and verified if the destination already exist, to verify if they + are identical. This was the behaviour before 2.6. Since it downloads the checksum before (maybe) downloading the artifact, + and since some repository software, when acting as a proxy/cache, return a 404 error if the artifact has not been + cached yet, it may fail unexpectedly. If you still need it, you should consider using V(always) instead - if you deal + with a checksum, it is better to use it to verify integrity after download. - V(always) combines V(download) and V(change). required: false default: 'download' @@ -145,9 +148,9 @@ options: checksum_alg: type: str description: - - If V(md5), checksums will use the MD5 algorithm. This is the default. - - If V(sha1), checksums will use the SHA1 algorithm. 
This can be used on systems configured to use FIPS-compliant algorithms, since MD5 - will be blocked on such systems. + - If V(md5), checksums use the MD5 algorithm. This is the default. + - If V(sha1), checksums use the SHA1 algorithm. This can be used on systems configured to use FIPS-compliant algorithms, + since MD5 is blocked on such systems. default: 'md5' choices: ['md5', 'sha1'] version_added: 3.2.0 @@ -241,7 +244,6 @@ import tempfile import traceback import re -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion from ansible.module_utils.ansible_release import __version__ as ansible_version from re import match @@ -619,35 +621,32 @@ def main(): argument_spec=dict( group_id=dict(required=True), artifact_id=dict(required=True), - version=dict(default=None), - version_by_spec=dict(default=None), + version=dict(), + version_by_spec=dict(), classifier=dict(default=''), extension=dict(default='jar'), repository_url=dict(default='https://repo1.maven.org/maven2'), - username=dict(default=None, aliases=['aws_secret_key']), - password=dict(default=None, no_log=True, aliases=['aws_secret_access_key']), + username=dict(aliases=['aws_secret_key']), + password=dict(no_log=True, aliases=['aws_secret_access_key']), headers=dict(type='dict'), force_basic_auth=dict(default=False, type='bool'), state=dict(default="present", choices=["present", "absent"]), # TODO - Implement a "latest" state timeout=dict(default=10, type='int'), dest=dict(type="path", required=True), - validate_certs=dict(required=False, default=True, type='bool'), - client_cert=dict(type="path", required=False), - client_key=dict(type="path", required=False), - keep_name=dict(required=False, default=False, type='bool'), - verify_checksum=dict(required=False, default='download', choices=['never', 'download', 'change', 'always']), - checksum_alg=dict(required=False, default='md5', choices=['md5', 'sha1']), - unredirected_headers=dict(type='list', elements='str', required=False), + validate_certs=dict(default=True, type='bool'), + client_cert=dict(type="path"), + client_key=dict(type="path"), + keep_name=dict(default=False, type='bool'), + verify_checksum=dict(default='download', choices=['never', 'download', 'change', 'always']), + checksum_alg=dict(default='md5', choices=['md5', 'sha1']), + unredirected_headers=dict(type='list', elements='str'), directory_mode=dict(type='str'), ), add_file_common_args=True, mutually_exclusive=([('version', 'version_by_spec')]) ) - if LooseVersion(ansible_version) < LooseVersion("2.12") and module.params['unredirected_headers']: - module.fail_json(msg="Unredirected Headers parameter provided, but your ansible-core version does not support it. 
Minimum version is 2.12") - - if LooseVersion(ansible_version) >= LooseVersion("2.12") and module.params['unredirected_headers'] is None: + if module.params['unredirected_headers'] is None: # if the user did not supply unredirected params, we use the default, ONLY on ansible core 2.12 and above module.params['unredirected_headers'] = ['Authorization', 'Cookie'] diff --git a/plugins/modules/memset_dns_reload.py b/plugins/modules/memset_dns_reload.py index 100f81fc05..cb8ebe9191 100644 --- a/plugins/modules/memset_dns_reload.py +++ b/plugins/modules/memset_dns_reload.py @@ -13,10 +13,10 @@ module: memset_dns_reload author: "Simon Weald (@glitchcrab)" short_description: Request reload of Memset's DNS infrastructure, notes: - - DNS reload requests are a best-effort service provided by Memset; these generally happen every 15 minutes by default, however you can request - an immediate reload if later tasks rely on the records being created. An API key generated using the Memset customer control panel is required - with the following minimum scope - C(dns.reload). If you wish to poll the job status to wait until the reload has completed, then C(job.status) - is also required. + - DNS reload requests are a best-effort service provided by Memset; these generally happen every 15 minutes by default, + however you can request an immediate reload if later tasks rely on the records being created. An API key generated using + the Memset customer control panel is required with the following minimum scope - C(dns.reload). If you wish to poll the + job status to wait until the reload has completed, then C(job.status) is also required. description: - Request a reload of Memset's DNS infrastructure, and optionally poll until it finishes. extends_documentation_fragment: @@ -36,8 +36,9 @@ options: default: false type: bool description: - - Boolean value, if set will poll the reload job's status and return when the job has completed (unless the 30 second timeout is reached - first). If the timeout is reached then the task will not be marked as failed, but stderr will indicate that the polling failed. + - If V(true), it polls the reload job's status and return when the job has completed (unless the 30 second timeout is + reached first). If the timeout is reached then the task does not return as failed, but stderr indicates that the polling + failed. """ EXAMPLES = r""" @@ -166,7 +167,7 @@ def main(): module = AnsibleModule( argument_spec=dict( api_key=dict(required=True, type='str', no_log=True), - poll=dict(required=False, default=False, type='bool') + poll=dict(default=False, type='bool') ), supports_check_mode=False ) diff --git a/plugins/modules/memset_server_info.py b/plugins/modules/memset_server_info.py index 3c0829ce09..59d395a161 100644 --- a/plugins/modules/memset_server_info.py +++ b/plugins/modules/memset_server_info.py @@ -74,31 +74,32 @@ memset_api: description: Details about the firewall group this server is in. 
returned: always type: dict - sample: { - "default_outbound_policy": "RETURN", - "name": "testyaa-fw1", - "nickname": "testyaa cPanel rules", - "notes": "", - "public": false, - "rules": { - "51d7db54d39c3544ef7c48baa0b9944f": { - "action": "ACCEPT", - "comment": "", - "dest_ip6s": "any", - "dest_ips": "any", - "dest_ports": "any", - "direction": "Inbound", - "ip_version": "any", - "ordering": 2, - "protocols": "icmp", - "rule_group_name": "testyaa-fw1", - "rule_id": "51d7db54d39c3544ef7c48baa0b9944f", - "source_ip6s": "any", - "source_ips": "any", - "source_ports": "any" + sample: + { + "default_outbound_policy": "RETURN", + "name": "testyaa-fw1", + "nickname": "testyaa cPanel rules", + "notes": "", + "public": false, + "rules": { + "51d7db54d39c3544ef7c48baa0b9944f": { + "action": "ACCEPT", + "comment": "", + "dest_ip6s": "any", + "dest_ips": "any", + "dest_ports": "any", + "direction": "Inbound", + "ip_version": "any", + "ordering": 2, + "protocols": "icmp", + "rule_group_name": "testyaa-fw1", + "rule_id": "51d7db54d39c3544ef7c48baa0b9944f", + "source_ip6s": "any", + "source_ips": "any", + "source_ports": "any" + } } } - } firewall_type: description: The type of firewall the server has (for example self-managed, managed). returned: always @@ -118,15 +119,16 @@ memset_api: description: List of dictionaries of all IP addresses assigned to the server. returned: always type: list - sample: [ - { - "address": "1.2.3.4", - "bytes_in_today": 1000.0, - "bytes_in_yesterday": 2000.0, - "bytes_out_today": 1000.0, - "bytes_out_yesterday": 2000.0 - } - ] + sample: + [ + { + "address": "1.2.3.4", + "bytes_in_today": 1000.0, + "bytes_in_yesterday": 2000.0, + "bytes_out_today": 1000.0, + "bytes_out_yesterday": 2000.0 + } + ] monitor: description: Whether the server has monitoring enabled. returned: always @@ -146,7 +148,7 @@ memset_api: description: The network zone(s) the server is in. returned: always type: list - sample: ['reading'] + sample: ["reading"] nickname: description: Customer-set nickname for the server. returned: always @@ -221,10 +223,14 @@ memset_api: description: Dictionary of tagged and untagged VLANs this server is in. returned: always type: dict - sample: { - tagged: [], - untagged: [ 'testyaa-vlan1', 'testyaa-vlan2' ] - } + sample: + { + "tagged": [], + "untagged": [ + "testyaa-vlan1", + "testyaa-vlan2" + ] + } vulnscan: description: Vulnerability scanning level. returned: always diff --git a/plugins/modules/memset_zone.py b/plugins/modules/memset_zone.py index 4d8804e3be..553328909d 100644 --- a/plugins/modules/memset_zone.py +++ b/plugins/modules/memset_zone.py @@ -13,8 +13,9 @@ module: memset_zone author: "Simon Weald (@glitchcrab)" short_description: Creates and deletes Memset DNS zones notes: - - Zones can be thought of as a logical group of domains, all of which share the same DNS records (in other words they point to the same IP). An API key - generated using the Memset customer control panel is needed with the following minimum scope - C(dns.zone_create), C(dns.zone_delete), C(dns.zone_list). + - Zones can be thought of as a logical group of domains, all of which share the same DNS records (in other words they point + to the same IP). An API key generated using the Memset customer control panel is needed with the following minimum scope + - C(dns.zone_create), C(dns.zone_delete), C(dns.zone_list). description: - Manage DNS zones in a Memset account. extends_documentation_fragment: @@ -88,7 +89,7 @@ memset_api: type: list sample: [] id: - description: Zone id. 
+ description: Zone ID. returned: always type: str sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c" @@ -288,8 +289,8 @@ def main(): state=dict(required=True, choices=['present', 'absent'], type='str'), api_key=dict(required=True, type='str', no_log=True), name=dict(required=True, aliases=['nickname'], type='str'), - ttl=dict(required=False, default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'), - force=dict(required=False, default=False, type='bool') + ttl=dict(default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'), + force=dict(default=False, type='bool') ), supports_check_mode=True ) diff --git a/plugins/modules/memset_zone_domain.py b/plugins/modules/memset_zone_domain.py index d8e56b4fe6..6e4dd27320 100644 --- a/plugins/modules/memset_zone_domain.py +++ b/plugins/modules/memset_zone_domain.py @@ -13,9 +13,9 @@ module: memset_zone_domain author: "Simon Weald (@glitchcrab)" short_description: Create and delete domains in Memset DNS zones notes: - - Zone domains can be thought of as a collection of domains, all of which share the same DNS records (in other words, they point to the same IP). An API - key generated using the Memset customer control panel is needed with the following minimum scope - C(dns.zone_domain_create), C(dns.zone_domain_delete), - C(dns.zone_domain_list). + - Zone domains can be thought of as a collection of domains, all of which share the same DNS records (in other words, they + point to the same IP). An API key generated using the Memset customer control panel is needed with the following minimum + scope - C(dns.zone_domain_create), C(dns.zone_domain_delete), C(dns.zone_domain_list). - Currently this module can only create one domain at a time. Multiple domains should be created using C(loop). description: - Manage DNS zone domains in a Memset account. diff --git a/plugins/modules/memset_zone_record.py b/plugins/modules/memset_zone_record.py index 553cd66926..fd87c35fa0 100644 --- a/plugins/modules/memset_zone_record.py +++ b/plugins/modules/memset_zone_record.py @@ -13,8 +13,9 @@ module: memset_zone_record author: "Simon Weald (@glitchcrab)" short_description: Create and delete records in Memset DNS zones notes: - - Zones can be thought of as a logical group of domains, all of which share the same DNS records (in other words they point to the same IP). An API key - generated using the Memset customer control panel is needed with the following minimum scope - C(dns.zone_create), C(dns.zone_delete), C(dns.zone_list). + - Zones can be thought of as a logical group of domains, all of which share the same DNS records (in other words they point + to the same IP). An API key generated using the Memset customer control panel is needed with the following minimum scope + - C(dns.zone_create), C(dns.zone_delete), C(dns.zone_list). - Currently this module can only create one DNS record at a time. Multiple records should be created using C(loop). description: - Manage DNS records in a Memset account. 
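The zone and record documentation above translates into a short usage sketch. This is a hedged illustration only: the API key, zone name, and address below are assumed placeholders, while the option names follow the memset_zone_record argument spec shown in the next hunk.

- name: Create a www A record in a Memset zone
  community.general.memset_zone_record:
    api_key: 5eb86c9196ab03919abcf03857163741  # placeholder key with dns.zone_* scope
    state: present
    zone: example.com
    type: A
    record: www
    address: 192.0.2.10
    ttl: 300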
@@ -355,15 +356,15 @@ def main(): global module module = AnsibleModule( argument_spec=dict( - state=dict(required=False, default='present', choices=['present', 'absent'], type='str'), + state=dict(default='present', choices=['present', 'absent'], type='str'), api_key=dict(required=True, type='str', no_log=True), zone=dict(required=True, type='str'), type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SRV', 'TXT'], type='str'), address=dict(required=True, aliases=['ip', 'data'], type='str'), - record=dict(required=False, default='', type='str'), - ttl=dict(required=False, default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'), - priority=dict(required=False, default=0, type='int'), - relative=dict(required=False, default=False, type='bool') + record=dict(default='', type='str'), + ttl=dict(default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'), + priority=dict(default=0, type='int'), + relative=dict(default=False, type='bool') ), supports_check_mode=True ) diff --git a/plugins/modules/mksysb.py b/plugins/modules/mksysb.py index d3c9abeac0..7e188ec844 100644 --- a/plugins/modules/mksysb.py +++ b/plugins/modules/mksysb.py @@ -77,7 +77,7 @@ options: storage_path: type: str description: - - Storage path where the mksysb will stored. + - Storage path where the mksysb backup is stored. required: true use_snapshot: description: @@ -96,10 +96,6 @@ EXAMPLES = r""" """ RETURN = r""" -changed: - description: Return changed for mksysb actions as true or false. - returned: always - type: bool msg: description: Return message regarding the action. returned: always @@ -141,7 +137,6 @@ class MkSysB(ModuleHelper): backup_dmapi_fs=cmd_runner_fmt.as_bool("-A"), combined_path=cmd_runner_fmt.as_func(cmd_runner_fmt.unpack_args(lambda p, n: ["%s/%s" % (p, n)])), ) - use_old_vardict = False def __init_module__(self): if not os.path.isdir(self.vars.storage_path): diff --git a/plugins/modules/modprobe.py b/plugins/modules/modprobe.py index b7b93cce2b..d5bb6fddbb 100644 --- a/plugins/modules/modprobe.py +++ b/plugins/modules/modprobe.py @@ -48,13 +48,15 @@ options: version_added: 7.0.0 description: - Persistency between reboots for configured module. - - This option creates files in C(/etc/modules-load.d/) and C(/etc/modprobe.d/) that make your module configuration persistent during reboots. - - If V(present), adds module name to C(/etc/modules-load.d/) and params to C(/etc/modprobe.d/) so the module will be loaded on next reboot. - - If V(absent), will comment out module name from C(/etc/modules-load.d/) and comment out params from C(/etc/modprobe.d/) so the module - will not be loaded on next reboot. - - If V(disabled), will not touch anything and leave C(/etc/modules-load.d/) and C(/etc/modprobe.d/) as it is. - - Note that it is usually a better idea to rely on the automatic module loading by PCI IDs, USB IDs, DMI IDs or similar triggers encoded - in the kernel modules themselves instead of configuration like this. + - This option creates files in C(/etc/modules-load.d/) and C(/etc/modprobe.d/) that make your module configuration persistent + during reboots. + - If V(present), adds module name to C(/etc/modules-load.d/) and params to C(/etc/modprobe.d/) so the module is loaded + on next reboot. + - If V(absent), comments out module name from C(/etc/modules-load.d/) and comments out params from C(/etc/modprobe.d/) + so the module is not loaded on next reboot. 
+ - If V(disabled), does not touch anything and leaves C(/etc/modules-load.d/) and C(/etc/modprobe.d/) as they are. + - Note that it is usually a better idea to rely on the automatic module loading by PCI IDs, USB IDs, DMI IDs or similar + triggers encoded in the kernel modules themselves instead of configuration like this. - In fact, most modern kernel modules are prepared for automatic loading already. - B(Note:) This option works only with distributions that use C(systemd) when set to values other than V(disabled). """ diff --git a/plugins/modules/monit.py b/plugins/modules/monit.py index aac495fa70..a10967264c 100644 --- a/plugins/modules/monit.py +++ b/plugins/modules/monit.py @@ -35,8 +35,8 @@ options: type: str timeout: description: - - If there are pending actions for the service monitored by monit, then Ansible will check for up to this many seconds to verify the requested - action has been performed. Ansible will sleep for five seconds between each check. + - If there are pending actions for the service monitored by monit, then the module checks for up to this many seconds to verify + the requested action has been performed. The module sleeps for five seconds between each check. default: 300 type: int author: diff --git a/plugins/modules/mqtt.py b/plugins/modules/mqtt.py index 462f809ade..b35a257da7 100644 --- a/plugins/modules/mqtt.py +++ b/plugins/modules/mqtt.py @@ -44,7 +44,7 @@ options: type: str description: - MQTT client identifier. - - If not specified, a value C(hostname + pid) will be used. + - If not specified, the value C(hostname + pid) is used. topic: type: str description: @@ -53,8 +53,8 @@ options: payload: type: str description: - - Payload. The special string V("None") may be used to send a NULL (that is, empty) payload which is useful to simply notify with the O(topic) - or to clear previously retained messages. + - Payload. The special string V("None") may be used to send a NULL (that is, empty) payload which is useful to simply + notify with the O(topic) or to clear previously retained messages. required: true qos: type: str @@ -64,29 +64,30 @@ options: choices: ["0", "1", "2"] retain: description: - - Setting this flag causes the broker to retain (in other words keep) the message so that applications that subsequently subscribe to the topic can - received the last retained message immediately. + - Setting this flag causes the broker to retain (in other words keep) the message so that applications that subsequently + subscribe to the topic can receive the last retained message immediately. type: bool default: false ca_cert: type: path description: - - The path to the Certificate Authority certificate files that are to be treated as trusted by this client. If this is the only option given - then the client will operate in a similar manner to a web browser. That is to say it will require the broker to have a certificate signed - by the Certificate Authorities in ca_certs and will communicate using TLS v1, but will not attempt any form of authentication. This provides - basic network encryption but may not be sufficient depending on how the broker is configured. + - The path to the Certificate Authority certificate files that are to be treated as trusted by this client. If this + is the only option given then the client operates in a similar manner to a web browser. That is to say it requires + the broker to have a certificate signed by the Certificate Authorities in ca_certs and communicates using TLS v1, + but does not attempt any form of authentication.
This provides basic network encryption but may not be sufficient + depending on how the broker is configured. aliases: [ca_certs] client_cert: type: path description: - - The path pointing to the PEM encoded client certificate. If this is not None it will be used as client information for TLS based authentication. - Support for this feature is broker dependent. + - The path pointing to the PEM encoded client certificate. If this is set it is used as client information for TLS based + authentication. Support for this feature is broker dependent. aliases: [certfile] client_key: type: path description: - - The path pointing to the PEM encoded client private key. If this is not None it will be used as client information for TLS based authentication. - Support for this feature is broker dependent. + - The path pointing to the PEM encoded client private key. If this is set it is used as client information for TLS based + authentication. Support for this feature is broker dependent. aliases: [keyfile] tls_version: description: @@ -161,15 +162,15 @@ def main(): port=dict(default=1883, type='int'), topic=dict(required=True), payload=dict(required=True), - client_id=dict(default=None), + client_id=dict(), qos=dict(default="0", choices=["0", "1", "2"]), retain=dict(default=False, type='bool'), - username=dict(default=None), - password=dict(default=None, no_log=True), - ca_cert=dict(default=None, type='path', aliases=['ca_certs']), - client_cert=dict(default=None, type='path', aliases=['certfile']), - client_key=dict(default=None, type='path', aliases=['keyfile']), - tls_version=dict(default=None, choices=['tlsv1.1', 'tlsv1.2']) + username=dict(), + password=dict(no_log=True), + ca_cert=dict(type='path', aliases=['ca_certs']), + client_cert=dict(type='path', aliases=['certfile']), + client_key=dict(type='path', aliases=['keyfile']), + tls_version=dict(choices=['tlsv1.1', 'tlsv1.2']) ), supports_check_mode=True ) diff --git a/plugins/modules/mssql_db.py b/plugins/modules/mssql_db.py index 95f529aff3..8a15bfe699 100644 --- a/plugins/modules/mssql_db.py +++ b/plugins/modules/mssql_db.py @@ -57,12 +57,13 @@ options: type: str target: description: - - Location, on the remote host, of the dump file to read from or write to. Uncompressed SQL files (C(.sql)) files are supported. + - Location, on the remote host, of the dump file to read from or write to. Uncompressed SQL (C(.sql)) files are + supported. type: str autocommit: description: - - Automatically commit the change only if the import succeed. Sometimes it is necessary to use autocommit=true, since some content can not - be changed within a transaction. + - Automatically commit the change only if the import succeeds. Sometimes it is necessary to use autocommit=true, since + some content cannot be changed within a transaction. type: bool default: false notes: @@ -157,7 +158,7 @@ def main(): login_password=dict(default='', no_log=True), login_host=dict(required=True), login_port=dict(default='1433'), - target=dict(default=None), + target=dict(), autocommit=dict(type='bool', default=False), state=dict( default='present', choices=['present', 'absent', 'import']) diff --git a/plugins/modules/mssql_script.py b/plugins/modules/mssql_script.py index 045cafde88..37bd0853d0 100644 --- a/plugins/modules/mssql_script.py +++ b/plugins/modules/mssql_script.py @@ -23,7 +23,7 @@ attributes: check_mode: support: partial details: - - The script will not be executed in check mode. + - The script is not executed in check mode.
diff_mode: support: none @@ -56,15 +56,16 @@ options: type: str transaction: description: - - If transactional mode is requested, start a transaction and commit the change only if the script succeed. Otherwise, rollback the transaction. + - If transactional mode is requested, start a transaction and commit the change only if the script succeeds. Otherwise, + roll back the transaction. - If transactional mode is not requested (default), automatically commit the change. type: bool default: false version_added: 8.4.0 output: description: - - With V(default) each row will be returned as a list of values. See RV(query_results). - - Output format V(dict) will return dictionary with the column names as keys. See RV(query_results_dict). + - With V(default) each row is returned as a list of values. See RV(query_results). + - Output format V(dict) returns a dictionary with the column names as keys. See RV(query_results_dict). - V(dict) requires named columns to be returned by each query otherwise an error is thrown. choices: ["dict", "default"] default: 'default' @@ -169,12 +170,33 @@ query_results: type: list elements: list returned: success and O(output=default) - sample: [[[["Batch 0 - Select 0"]], [["Batch 0 - Select 1"]]], [[["Batch 1 - Select 0"]]]] + sample: [ [ [ [ "Batch 0 - Select 0" ] ], [ [ "Batch 0 - Select 1" ] ] ], [ [ [ "Batch 1 - Select 0" ] ] ] ] contains: queries: description: - List of result sets of each query. - - If a query returns no results, the results of this and all the following queries will not be included in the output. + - If a query returns no results, the results of this and all the following queries are not included in the output. - Use the V(GO) keyword in O(script) to separate queries. type: list elements: list @@ -196,13 +218,34 @@ query_results_dict: type: list elements: list returned: success and O(output=dict) - sample: [[[["Batch 0 - Select 0"]], [["Batch 0 - Select 1"]]], [[["Batch 1 - Select 0"]]]] + sample: [ [ [ [ "Batch 0 - Select 0" ] ], [ [ "Batch 0 - Select 1" ] ] ], [ [ [ "Batch 1 - Select 0" ] ] ] ] contains: queries: description: - List of result sets of each query. - - If a query returns no results, the results of this and all the following queries will not be included in the output. Use 'GO' keyword - to separate queries. + - If a query returns no results, the results of this and all the following queries are not included in the output. + Use the V(GO) keyword to separate queries. type: list elements: list contains: @@ -239,7 +282,7 @@ def run_module(): module_args = dict( - name=dict(required=False, aliases=['db'], default=''), + name=dict(aliases=['db'], default=''), login_user=dict(), login_password=dict(no_log=True), login_host=dict(required=True), diff --git a/plugins/modules/nagios.py b/plugins/modules/nagios.py index 7a0c26b48e..830a805f87 100644 --- a/plugins/modules/nagios.py +++ b/plugins/modules/nagios.py @@ -20,13 +20,8 @@ short_description: Perform common tasks in Nagios related to downtime and notifi description: - 'The C(nagios) module has two basic functions: scheduling downtime and toggling alerts for services or hosts.' - The C(nagios) module is not idempotent. - - All actions require the O(host) parameter to be given explicitly. In playbooks you can use the C({{inventory_hostname}}) variable to refer - to the host the playbook is currently running on.
- - You can specify multiple services at once by separating them with commas, for example O(services=httpd,nfs,puppet). - - When specifying what service to handle there is a special service value, O(host), which will handle alerts/downtime/acknowledge for the I(host - itself), for example O(services=host). This keyword may not be given with other services at the same time. B(Setting alerts/downtime/acknowledge - for a host does not affect alerts/downtime/acknowledge for any of the services running on it.) To schedule downtime for all services on particular - host use keyword "all", for example O(services=all). + - All actions require the O(host) parameter to be given explicitly. In playbooks you can use the C({{inventory_hostname}}) + variable to refer to the host the playbook is currently running on. extends_documentation_fragment: - community.general.attributes attributes: @@ -40,8 +35,20 @@ options: - Action to take. - The V(acknowledge) and V(forced_check) actions were added in community.general 1.2.0. required: true - choices: ["downtime", "delete_downtime", "enable_alerts", "disable_alerts", "silence", "unsilence", "silence_nagios", "unsilence_nagios", - "command", "servicegroup_service_downtime", "servicegroup_host_downtime", "acknowledge", "forced_check"] + choices: + - downtime + - delete_downtime + - enable_alerts + - disable_alerts + - silence + - unsilence + - silence_nagios + - unsilence_nagios + - command + - servicegroup_service_downtime + - servicegroup_host_downtime + - acknowledge + - forced_check type: str host: description: @@ -76,6 +83,12 @@ options: description: - What to manage downtime/alerts for. Separate multiple services with commas. - 'B(Required) option when O(action) is one of: V(downtime), V(acknowledge), V(forced_check), V(enable_alerts), V(disable_alerts).' + - You can specify multiple services at once by separating them with commas, for example O(services=httpd,nfs,puppet). + - When specifying what O(services) to handle, there is a special service value, V(host), which handles alerts/downtime/acknowledge + for the I(host itself), for example O(services=host). This keyword may not be given with other services at the same + time. B(Setting alerts/downtime/acknowledge for a host does not affect alerts/downtime/acknowledge for any of the + services running on it.) To schedule downtime for all O(services) on a particular host, use the keyword V(all), for example + O(services=all). aliases: ["service"] type: str servicegroup: description: @@ -85,8 +98,8 @@ options: type: str command: description: - - The raw command to send to nagios, which should not include the submitted time header or the line-feed. - - B(Required) option when using the V(command) O(action). + - The raw command to send to Nagios, which should not include the submitted time header or the line-feed. + - B(Required) option when O(action=command). type: str author: "Tim Bielawa (@tbielawa)" diff --git a/plugins/modules/netcup_dns.py b/plugins/modules/netcup_dns.py index 370aaa5dca..c48e0a2fb2 100644 --- a/plugins/modules/netcup_dns.py +++ b/plugins/modules/netcup_dns.py @@ -35,7 +35,7 @@ options: type: str customer_id: description: - - Netcup customer id. + - Netcup customer ID. required: true type: int domain: @@ -68,7 +68,7 @@ options: default: false description: - Whether the record should be the only one for that record type and record name. Only use with O(state=present). - - This will delete all other records with the same record name and type.
+ - This deletes all other records with the same record name and type. priority: description: - Record priority. Required for O(type=MX). @@ -184,7 +184,7 @@ records: type: int sample: 0 id: - description: Internal id of the record. + description: Internal ID of the record. returned: success type: int sample: 12345 @@ -213,15 +213,15 @@ def main(): customer_id=dict(required=True, type='int'), domain=dict(required=True), - record=dict(required=False, default='@', aliases=['name']), + record=dict(default='@', aliases=['name']), type=dict(required=True, choices=['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT', 'TLSA', 'NS', 'DS', 'OPENPGPKEY', 'SMIMEA', 'SSHFP']), value=dict(required=True), - priority=dict(required=False, type='int'), - solo=dict(required=False, type='bool', default=False), - state=dict(required=False, choices=['present', 'absent'], default='present'), - timeout=dict(required=False, type='int', default=5), + priority=dict(type='int'), + solo=dict(type='bool', default=False), + state=dict(choices=['present', 'absent'], default='present'), + timeout=dict(type='int', default=5), ), supports_check_mode=True diff --git a/plugins/modules/newrelic_deployment.py b/plugins/modules/newrelic_deployment.py index 99ff996670..af58402a44 100644 --- a/plugins/modules/newrelic_deployment.py +++ b/plugins/modules/newrelic_deployment.py @@ -62,15 +62,16 @@ options: required: false validate_certs: description: - - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed + certificates. required: false default: true type: bool app_name_exact_match: type: bool description: - - If this flag is set to V(true) then the application ID lookup by name would only work for an exact match. If set to V(false) it returns - the first result. + - If this flag is set to V(true), the application ID lookup by name only works for an exact match. If set to + V(false), it returns the first result. required: false default: false version_added: 7.5.0 @@ -101,14 +102,14 @@ def main(): module = AnsibleModule( argument_spec=dict( token=dict(required=True, no_log=True), - app_name=dict(required=False), - application_id=dict(required=False), - changelog=dict(required=False), - description=dict(required=False), + app_name=dict(), + application_id=dict(), + changelog=dict(), + description=dict(), revision=dict(required=True), - user=dict(required=False), + user=dict(), validate_certs=dict(default=True, type='bool'), - app_name_exact_match=dict(required=False, type='bool', default=False), + app_name_exact_match=dict(type='bool', default=False), ), required_one_of=[['app_name', 'application_id']], required_if=[('app_name_exact_match', True, ['app_name'])], diff --git a/plugins/modules/nexmo.py b/plugins/modules/nexmo.py index 3293362ec3..2d3a62b053 100644 --- a/plugins/modules/nexmo.py +++ b/plugins/modules/nexmo.py @@ -45,11 +45,12 @@ options: msg: type: str description: - - Message to text to send. Messages longer than 160 characters will be split into multiple messages. + - Message text to send. Messages longer than 160 characters are split into multiple messages. required: true validate_certs: description: - - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates are not validated.
This should only be used on personally controlled sites using self-signed + certificates. type: bool default: true extends_documentation_fragment: diff --git a/plugins/modules/nginx_status_info.py b/plugins/modules/nginx_status_info.py index 7fa681d6d8..7f9865878c 100644 --- a/plugins/modules/nginx_status_info.py +++ b/plugins/modules/nginx_status_info.py @@ -62,8 +62,8 @@ accepts: type: int sample: 81769947 handled: - description: The total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have - been reached. + description: The total number of handled connections. Generally, the parameter value is the same as accepts unless some + resource limits have been reached. returned: success type: int sample: 81769947 diff --git a/plugins/modules/nictagadm.py b/plugins/modules/nictagadm.py index a02a8fcffd..07b17bcf9e 100644 --- a/plugins/modules/nictagadm.py +++ b/plugins/modules/nictagadm.py @@ -35,7 +35,7 @@ options: type: str etherstub: description: - - Specifies that the nic tag will be attached to a created O(etherstub). + - Specifies that the nic tag is attached to a created O(etherstub). - Parameter O(etherstub) is mutually exclusive with both O(mtu), and O(mac). type: bool default: false @@ -46,7 +46,7 @@ options: type: int force: description: - - When O(state=absent) this switch will use the C(-f) parameter and delete the nic tag regardless of existing VMs. + - When O(state=absent), this switch uses the C(-f) parameter and deletes the nic tag regardless of existing VMs. type: bool default: false state: @@ -83,7 +83,7 @@ mac: type: str sample: 00:1b:21:a3:f5:4d etherstub: - description: Specifies if the nic tag will create and attach to an etherstub. + description: Specifies if the nic tag was created and attached to an etherstub. returned: always type: bool sample: false @@ -93,7 +93,7 @@ mtu: type: int sample: 1500 force: - description: Shows if -f was used during the deletion of a nic tag. + description: Shows if C(-f) was used during the deletion of a nic tag. returned: always type: bool sample: false diff --git a/plugins/modules/nmcli.py b/plugins/modules/nmcli.py index a8784e870c..0d35e5aacc 100644 --- a/plugins/modules/nmcli.py +++ b/plugins/modules/nmcli.py @@ -19,11 +19,12 @@ requirements: extends_documentation_fragment: - community.general.attributes description: - - Manage the network devices. Create, modify and manage various connection and device type, for example V(ethernet), V(team), V(bond), V(vlan) and so on. - - 'On CentOS 8 and Fedora >=29 like systems, the requirements can be met by installing the following packages: NetworkManager.' - - 'On CentOS 7 and Fedora <=28 like systems, the requirements can be met by installing the following packages: NetworkManager-tui.' - - 'On Ubuntu and Debian like systems, the requirements can be met by installing the following packages: network-manager.' - - 'On openSUSE, the requirements can be met by installing the following packages: NetworkManager.' + - Manage the network devices. Create, modify and manage various connection and device types, for example V(ethernet), V(team), + V(bond), V(vlan) and so on. + - 'On CentOS 8 and Fedora >=29 like systems, the requirements can be met by installing the following packages: C(NetworkManager).' + - 'On CentOS 7 and Fedora <=28 like systems, the requirements can be met by installing the following packages: C(NetworkManager-tui).'
+ - 'On Ubuntu and Debian like systems, the requirements can be met by installing the following packages: C(network-manager).' + - 'On openSUSE, the requirements can be met by installing the following packages: C(NetworkManager).' attributes: check_mode: support: full @@ -33,9 +34,9 @@ options: state: description: - Whether the device should exist or not, taking action if the state is different from what is stated. - - Using O(state=present) to create connection will automatically bring connection up. - - Using O(state=up) and O(state=down) will not modify connection with other parameters. These states have been added in community.general - 9.5.0. + - Using O(state=present) creates a connection that is set to be brought up automatically. + - Using O(state=up) and O(state=down) does not modify the connection with other parameters. These states have been added + in community.general 9.5.0. type: str required: true choices: [absent, present, up, down] @@ -45,6 +46,16 @@ options: - Whether the connection profile can be automatically activated. type: bool default: true + autoconnect_priority: + description: + - The priority of the connection profile for autoconnect. If set, connection profiles with higher priority are preferred. + type: int + version_added: 11.0.0 + autoconnect_retries: + description: + - The number of times to retry autoconnecting. + type: int + version_added: 11.0.0 conn_name: description: - The name used to call the connection. Pattern is V([-][-]). @@ -60,9 +71,9 @@ options: ifname: description: - The interface to bind the connection to. - - The connection will only be applicable to this interface name. + - The connection is only applicable to this interface name. - A special value of V(*) can be used for interface-independent connections. - - The ifname argument is mandatory for all connection types except bond, team, bridge, vlan and vpn. + - The O(ifname) argument is mandatory for all connection types except bond, team, bridge, vlan and vpn. - This parameter defaults to O(conn_name) when left unset for all connection types except vpn that removes it. type: str type: description: @@ -78,11 +89,38 @@ options: - Type V(ovs-port) is added in community.general 8.6.0. - Type V(wireguard) is added in community.general 4.3.0. - Type V(vpn) is added in community.general 5.1.0. - - Using V(bond-slave), V(bridge-slave), or V(team-slave) implies V(ethernet) connection type with corresponding O(slave_type) option. - - If you want to control non-ethernet connection attached to V(bond), V(bridge), or V(team) consider using O(slave_type) option. + - Type V(vrf) is added in community.general 10.4.0. + - Using V(bond-slave), V(bridge-slave), or V(team-slave) implies V(ethernet) connection type with corresponding O(slave_type) + option. + - If you want to control non-ethernet connection attached to V(bond), V(bridge), or V(team) consider using O(slave_type) + option. type: str - choices: [bond, bond-slave, bridge, bridge-slave, dummy, ethernet, generic, gre, infiniband, ipip, macvlan, sit, team, team-slave, vlan, vxlan, - wifi, gsm, wireguard, ovs-bridge, ovs-port, ovs-interface, vpn, loopback] + choices: + - bond + - bond-slave + - bridge + - bridge-slave + - dummy + - ethernet + - generic + - gre + - infiniband + - ipip + - macvlan + - sit + - team + - team-slave + - vlan + - vxlan + - wifi + - gsm + - wireguard + - ovs-bridge + - ovs-port + - ovs-interface + - vpn + - vrf + - loopback mode: description: - This is the type of device or network connection that you wish to create for a bond or bridge.
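As a usage illustration for the new autoconnect options above, here is a minimal, hypothetical sketch (the connection name, interface, and values are illustrative only, not part of this patch):

- name: Prefer this profile when several profiles could autoconnect on eth0
  community.general.nmcli:
    conn_name: my-eth0
    ifname: eth0
    type: ethernet
    autoconnect: true
    autoconnect_priority: 10
    autoconnect_retries: 3
    state: present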
@@ -95,12 +133,17 @@ options: type: str choices: [datagram, connected] version_added: 5.8.0 + infiniband_mac: + description: + - MAC address of the Infiniband IPoIB device. + type: str + version_added: 10.6.0 slave_type: description: - Type of the device of this slave's master connection (for example V(bond)). - Type V(ovs-port) is added in community.general 8.6.0. type: str - choices: ['bond', 'bridge', 'team', 'ovs-port'] + choices: ['bond', 'bridge', 'team', 'ovs-port', 'vrf'] version_added: 7.0.0 master: description: @@ -344,7 +387,8 @@ options: version_added: 4.2.0 mtu: description: - - The connection MTU, for example V(9000). This can not be applied when creating the interface and is done once the interface has been created. + - The connection MTU, for example V(9000). This cannot be applied when creating the interface and is done once the + interface has been created. - Can be used when modifying Team, VLAN, Ethernet (Future plans to implement wifi, gsm, pppoe, infiniband). - This parameter defaults to V(1500) when unset. type: int @@ -354,7 +398,8 @@ options: type: str primary: description: - - This is only used with bond and is the primary interface name (for "active-backup" mode), this is the usually the 'ifname'. + - This is only used with bond and is the primary interface name (for "active-backup" mode), this is usually the + 'ifname'. type: str miimon: description: @@ -374,6 +419,12 @@ options: - This is only used with bond - xmit_hash_policy type. type: str version_added: 5.6.0 + fail_over_mac: + description: + - This is only used with bond - fail_over_mac. + type: str + choices: [none, active, follow] + version_added: 10.3.0 arp_interval: description: - This is only used with bond - ARP interval. @@ -429,8 +480,8 @@ options: default: 100 hairpin: description: - - This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through the slave the frame - was received on. + - This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through + the slave the frame was received on. - The default change to V(false) in community.general 7.0.0. It used to be V(true) before. type: bool default: false @@ -449,8 +500,8 @@ options: version_added: 3.4.0 runner_fast_rate: description: - - Option specifies the rate at which our link partner is asked to transmit LACPDU packets. If this is V(true) then packets will be sent - once per second. Otherwise they will be sent every 30 seconds. + - Option specifies the rate at which our link partner is asked to transmit LACPDU packets. If this is V(true) then packets + are sent once per second. Otherwise they are sent every 30 seconds. - Only allowed for O(runner=lacp). type: bool version_added: 6.5.0 @@ -510,6 +561,11 @@ options: - Only used when O(type=gre). type: str version_added: 3.6.0 + table: + description: + - This is only used with VRF - VRF table number. + type: int + version_added: 10.4.0 zone: description: - The trust level of the connection. @@ -519,33 +575,34 @@ options: wifi_sec: description: - The security configuration of the WiFi connection. - - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host. + - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on + the host.
- 'An up-to-date list of supported attributes can be found here: U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless-security.html).' - 'For instance to use common WPA-PSK auth with a password: V({key-mgmt: wpa-psk, psk: my_password}).' type: dict suboptions: auth-alg: description: - - When WEP is used (that is, if O(wifi_sec.key-mgmt) is V(none) or V(ieee8021x)) indicate the 802.11 authentication algorithm required - by the AP here. + - When WEP is used (that is, if O(wifi_sec.key-mgmt) is V(none) or V(ieee8021x)) indicate the 802.11 authentication + algorithm required by the AP here. - One of V(open) for Open System, V(shared) for Shared Key, or V(leap) for Cisco LEAP. - - When using Cisco LEAP (that is, if O(wifi_sec.key-mgmt=ieee8021x) and O(wifi_sec.auth-alg=leap)) the O(wifi_sec.leap-username) and - O(wifi_sec.leap-password) properties must be specified. + - When using Cisco LEAP (that is, if O(wifi_sec.key-mgmt=ieee8021x) and O(wifi_sec.auth-alg=leap)) the O(wifi_sec.leap-username) + and O(wifi_sec.leap-password) properties must be specified. type: str choices: [open, shared, leap] fils: description: - Indicates whether Fast Initial Link Setup (802.11ai) must be enabled for the connection. - - One of V(0) (use global default value), V(1) (disable FILS), V(2) (enable FILS if the supplicant and the access point support it) - or V(3) (enable FILS and fail if not supported). - - When set to V(0) and no global default is set, FILS will be optionally enabled. + - One of V(0) (use global default value), V(1) (disable FILS), V(2) (enable FILS if the supplicant and the access + point support it) or V(3) (enable FILS and fail if not supported). + - When set to V(0) and no global default is set, FILS is optionally enabled. type: int choices: [0, 1, 2, 3] default: 0 group: description: - - A list of group/broadcast encryption algorithms which prevents connections to Wi-Fi networks that do not utilize one of the algorithms - in the list. + - A list of group/broadcast encryption algorithms which prevents connections to Wi-Fi networks that do not utilize + one of the algorithms in the list. - For maximum compatibility leave this property empty. type: list elements: str @@ -553,9 +610,9 @@ options: key-mgmt: description: - Key management used for the connection. - - One of V(none) (WEP or no password protection), V(ieee8021x) (Dynamic WEP), V(owe) (Opportunistic Wireless Encryption), V(wpa-psk) - (WPA2 + WPA3 personal), V(sae) (WPA3 personal only), V(wpa-eap) (WPA2 + WPA3 enterprise) or V(wpa-eap-suite-b-192) (WPA3 enterprise - only). + - One of V(none) (WEP or no password protection), V(ieee8021x) (Dynamic WEP), V(owe) (Opportunistic Wireless Encryption), + V(wpa-psk) (WPA2 + WPA3 personal), V(sae) (WPA3 personal only), V(wpa-eap) (WPA2 + WPA3 enterprise) or V(wpa-eap-suite-b-192) + (WPA3 enterprise only). - This property must be set for any Wi-Fi connection that uses security. type: str choices: [none, ieee8021x, owe, wpa-psk, sae, wpa-eap, wpa-eap-suite-b-192] @@ -571,8 +628,8 @@ options: type: str pairwise: description: - - A list of pairwise encryption algorithms which prevents connections to Wi-Fi networks that do not utilize one of the algorithms in - the list. + - A list of pairwise encryption algorithms which prevents connections to Wi-Fi networks that do not utilize one + of the algorithms in the list. - For maximum compatibility leave this property empty. 
type: list elements: str @@ -580,9 +637,9 @@ pmf: description: - Indicates whether Protected Management Frames (802.11w) must be enabled for the connection. - - One of V(0) (use global default value), V(1) (disable PMF), V(2) (enable PMF if the supplicant and the access point support it) or - V(3) (enable PMF and fail if not supported). - - When set to V(0) and no global default is set, PMF will be optionally enabled. + - One of V(0) (use global default value), V(1) (disable PMF), V(2) (enable PMF if the supplicant and the access + point support it) or V(3) (enable PMF and fail if not supported). + - When set to V(0) and no global default is set, PMF is optionally enabled. type: int choices: [0, 1, 2, 3] default: 0 @@ -601,21 +658,22 @@ psk: description: - Pre-Shared-Key for WPA networks. - - For WPA-PSK, it is either an ASCII passphrase of 8 to 63 characters that is (as specified in the 802.11i standard) hashed to derive - the actual key, or the key in form of 64 hexadecimal character. + - For WPA-PSK, it is either an ASCII passphrase of 8 to 63 characters that is (as specified in the 802.11i standard) + hashed to derive the actual key, or the key in the form of 64 hexadecimal characters. - The WPA3-Personal networks use a passphrase of any length for SAE authentication. type: str wep-key-flags: description: - - Flags indicating how to handle the O(wifi_sec.wep-key0), O(wifi_sec.wep-key1), O(wifi_sec.wep-key2), and O(wifi_sec.wep-key3) properties. + - Flags indicating how to handle the O(wifi_sec.wep-key0), O(wifi_sec.wep-key1), O(wifi_sec.wep-key2), and O(wifi_sec.wep-key3) + properties. type: list elements: int wep-key-type: description: - Controls the interpretation of WEP keys. - - Allowed values are V(1), in which case the key is either a 10- or 26-character hexadecimal string, or a 5- or 13-character ASCII password; - or V(2), in which case the passphrase is provided as a string and will be hashed using the de-facto MD5 method to derive the actual - WEP key. + - Allowed values are V(1), in which case the key is either a 10- or 26-character hexadecimal string, or a 5- or + 13-character ASCII password; or V(2), in which case the passphrase is provided as a string and it is hashed using + the de-facto MD5 method to derive the actual WEP key. type: int choices: [1, 2] wep-key0: @@ -640,8 +698,8 @@ type: str wep-tx-keyidx: description: - - When static WEP is used (that is, if O(wifi_sec.key-mgmt=none)) and a non-default WEP key index is used by the AP, put that WEP key - index here. + - When static WEP is used (that is, if O(wifi_sec.key-mgmt=none)) and a non-default WEP key index is used by the + AP, put that WEP key index here. - Valid values are V(0) (default key) through V(3). - Note that some consumer access points (like the Linksys WRT54G) number the keys V(1) to V(4). type: int @@ -650,8 +708,8 @@ wps-method: description: - Flags indicating which mode of WPS is to be used if any. - - There is little point in changing the default setting as NetworkManager will automatically determine whether it is feasible to start - WPS enrollment from the Access Point capabilities. + - There is little point in changing the default setting as NetworkManager automatically determines whether it is + feasible to start WPS enrollment from the Access Point capabilities. - WPS can be disabled by setting this property to a value of V(1). type: int default: 0 @@ -664,7 +722,8 @@ wifi: description: - The configuration of the WiFi connection.
- - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host. + - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on + the host. - 'An up-to-date list of supported attributes can be found here: U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless.html).' - 'For instance to create a hidden AP mode WiFi connection: V({hidden: true, mode: ap}).' type: dict @@ -673,9 +732,9 @@ description: - Configures AP isolation, which prevents communication between wireless devices connected to this AP. - This property can be set to a value different from V(-1) only when the interface is configured in AP mode. - - If set to V(1), devices are not able to communicate with each other. This increases security because it protects devices against attacks - from other clients in the network. At the same time, it prevents devices to access resources on the same wireless networks as file - shares, printers, and so on. + - If set to V(1), devices are not able to communicate with each other. This increases security because it protects + devices against attacks from other clients in the network. At the same time, it prevents devices from accessing resources + on the same wireless network, such as file shares, printers, and so on. - If set to V(0), devices can talk to each other. - When set to V(-1), the global default is used; in case the global default is unspecified it is assumed to be V(0). type: int default: -1 version_added: 8.0.0 assigned-mac-address: description: - The new field for the cloned MAC address. - - It can be either a hardware address in ASCII representation, or one of the special values V(preserve), V(permanent), V(random) or - V(stable). - - This field replaces the deprecated O(wifi.cloned-mac-address) on D-Bus, which can only contain explicit hardware addresses. + - It can be either a hardware address in ASCII representation, or one of the special values V(preserve), V(permanent), + V(random) or V(stable). + - This field replaces the deprecated O(wifi.cloned-mac-address) on D-Bus, which can only contain explicit hardware + addresses. - Note that this property only exists in D-Bus API. libnm and nmcli continue to call this property C(cloned-mac-address). type: str band: description: - 802.11 frequency band of the network. - One of V(a) for 5GHz 802.11a or V(bg) for 2.4GHz 802.11. - - This will lock associations to the Wi-Fi network to the specific band, so for example, if V(a) is specified, the device will not associate - with the same network in the 2.4GHz band even if the network's settings are compatible. + - This locks associations to the Wi-Fi network to the specific band, so for example, if V(a) is specified, the device + does not associate with the same network in the 2.4GHz band even if the network's settings are compatible. - This setting depends on specific driver capability and may not work with all drivers. type: str choices: [a, bg] @@ -707,45 +767,48 @@ channel: description: - Wireless channel to use for the Wi-Fi connection. - - The device will only join (or create for Ad-Hoc networks) a Wi-Fi network on the specified channel. + - The device only joins (or creates for Ad-Hoc networks) a Wi-Fi network on the specified channel. - Because channel numbers overlap between bands, this property also requires the O(wifi.band) property to be set.
type: int default: 0 cloned-mac-address: description: - - This D-Bus field is deprecated in favor of O(wifi.assigned-mac-address) which is more flexible and allows specifying special variants - like V(random). + - This D-Bus field is deprecated in favor of O(wifi.assigned-mac-address) which is more flexible and allows specifying + special variants like V(random). - For libnm and nmcli, this field is called C(cloned-mac-address). type: str generate-mac-address-mask: description: - - With O(wifi.cloned-mac-address) setting V(random) or V(stable), by default all bits of the MAC address are scrambled and a locally-administered, - unicast MAC address is created. This property allows to specify that certain bits are fixed. - - Note that the least significant bit of the first MAC address will always be unset to create a unicast MAC address. + - With O(wifi.cloned-mac-address) setting V(random) or V(stable), by default all bits of the MAC address are scrambled + and a locally-administered, unicast MAC address is created. This property allows specifying that certain bits + are fixed. + - Note that the least significant bit of the first MAC address is always unset to create a unicast MAC address. - If the property is V(null), it is eligible to be overwritten by a default connection setting. - - If the value is still V(null) or an empty string, the default is to create a locally-administered, unicast MAC address. - - If the value contains one MAC address, this address is used as mask. The set bits of the mask are to be filled with the current MAC - address of the device, while the unset bits are subject to randomization. - - Setting V(FE:FF:FF:00:00:00) means to preserve the OUI of the current MAC address and only randomize the lower 3 bytes using the V(random) - or V(stable) algorithm. - - If the value contains one additional MAC address after the mask, this address is used instead of the current MAC address to fill the - bits that shall not be randomized. - - For example, a value of V(FE:FF:FF:00:00:00 68:F7:28:00:00:00) will set the OUI of the MAC address to 68:F7:28, while the lower bits - are randomized. - - A value of V(02:00:00:00:00:00 00:00:00:00:00:00) will create a fully scrambled globally-administered, burned-in MAC address. - - If the value contains more than one additional MAC addresses, one of them is chosen randomly. For example, V(02:00:00:00:00:00 00:00:00:00:00:00 - 02:00:00:00:00:00) will create a fully scrambled MAC address, randomly locally or globally administered. + - If the value is still V(null) or an empty string, the default is to create a locally-administered, unicast MAC + address. + - If the value contains one MAC address, this address is used as mask. The set bits of the mask are to be filled + with the current MAC address of the device, while the unset bits are subject to randomization. + - Setting V(FE:FF:FF:00:00:00) means to preserve the OUI of the current MAC address and only randomize the lower + 3 bytes using the V(random) or V(stable) algorithm. + - If the value contains one additional MAC address after the mask, this address is used instead of the current MAC + address to fill the bits that shall not be randomized. + - For example, a value of V(FE:FF:FF:00:00:00 68:F7:28:00:00:00) sets the OUI of the MAC address to 68:F7:28, while + the lower bits are randomized. + - A value of V(02:00:00:00:00:00 00:00:00:00:00:00) creates a fully scrambled globally-administered, burned-in MAC + address.
+ - If the value contains more than one additional MAC address, one of them is chosen randomly. For example, V(02:00:00:00:00:00 + 00:00:00:00:00:00 02:00:00:00:00:00) creates a fully scrambled MAC address, randomly locally or globally administered. type: str hidden: description: - - If V(true), indicates that the network is a non-broadcasting network that hides its SSID. This works both in infrastructure and AP - mode. - - In infrastructure mode, various workarounds are used for a more reliable discovery of hidden networks, such as probe-scanning the - SSID. However, these workarounds expose inherent insecurities with hidden SSID networks, and thus hidden SSID networks should be used - with caution. + - If V(true), indicates that the network is a non-broadcasting network that hides its SSID. This works both in infrastructure + and AP mode. + - In infrastructure mode, various workarounds are used for a more reliable discovery of hidden networks, such as + probe-scanning the SSID. However, these workarounds expose inherent insecurities with hidden SSID networks, and + thus hidden SSID networks should be used with caution. - In AP mode, the created network does not broadcast its SSID. - - Note that marking the network as hidden may be a privacy issue for you (in infrastructure mode) or client stations (in AP mode), as - the explicit probe-scans are distinctly recognizable on the air. + - Note that marking the network as hidden may be a privacy issue for you (in infrastructure mode) or client stations + (in AP mode), as the explicit probe-scans are distinctly recognizable on the air. type: bool default: false mac-address-blacklist: @@ -756,15 +819,15 @@ elements: str mac-address-randomization: description: - - One of V(0) (never randomize unless the user has set a global default to randomize and the supplicant supports randomization), V(1) - (never randomize the MAC address), or V(2) (always randomize the MAC address). + - One of V(0) (never randomize unless the user has set a global default to randomize and the supplicant supports + randomization), V(1) (never randomize the MAC address), or V(2) (always randomize the MAC address). - This property is deprecated for O(wifi.cloned-mac-address). type: int default: 0 choices: [0, 1, 2] mac-address: description: - - If specified, this connection will only apply to the Wi-Fi device whose permanent MAC address matches. + - If specified, this connection only applies to the Wi-Fi device whose permanent MAC address matches. - This property does not change the MAC address of the device (for example for MAC spoofing). type: str mode: @@ -773,13 +836,14 @@ choices: [infrastructure, mesh, adhoc, ap] default: infrastructure mtu: - description: If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into multiple Ethernet frames. + description: If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into + multiple Ethernet frames. type: int default: 0 powersave: description: - - One of V(2) (disable Wi-Fi power saving), V(3) (enable Wi-Fi power saving), V(1) (do not touch currently configure setting) or V(0) - (use the globally configured value). + - One of V(2) (disable Wi-Fi power saving), V(3) (enable Wi-Fi power saving), V(1) (do not touch the currently configured + setting) or V(0) (use the globally configured value). - All other values are reserved.
type: int default: 0 @@ -801,11 +865,12 @@ options: wake-on-wlan: description: - The NMSettingWirelessWakeOnWLan options to enable. Not all devices support all options. - - May be any combination of C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_ANY) (V(0x2)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_DISCONNECT) (V(0x4)), - C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_MAGIC) (V(0x8)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_GTK_REKEY_FAILURE) (V(0x10)), - C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_EAP_IDENTITY_REQUEST) (V(0x20)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_4WAY_HANDSHAKE) (V(0x40)), - C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_RFKILL_RELEASE) (V(0x80)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_TCP) (V(0x100)) or the special values V(0x1) - (to use global settings) and V(0x8000) (to disable management of Wake-on-LAN in NetworkManager). + - May be any combination of C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_ANY) (V(0x2)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_DISCONNECT) + (V(0x4)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_MAGIC) (V(0x8)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_GTK_REKEY_FAILURE) + (V(0x10)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_EAP_IDENTITY_REQUEST) (V(0x20)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_4WAY_HANDSHAKE) + (V(0x40)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_RFKILL_RELEASE) (V(0x80)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_TCP) + (V(0x100)) or the special values V(0x1) (to use global settings) and V(0x8000) (to disable management of Wake-on-LAN + in NetworkManager). - Note the option values' sum must be specified in order to combine multiple options. type: int default: 1 @@ -820,44 +885,50 @@ options: gsm: description: - The configuration of the GSM connection. - - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host. + - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on + the host. - 'An up-to-date list of supported attributes can be found here: U(https://networkmanager.dev/docs/api/latest/settings-gsm.html).' - - 'For instance to use apn, pin, username and password: V({apn: provider.apn, pin: 1234, username: apn.username, password: apn.password}).' + - 'For instance to use apn, pin, username and password: V({apn: provider.apn, pin: 1234, username: apn.username, password: + apn.password}).' type: dict version_added: 3.7.0 suboptions: apn: description: - The GPRS Access Point Name specifying the APN used when establishing a data session with the GSM-based network. - - The APN often determines how the user will be billed for their network usage and whether the user has access to the Internet or just - a provider-specific walled-garden, so it is important to use the correct APN for the user's mobile broadband plan. + - The APN often determines how the user is billed for their network usage and whether the user has access to the + Internet or just a provider-specific walled-garden, so it is important to use the correct APN for the user's mobile + broadband plan. - The APN may only be composed of the characters a-z, 0-9, ., and - per GSM 03.60 Section 14.9. type: str auto-config: - description: When V(true), the settings such as O(gsm.apn), O(gsm.username), or O(gsm.password) will default to values that match the - network the modem will register to in the Mobile Broadband Provider database. + description: When V(true), the settings such as O(gsm.apn), O(gsm.username), or O(gsm.password) default to values + that match the network the modem registers to in the Mobile Broadband Provider database. 
type: bool default: false device-id: description: - The device unique identifier (as given by the V(WWAN) management service) which this connection applies to. - - If given, the connection will only apply to the specified device. + - If given, the connection only applies to the specified device. type: str home-only: description: - - When V(true), only connections to the home network will be allowed. - - Connections to roaming networks will not be made. + - When V(true), only connections to the home network are allowed. + - Connections to roaming networks are not made. type: bool default: false mtu: - description: If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into multiple Ethernet frames. + description: If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into + multiple Ethernet frames. type: int default: 0 network-id: description: - The Network ID (GSM LAI format, ie MCC-MNC) to force specific network registration. - - If the Network ID is specified, NetworkManager will attempt to force the device to register only on the specified network. - - This can be used to ensure that the device does not roam when direct roaming control of the device is not otherwise possible. + - If the Network ID is specified, NetworkManager attempts to force the device to register only on the specified + network. + - This can be used to ensure that the device does not roam when direct roaming control of the device is not otherwise + possible. type: str number: description: Legacy setting that used to help establishing PPP data sessions for GSM-based modems. @@ -871,11 +942,12 @@ password-flags: description: - NMSettingSecretFlags indicating how to handle the O(gsm.password) property. - - 'Following choices are allowed: V(0) B(NONE): The system is responsible for providing and storing this secret (default), V(1) B(AGENT_OWNED): - A user secret agent is responsible for providing and storing this secret; when it is required agents will be asked to retrieve it - V(2) B(NOT_SAVED): This secret should not be saved, but should be requested from the user each time it is needed V(4) B(NOT_REQUIRED): - In situations where it cannot be automatically determined that the secret is required (some VPNs and PPP providers do not require - all secrets) this flag indicates that the specific secret is not required.' + - 'Following choices are allowed: V(0) B(NONE): The system is responsible for providing and storing this secret + (default); V(1) B(AGENT_OWNED): A user secret agent is responsible for providing and storing this secret; when + it is required agents are asked to retrieve it; V(2) B(NOT_SAVED): This secret should not be saved, but should + be requested from the user each time it is needed; V(4) B(NOT_REQUIRED): In situations where it cannot be automatically + determined that the secret is required (some VPNs and PPP providers do not require all secrets) this flag indicates + that the specific secret is not required.' type: int choices: [0, 1, 2, 4] default: 0 @@ -894,13 +966,15 @@ sim-id: description: - The SIM card unique identifier (as given by the C(WWAN) management service) which this connection applies to. - - If given, the connection will apply to any device also allowed by O(gsm.device-id) which contains a SIM card matching the given identifier. + - If given, the connection applies to any device also allowed by O(gsm.device-id) which contains a SIM card matching + the given identifier.
type: str sim-operator-id: description: - - A MCC/MNC string like V(310260) or V(21601I) identifying the specific mobile network operator which this connection applies to. - - If given, the connection will apply to any device also allowed by O(gsm.device-id) and O(gsm.sim-id) which contains a SIM card provisioned - by the given operator. + - A MCC/MNC string like V(310260) or V(21601I) identifying the specific mobile network operator which this connection + applies to. + - If given, the connection applies to any device also allowed by O(gsm.device-id) and O(gsm.sim-id) which contains + a SIM card provisioned by the given operator. type: str username: description: @@ -910,7 +984,8 @@ macvlan: description: - The configuration of the MAC VLAN connection. - - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host. + - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on + the host. - 'An up-to-date list of supported attributes can be found here: U(https://networkmanager.dev/docs/api/latest/settings-macvlan.html).' type: dict version_added: 6.6.0 suboptions: @@ -924,8 +999,9 @@ required: true parent: description: - - If given, specifies the parent interface name or parent connection UUID from which this MAC-VLAN interface should be created. If this - property is not specified, the connection must contain an "802-3-ethernet" setting with a "mac-address" property. + - If given, specifies the parent interface name or parent connection UUID from which this MAC-VLAN interface should + be created. If this property is not specified, the connection must contain an "802-3-ethernet" setting with a + "mac-address" property. type: str required: true promiscuous: @@ -939,7 +1015,8 @@ wireguard: description: - The configuration of the Wireguard connection. - - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host. + - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on + the host. - 'An up-to-date list of supported attributes can be found here: U(https://networkmanager.dev/docs/api/latest/settings-wireguard.html).' - 'For instance to configure a listen port: V({listen-port: 12345}).' type: dict version_added: 4.3.0 suboptions: fwmark: description: - The 32-bit fwmark for outgoing packets. - The use of fwmark is optional and is by default off. Setting it to 0 disables it. - - Note that O(wireguard.ip4-auto-default-route) or O(wireguard.ip6-auto-default-route) enabled, implies to automatically choose a fwmark. + - Note that enabling O(wireguard.ip4-auto-default-route) or O(wireguard.ip6-auto-default-route) implies automatically + choosing a fwmark. type: int ip4-auto-default-route: description: - Whether to enable special handling of the IPv4 default route. - - If enabled, the IPv4 default route from O(wireguard.peer-routes) will be placed to a dedicated routing-table and two policy routing - rules will be added. + - If enabled, the IPv4 default route from O(wireguard.peer-routes) is placed in a dedicated routing-table and two + policy routing rules are added.
+ - The fwmark number is also used as routing-table for the default-route, and if fwmark is zero, an unused fwmark/table + is chosen automatically. This corresponds to what wg-quick does with Table=auto and what WireGuard calls "Improved + Rule-based Routing". type: bool ip6-auto-default-route: description: - Like O(wireguard.ip4-auto-default-route), but for the IPv6 default route. type: bool listen-port: - description: The WireGuard connection listen-port. If not specified, the port will be chosen randomly when the interface comes up. + description: The WireGuard connection listen-port. If not specified, the port is chosen randomly when the interface + comes up. type: int mtu: description: - - If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into multiple fragments. - - If zero a default MTU is used. Note that contrary to wg-quick's MTU setting, this does not take into account the current routes at - the time of activation. + - If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into multiple + fragments. + - If zero a default MTU is used. Note that contrary to wg-quick's MTU setting, this does not take into account the + current routes at the time of activation. type: int peer-routes: description: - Whether to automatically add routes for the AllowedIPs ranges of the peers. - - If V(true) (the default), NetworkManager will automatically add routes in the routing tables according to C(ipv4.route-table) and - C(ipv6.route-table). Usually you want this automatism enabled. - - If V(false), no such routes are added automatically. In this case, the user may want to configure static routes in C(ipv4.routes) - and C(ipv6.routes), respectively. - - Note that if the peer's AllowedIPs is V(0.0.0.0/0) or V(::/0) and the profile's C(ipv4.never-default) or C(ipv6.never-default) setting - is enabled, the peer route for this peer will not be added automatically. + - If V(true) (the default), NetworkManager automatically adds routes in the routing tables according to C(ipv4.route-table) + and C(ipv6.route-table). Usually you want this automatism enabled. + - If V(false), no such routes are added automatically. In this case, the user may want to configure static routes + in C(ipv4.routes) and C(ipv6.routes), respectively. + - Note that if the peer's AllowedIPs is V(0.0.0.0/0) or V(::/0) and the profile's C(ipv4.never-default) or C(ipv6.never-default) + setting is enabled, the peer route for this peer is not added automatically. type: bool private-key: description: The 256 bit private-key in base64 encoding. @@ -992,13 +1073,13 @@ options: vpn: description: - Configuration of a VPN connection (PPTP and L2TP). - - In order to use L2TP you need to be sure that C(network-manager-l2tp) - and C(network-manager-l2tp-gnome) if host has UI - are installed - on the host. + - In order to use L2TP you need to be sure that C(network-manager-l2tp) - and C(network-manager-l2tp-gnome) if host + has UI - are installed on the host. type: dict version_added: 5.1.0 suboptions: permissions: - description: User that will have permission to use the connection. + description: User that has permission to use the connection. type: str required: true service-type: @@ -1006,17 +1087,19 @@ options: type: str required: true gateway: - description: The gateway to connection. It can be an IP address (for example V(192.0.2.1)) or a FQDN address (for example V(vpn.example.com)). + description: The gateway to connection. 
It can be an IP address (for example V(192.0.2.1)) or a FQDN address (for + example V(vpn.example.com)). type: str required: true password-flags: description: - NMSettingSecretFlags indicating how to handle the C(vpn.password) property. - - 'Following choices are allowed: V(0) B(NONE): The system is responsible for providing and storing this secret (default); V(1) B(AGENT_OWNED): - A user secret agent is responsible for providing and storing this secret; when it is required agents will be asked to retrieve it; - V(2) B(NOT_SAVED): This secret should not be saved, but should be requested from the user each time it is needed; V(4) B(NOT_REQUIRED): - In situations where it cannot be automatically determined that the secret is required (some VPNs and PPP providers do not require - all secrets) this flag indicates that the specific secret is not required.' + - 'Following choices are allowed: V(0) B(NONE): The system is responsible for providing and storing this secret + (default); V(1) B(AGENT_OWNED): A user secret agent is responsible for providing and storing this secret; when + it is required agents are asked to retrieve it; V(2) B(NOT_SAVED): This secret should not be saved, but should + be requested from the user each time it is needed; V(4) B(NOT_REQUIRED): In situations where it cannot be automatically + determined that the secret is required (some VPNs and PPP providers do not require all secrets) this flag indicates + that the specific secret is not required.' type: int choices: [0, 1, 2, 4] default: 0 @@ -1032,7 +1115,8 @@ options: ipsec-psk: description: - The pre-shared key in base64 encoding. - - "You can encode using this Ansible jinja2 expression: V(\"0s{{ '[YOUR PRE-SHARED KEY]' | ansible.builtin.b64encode }}\")." + - > + You can encode using this Ansible Jinja2 expression: V("0s{{ '[YOUR PRE-SHARED KEY]' | ansible.builtin.b64encode }}"). - This is only used when O(vpn.ipsec-enabled=true). type: str sriov: @@ -1065,7 +1149,8 @@ options: vfs: description: - 'Virtual function descriptors in the form: V(INDEX [ATTR=VALUE[ ATTR=VALUE]...]).' - - Multiple VFs can be specified using a comma as separator, for example V(2 mac=00:11:22:33:44:55 spoof-check=true,3 vlans=100). + - Multiple VFs can be specified using a comma as separator, for example V(2 mac=00:11:22:33:44:55 spoof-check=true,3 + vlans=100). type: str """ @@ -1177,7 +1262,6 @@ EXAMPLES = r""" # ``` - ## playbook-add.yml example - hosts: openstack-stage @@ -1214,7 +1298,7 @@ EXAMPLES = r""" with_items: - '{{ nmcli_team_slave }}' -###### Working with all cloud nodes - Bonding +##### Working with all cloud nodes - Bonding - name: Try nmcli add bond - conn_name only & ip4 gw4 mode community.general.nmcli: type: bond @@ -1343,7 +1427,7 @@ EXAMPLES = r""" community.general.nmcli: conn_name: my-eth1 state: up - reload: true + conn_reload: true - name: Add second ip4 address community.general.nmcli: @@ -1530,6 +1614,29 @@ EXAMPLES = r""" vlanid: 5 state: present +## Creating VRF and adding VLAN interface to it +- name: Create VRF + community.general.nmcli: + type: vrf + ifname: vrf10 + table: 10 + state: present + conn_name: vrf10 + method4: disabled + method6: disabled + +- name: Create VLAN interface inside VRF + community.general.nmcli: + conn_name: "eth0.124" + type: vlan + vlanid: "124" + vlandev: "eth0" + master: "vrf10" + slave_type: vrf + state: "present" + ip4: '192.168.124.50' + gw4: '192.168.124.1' + ## Defining ip rules while setting a static IP ## table 'production' is set with id 200 in this example. 
- name: Set Static ips for interface with ip rules and routes @@ -1616,6 +1723,8 @@ class Nmcli(object): self.state = module.params['state'] self.ignore_unsupported_suboptions = module.params['ignore_unsupported_suboptions'] self.autoconnect = module.params['autoconnect'] + self.autoconnect_priority = module.params['autoconnect_priority'] + self.autoconnect_retries = module.params['autoconnect_retries'] self.conn_name = module.params['conn_name'] self.conn_reload = module.params['conn_reload'] self.slave_type = module.params['slave_type'] @@ -1658,6 +1767,7 @@ class Nmcli(object): self.downdelay = module.params['downdelay'] self.updelay = module.params['updelay'] self.xmit_hash_policy = module.params['xmit_hash_policy'] + self.fail_over_mac = module.params['fail_over_mac'] self.arp_interval = module.params['arp_interval'] self.arp_ip_target = module.params['arp_ip_target'] self.slavepriority = module.params['slavepriority'] @@ -1695,6 +1805,7 @@ class Nmcli(object): self.wireguard = module.params['wireguard'] self.vpn = module.params['vpn'] self.transport_mode = module.params['transport_mode'] + self.infiniband_mac = module.params['infiniband_mac'] self.sriov = module.params['sriov'] if self.method4: @@ -1715,6 +1826,9 @@ class Nmcli(object): else: self.ipv6_method = None + if self.type == "vrf": + self.table = module.params['table'] + self.edit_commands = [] self.extra_options_validation() @@ -1726,10 +1840,7 @@ class Nmcli(object): self.module.fail_json(msg="'master' option is required when 'slave_type' is specified.") def execute_command(self, cmd, use_unsafe_shell=False, data=None): - if isinstance(cmd, list): - cmd = [to_text(item) for item in cmd] - else: - cmd = to_text(cmd) + cmd = [to_text(item) for item in cmd] return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data) def execute_edit_commands(self, commands, arguments): @@ -1742,12 +1853,15 @@ class Nmcli(object): # Options common to multiple connection types. options = { 'connection.autoconnect': self.autoconnect, + 'connection.autoconnect-priority': self.autoconnect_priority, + 'connection.autoconnect-retries': self.autoconnect_retries, 'connection.zone': self.zone, } # IP address options. 
# The ovs-interface type can be both ip_conn_type and have a master - if (self.ip_conn_type and not self.master) or self.type == "ovs-interface": + # An interface that has a master but is of slave type vrf can have an IP address + if (self.ip_conn_type and (not self.master or self.slave_type == "vrf")) or self.type == "ovs-interface": options.update({ 'ipv4.addresses': self.enforce_ipv4_cidr_notation(self.ip4), 'ipv4.dhcp-client-id': self.dhcp_client_id, @@ -1806,6 +1920,7 @@ class Nmcli(object): 'primary': self.primary, 'updelay': self.updelay, 'xmit_hash_policy': self.xmit_hash_policy, + 'fail_over_mac': self.fail_over_mac, }) elif self.type == 'bond-slave': if self.slave_type and self.slave_type != 'bond': @@ -1960,6 +2075,12 @@ class Nmcli(object): options.update({ 'infiniband.transport-mode': self.transport_mode, }) + if self.infiniband_mac: + options['infiniband.mac-address'] = self.infiniband_mac + elif self.type == 'vrf': + options.update({ + 'table': self.table, + }) if self.type == 'ethernet': if self.sriov: @@ -2016,6 +2137,7 @@ class Nmcli(object): 'vpn', 'loopback', 'ovs-interface', + 'vrf' ) @property @@ -2145,7 +2267,7 @@ class Nmcli(object): @staticmethod def settings_type(setting): - if setting in ('bridge.stp', + if setting in {'bridge.stp', 'bridge-port.hairpin-mode', 'connection.autoconnect', 'ipv4.never-default', @@ -2155,9 +2277,9 @@ class Nmcli(object): 'ipv6.ignore-auto-dns', 'ipv6.ignore-auto-routes', '802-11-wireless.hidden', - 'team.runner-fast-rate'): + 'team.runner-fast-rate'}: return bool - elif setting in ('ipv4.addresses', + elif setting in {'ipv4.addresses', 'ipv6.addresses', 'ipv4.dns', 'ipv4.dns-search', @@ -2174,8 +2296,11 @@ class Nmcli(object): '802-11-wireless-security.proto', '802-11-wireless-security.psk-flags', '802-11-wireless-security.wep-key-flags', - '802-11-wireless.mac-address-blacklist'): + '802-11-wireless.mac-address-blacklist'}: return list + elif setting in {'connection.autoconnect-priority', + 'connection.autoconnect-retries'}: + return int return str def get_route_params(self, raw_values): @@ -2253,6 +2378,9 @@ class Nmcli(object): if key == 'xmit_hash_policy': cmd.extend(['+bond.options', 'xmit_hash_policy=%s' % value]) continue + if key == 'fail_over_mac': + cmd.extend(['+bond.options', 'fail_over_mac=%s' % value]) + continue cmd.extend([key, value]) return self.execute_command(cmd) @@ -2354,7 +2482,7 @@ class Nmcli(object): for line in out.splitlines(): prefix = '%s.' % setting - if (line.startswith(prefix)): + if line.startswith(prefix): pair = line.split(':', 1) property = pair[0].strip().replace(prefix, '') properties.append(property) @@ -2432,9 +2560,11 @@ class Nmcli(object): if isinstance(current_value, list) and isinstance(value, list): # compare values between two lists - if key in ('ipv4.addresses', 'ipv6.addresses'): + if key in ('ipv4.addresses', 'ipv6.addresses', 'ipv4.dns', 'ipv6.dns', 'ipv4.dns-search', 'ipv6.dns-search'): # The order of IP addresses matters because the first one # is the default source address for outbound connections. + # Similarly, the order of DNS nameservers and search + # suffixes is important. 
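# For the keys above an exact, order-sensitive comparison is intended; all other
# list-valued settings fall through to the sorted() comparison below, so their order is ignored.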
changed |= current_value != value else: changed |= sorted(current_value) != sorted(value) @@ -2480,11 +2610,13 @@ def main(): argument_spec=dict( ignore_unsupported_suboptions=dict(type='bool', default=False), autoconnect=dict(type='bool', default=True), + autoconnect_priority=dict(type='int'), + autoconnect_retries=dict(type='int'), state=dict(type='str', required=True, choices=['absent', 'present', 'up', 'down']), conn_name=dict(type='str', required=True), conn_reload=dict(type='bool', default=False), master=dict(type='str'), - slave_type=dict(type='str', choices=['bond', 'bridge', 'team', 'ovs-port']), + slave_type=dict(type='str', choices=['bond', 'bridge', 'team', 'ovs-port', 'vrf']), ifname=dict(type='str'), type=dict(type='str', choices=[ @@ -2512,6 +2644,7 @@ def main(): 'ovs-interface', 'ovs-bridge', 'ovs-port', + 'vrf', ]), ip4=dict(type='list', elements='str'), gw4=dict(type='str'), @@ -2569,6 +2702,7 @@ def main(): downdelay=dict(type='int'), updelay=dict(type='int'), xmit_hash_policy=dict(type='str'), + fail_over_mac=dict(type='str', choices=['none', 'active', 'follow']), arp_interval=dict(type='int'), arp_ip_target=dict(type='str'), primary=dict(type='str'), @@ -2622,13 +2756,21 @@ def main(): tap=dict(type='bool'))), wireguard=dict(type='dict'), vpn=dict(type='dict'), - transport_mode=dict(type='str', choices=['datagram', 'connected']), sriov=dict(type='dict'), + table=dict(type='int'), + # infiniband specific vars + transport_mode=dict(type='str', choices=['datagram', 'connected']), + infiniband_mac=dict(type='str'), + ), mutually_exclusive=[['never_default4', 'gw4'], ['routes4_extended', 'routes4'], ['routes6_extended', 'routes6']], - required_if=[("type", "wifi", [("ssid")])], + required_if=[ + ("type", "wifi", ["ssid"]), + ("type", "team-slave", ["master", "ifname"]), + ("slave_type", "team", ["master", "ifname"]), + ], supports_check_mode=True, ) module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') @@ -2638,21 +2780,12 @@ def main(): (rc, out, err) = (None, '', '') result = {'conn_name': nmcli.conn_name, 'state': nmcli.state} - # check for issues - if nmcli.conn_name is None: - nmcli.module.fail_json(msg="Please specify a name for the connection") # team checks if nmcli.type == "team": if nmcli.runner_hwaddr_policy and not nmcli.runner == "activebackup": nmcli.module.fail_json(msg="Runner-hwaddr-policy is only allowed for runner activebackup") if nmcli.runner_fast_rate is not None and nmcli.runner != "lacp": nmcli.module.fail_json(msg="runner-fast-rate is only allowed for runner lacp") - # team-slave checks - if nmcli.type == 'team-slave' or nmcli.slave_type == 'team': - if nmcli.master is None: - nmcli.module.fail_json(msg="Please specify a name for the master when type is %s" % nmcli.type) - if nmcli.ifname is None: - nmcli.module.fail_json(msg="Please specify an interface name for the connection when type is %s" % nmcli.type) if nmcli.type == 'wifi': unsupported_properties = {} if nmcli.wifi: @@ -2675,7 +2808,7 @@ def main(): (rc, out, err) = nmcli.down_connection() (rc, out, err) = nmcli.remove_connection() if rc != 0: - module.fail_json(name=('No Connection named %s exists' % nmcli.conn_name), msg=err, rc=rc) + module.fail_json(name=('Error removing connection named %s' % nmcli.conn_name), msg=err, rc=rc) elif nmcli.state == 'present': if nmcli.connection_exists(): @@ -2712,7 +2845,7 @@ def main(): (rc, out, err) = nmcli.reload_connection() (rc, out, err) = nmcli.up_connection() if rc != 0: - module.fail_json(name=('No 
Connection named %s exists' % nmcli.conn_name), msg=err, rc=rc) + module.fail_json(name=('Error bringing up connection named %s' % nmcli.conn_name), msg=err, rc=rc) elif nmcli.state == 'down': if nmcli.connection_exists(): @@ -2722,7 +2855,7 @@ def main(): (rc, out, err) = nmcli.reload_connection() (rc, out, err) = nmcli.down_connection() if rc != 0: - module.fail_json(name=('No Connection named %s exists' % nmcli.conn_name), msg=err, rc=rc) + module.fail_json(name=('Error bringing down connection named %s' % nmcli.conn_name), msg=err, rc=rc) except NmcliModuleError as e: module.fail_json(name=nmcli.conn_name, msg=str(e)) diff --git a/plugins/modules/nomad_job_info.py b/plugins/modules/nomad_job_info.py index 0a5c81cf15..b3703b64ce 100644 --- a/plugins/modules/nomad_job_info.py +++ b/plugins/modules/nomad_job_info.py @@ -49,219 +49,219 @@ EXAMPLES = r""" RETURN = r""" result: - description: List with dictionary contains jobs info - returned: success - type: list - sample: [ - { + description: List with dictionary contains jobs info. + returned: success + type: list + sample: + [ + { + "Affinities": null, + "AllAtOnce": false, + "Constraints": null, + "ConsulToken": "", + "CreateIndex": 13, + "Datacenters": [ + "dc1" + ], + "Dispatched": false, + "ID": "example", + "JobModifyIndex": 13, + "Meta": null, + "ModifyIndex": 13, + "Multiregion": null, + "Name": "example", + "Namespace": "default", + "NomadTokenID": "", + "ParameterizedJob": null, + "ParentID": "", + "Payload": null, + "Periodic": null, + "Priority": 50, + "Region": "global", + "Spreads": null, + "Stable": false, + "Status": "pending", + "StatusDescription": "", + "Stop": false, + "SubmitTime": 1602244370615307000, + "TaskGroups": [ + { "Affinities": null, - "AllAtOnce": false, "Constraints": null, - "ConsulToken": "", - "CreateIndex": 13, - "Datacenters": [ - "dc1" - ], - "Dispatched": false, - "ID": "example", - "JobModifyIndex": 13, - "Meta": null, - "ModifyIndex": 13, - "Multiregion": null, - "Name": "example", - "Namespace": "default", - "NomadTokenID": "", - "ParameterizedJob": null, - "ParentID": "", - "Payload": null, - "Periodic": null, - "Priority": 50, - "Region": "global", - "Spreads": null, - "Stable": false, - "Status": "pending", - "StatusDescription": "", - "Stop": false, - "SubmitTime": 1602244370615307000, - "TaskGroups": [ - { - "Affinities": null, - "Constraints": null, - "Count": 1, - "EphemeralDisk": { - "Migrate": false, - "SizeMB": 300, - "Sticky": false - }, - "Meta": null, - "Migrate": { - "HealthCheck": "checks", - "HealthyDeadline": 300000000000, - "MaxParallel": 1, - "MinHealthyTime": 10000000000 - }, - "Name": "cache", - "Networks": null, - "ReschedulePolicy": { - "Attempts": 0, - "Delay": 30000000000, - "DelayFunction": "exponential", - "Interval": 0, - "MaxDelay": 3600000000000, - "Unlimited": true - }, - "RestartPolicy": { - "Attempts": 3, - "Delay": 15000000000, - "Interval": 1800000000000, - "Mode": "fail" - }, - "Scaling": null, - "Services": null, - "ShutdownDelay": null, - "Spreads": null, - "StopAfterClientDisconnect": null, - "Tasks": [ - { - "Affinities": null, - "Artifacts": null, - "CSIPluginConfig": null, - "Config": { - "image": "redis:3.2", - "port_map": [ - { - "db": 6379.0 - } - ] - }, - "Constraints": null, - "DispatchPayload": null, - "Driver": "docker", - "Env": null, - "KillSignal": "", - "KillTimeout": 5000000000, - "Kind": "", - "Leader": false, - "Lifecycle": null, - "LogConfig": { - "MaxFileSizeMB": 10, - "MaxFiles": 10 - }, - "Meta": null, - "Name": "redis", - "Resources": { 
- "CPU": 500, - "Devices": null, - "DiskMB": 0, - "IOPS": 0, - "MemoryMB": 256, - "Networks": [ - { - "CIDR": "", - "DNS": null, - "Device": "", - "DynamicPorts": [ - { - "HostNetwork": "default", - "Label": "db", - "To": 0, - "Value": 0 - } - ], - "IP": "", - "MBits": 10, - "Mode": "", - "ReservedPorts": null - } - ] - }, - "RestartPolicy": { - "Attempts": 3, - "Delay": 15000000000, - "Interval": 1800000000000, - "Mode": "fail" - }, - "Services": [ - { - "AddressMode": "auto", - "CanaryMeta": null, - "CanaryTags": null, - "Checks": [ - { - "AddressMode": "", - "Args": null, - "CheckRestart": null, - "Command": "", - "Expose": false, - "FailuresBeforeCritical": 0, - "GRPCService": "", - "GRPCUseTLS": false, - "Header": null, - "InitialStatus": "", - "Interval": 10000000000, - "Method": "", - "Name": "alive", - "Path": "", - "PortLabel": "", - "Protocol": "", - "SuccessBeforePassing": 0, - "TLSSkipVerify": false, - "TaskName": "", - "Timeout": 2000000000, - "Type": "tcp" - } - ], - "Connect": null, - "EnableTagOverride": false, - "Meta": null, - "Name": "redis-cache", - "PortLabel": "db", - "Tags": [ - "global", - "cache" - ], - "TaskName": "" - } - ], - "ShutdownDelay": 0, - "Templates": null, - "User": "", - "Vault": null, - "VolumeMounts": null - } - ], - "Update": { - "AutoPromote": false, - "AutoRevert": false, - "Canary": 0, - "HealthCheck": "checks", - "HealthyDeadline": 180000000000, - "MaxParallel": 1, - "MinHealthyTime": 10000000000, - "ProgressDeadline": 600000000000, - "Stagger": 30000000000 - }, - "Volumes": null - } - ], - "Type": "service", - "Update": { - "AutoPromote": false, - "AutoRevert": false, - "Canary": 0, - "HealthCheck": "", - "HealthyDeadline": 0, - "MaxParallel": 1, - "MinHealthyTime": 0, - "ProgressDeadline": 0, - "Stagger": 30000000000 + "Count": 1, + "EphemeralDisk": { + "Migrate": false, + "SizeMB": 300, + "Sticky": false }, - "VaultNamespace": "", - "VaultToken": "", - "Version": 0 - } + "Meta": null, + "Migrate": { + "HealthCheck": "checks", + "HealthyDeadline": 300000000000, + "MaxParallel": 1, + "MinHealthyTime": 10000000000 + }, + "Name": "cache", + "Networks": null, + "ReschedulePolicy": { + "Attempts": 0, + "Delay": 30000000000, + "DelayFunction": "exponential", + "Interval": 0, + "MaxDelay": 3600000000000, + "Unlimited": true + }, + "RestartPolicy": { + "Attempts": 3, + "Delay": 15000000000, + "Interval": 1800000000000, + "Mode": "fail" + }, + "Scaling": null, + "Services": null, + "ShutdownDelay": null, + "Spreads": null, + "StopAfterClientDisconnect": null, + "Tasks": [ + { + "Affinities": null, + "Artifacts": null, + "CSIPluginConfig": null, + "Config": { + "image": "redis:3.2", + "port_map": [ + { + "db": 6379.0 + } + ] + }, + "Constraints": null, + "DispatchPayload": null, + "Driver": "docker", + "Env": null, + "KillSignal": "", + "KillTimeout": 5000000000, + "Kind": "", + "Leader": false, + "Lifecycle": null, + "LogConfig": { + "MaxFileSizeMB": 10, + "MaxFiles": 10 + }, + "Meta": null, + "Name": "redis", + "Resources": { + "CPU": 500, + "Devices": null, + "DiskMB": 0, + "IOPS": 0, + "MemoryMB": 256, + "Networks": [ + { + "CIDR": "", + "DNS": null, + "Device": "", + "DynamicPorts": [ + { + "HostNetwork": "default", + "Label": "db", + "To": 0, + "Value": 0 + } + ], + "IP": "", + "MBits": 10, + "Mode": "", + "ReservedPorts": null + } + ] + }, + "RestartPolicy": { + "Attempts": 3, + "Delay": 15000000000, + "Interval": 1800000000000, + "Mode": "fail" + }, + "Services": [ + { + "AddressMode": "auto", + "CanaryMeta": null, + "CanaryTags": null, + 
"Checks": [ + { + "AddressMode": "", + "Args": null, + "CheckRestart": null, + "Command": "", + "Expose": false, + "FailuresBeforeCritical": 0, + "GRPCService": "", + "GRPCUseTLS": false, + "Header": null, + "InitialStatus": "", + "Interval": 10000000000, + "Method": "", + "Name": "alive", + "Path": "", + "PortLabel": "", + "Protocol": "", + "SuccessBeforePassing": 0, + "TLSSkipVerify": false, + "TaskName": "", + "Timeout": 2000000000, + "Type": "tcp" + } + ], + "Connect": null, + "EnableTagOverride": false, + "Meta": null, + "Name": "redis-cache", + "PortLabel": "db", + "Tags": [ + "global", + "cache" + ], + "TaskName": "" + } + ], + "ShutdownDelay": 0, + "Templates": null, + "User": "", + "Vault": null, + "VolumeMounts": null + } + ], + "Update": { + "AutoPromote": false, + "AutoRevert": false, + "Canary": 0, + "HealthCheck": "checks", + "HealthyDeadline": 180000000000, + "MaxParallel": 1, + "MinHealthyTime": 10000000000, + "ProgressDeadline": 600000000000, + "Stagger": 30000000000 + }, + "Volumes": null + } + ], + "Type": "service", + "Update": { + "AutoPromote": false, + "AutoRevert": false, + "Canary": 0, + "HealthCheck": "", + "HealthyDeadline": 0, + "MaxParallel": 1, + "MinHealthyTime": 0, + "ProgressDeadline": 0, + "Stagger": 30000000000 + }, + "VaultNamespace": "", + "VaultToken": "", + "Version": 0 + } ] - """ from ansible.module_utils.basic import AnsibleModule, missing_required_lib diff --git a/plugins/modules/nomad_token.py b/plugins/modules/nomad_token.py index 07abd9d7c3..c189bf4b85 100644 --- a/plugins/modules/nomad_token.py +++ b/plugins/modules/nomad_token.py @@ -98,25 +98,26 @@ EXAMPLES = r""" RETURN = r""" result: - description: Result returned by nomad. - returned: always - type: dict - sample: { - "accessor_id": "0d01c55f-8d63-f832-04ff-1866d4eb594e", - "create_index": 14, - "create_time": "2023-11-12T18:48:34.248857001Z", - "expiration_time": null, - "expiration_ttl": "", - "global": true, - "hash": "eSn8H8RVqh8As8WQNnC2vlBRqXy6DECogc5umzX0P30=", - "modify_index": 836, - "name": "devs", - "policies": [ - "readonly" - ], - "roles": null, - "secret_id": "12e878ab-e1f6-e103-b4c4-3b5173bb4cea", - "type": "client" + description: Result returned by nomad. + returned: always + type: dict + sample: + { + "accessor_id": "0d01c55f-8d63-f832-04ff-1866d4eb594e", + "create_index": 14, + "create_time": "2023-11-12T18:48:34.248857001Z", + "expiration_time": null, + "expiration_ttl": "", + "global": true, + "hash": "eSn8H8RVqh8As8WQNnC2vlBRqXy6DECogc5umzX0P30=", + "modify_index": 836, + "name": "devs", + "policies": [ + "readonly" + ], + "roles": null, + "secret_id": "12e878ab-e1f6-e103-b4c4-3b5173bb4cea", + "type": "client" } """ diff --git a/plugins/modules/nosh.py b/plugins/modules/nosh.py index e3e73e0b93..7cd4f4ad66 100644 --- a/plugins/modules/nosh.py +++ b/plugins/modules/nosh.py @@ -35,22 +35,22 @@ options: required: false choices: [started, stopped, reset, restarted, reloaded] description: - - V(started)/V(stopped) are idempotent actions that will not run commands unless necessary. - - V(restarted) will always bounce the service. - - V(reloaded) will send a SIGHUP or start the service. - - V(reset) will start or stop the service according to whether it is enabled or not. + - V(started)/V(stopped) are idempotent actions that do not run commands unless necessary. + - V(restarted) always bounces the service. + - V(reloaded) sends a SIGHUP or starts the service. + - V(reset) starts or stops the service according to whether it is enabled or not. 
enabled: required: false type: bool description: - - Enable or disable the service, independently of C(*.preset) file preference or running state. Mutually exclusive with O(preset). Will - take effect prior to O(state=reset). + - Enable or disable the service, independently of C(*.preset) file preference or running state. Mutually exclusive with + O(preset). It takes effect prior to O(state=reset). preset: required: false type: bool description: - - Enable or disable the service according to local preferences in C(*.preset) files. Mutually exclusive with O(enabled). Only has an effect - if set to true. Will take effect prior to O(state=reset). + - Enable or disable the service according to local preferences in C(*.preset) files. Mutually exclusive with O(enabled). + Only has an effect if set to true. It takes effect prior to O(state=reset). user: required: false default: false @@ -145,7 +145,8 @@ state: type: str sample: "reloaded" status: - description: A dictionary with the key=value pairs returned by C(system-control show-json) or V(none) if the service is not loaded. + description: A dictionary with the key=value pairs returned by C(system-control show-json) or V(none) if the service is + not loaded. returned: success type: complex contains: diff --git a/plugins/modules/nsupdate.py b/plugins/modules/nsupdate.py index 9f665626b2..4049996ca3 100644 --- a/plugins/modules/nsupdate.py +++ b/plugins/modules/nsupdate.py @@ -63,8 +63,8 @@ options: type: str zone: description: - - DNS record will be modified on this O(zone). - - When omitted DNS will be queried to attempt finding the correct zone. + - DNS record is modified on this O(zone). + - When omitted, DNS is queried to attempt finding the correct zone. type: str record: description: @@ -144,10 +144,6 @@ EXAMPLES = r""" """ RETURN = r""" -changed: - description: If module has modified record. - returned: success - type: str record: description: DNS record. 
returned: success @@ -476,18 +472,18 @@ def main(): module = AnsibleModule( argument_spec=dict( - state=dict(required=False, default='present', choices=['present', 'absent'], type='str'), + state=dict(default='present', choices=['present', 'absent'], type='str'), server=dict(required=True, type='str'), - port=dict(required=False, default=53, type='int'), - key_name=dict(required=False, type='str'), - key_secret=dict(required=False, type='str', no_log=True), - key_algorithm=dict(required=False, default='hmac-md5', choices=tsig_algs, type='str'), - zone=dict(required=False, default=None, type='str'), + port=dict(default=53, type='int'), + key_name=dict(type='str'), + key_secret=dict(type='str', no_log=True), + key_algorithm=dict(default='hmac-md5', choices=tsig_algs, type='str'), + zone=dict(type='str'), record=dict(required=True, type='str'), - type=dict(required=False, default='A', type='str'), - ttl=dict(required=False, default=3600, type='int'), - value=dict(required=False, default=None, type='list', elements='str'), - protocol=dict(required=False, default='tcp', choices=['tcp', 'udp'], type='str') + type=dict(default='A', type='str'), + ttl=dict(default=3600, type='int'), + value=dict(type='list', elements='str'), + protocol=dict(default='tcp', choices=['tcp', 'udp'], type='str') ), supports_check_mode=True ) diff --git a/plugins/modules/ocapi_command.py b/plugins/modules/ocapi_command.py index 001d113ca3..39269c99cb 100644 --- a/plugins/modules/ocapi_command.py +++ b/plugins/modules/ocapi_command.py @@ -154,7 +154,8 @@ msg: sample: "Action was successful" jobUri: - description: URI to use to monitor status of the operation. Returned for async commands such as Firmware Update, Firmware Activate. + description: URI to use to monitor status of the operation. Returned for async commands such as Firmware Update, Firmware + Activate. returned: when supported type: str sample: "https://ioma.wdc.com/Storage/Devices/openflex-data24-usalp03020qb0003/Jobs/FirmwareUpdate/" diff --git a/plugins/modules/ocapi_info.py b/plugins/modules/ocapi_info.py index f2d708965c..150b3ad7e2 100644 --- a/plugins/modules/ocapi_info.py +++ b/plugins/modules/ocapi_info.py @@ -93,7 +93,8 @@ operationStatus: sample: "Activate needed" operationStatusId: - description: Integer value of status (corresponds to operationStatus). Applies to O(command=JobStatus). See OCAPI documentation for details. + description: Integer value of status (corresponds to operationStatus). Applies to O(command=JobStatus). See OCAPI documentation + for details. returned: when supported type: int sample: 65540 @@ -106,8 +107,8 @@ operationHealth: operationHealthId: description: >- - Integer value for health of the operation (corresponds to RV(operationHealth)). Applies to O(command=JobStatus). See OCAPI documentation for - details. + Integer value for health of the operation (corresponds to RV(operationHealth)). Applies to O(command=JobStatus). See OCAPI + documentation for details. returned: when supported type: str sample: "OK" @@ -119,24 +120,25 @@ details: elements: str status: - description: Dictionary containing status information. See OCAPI documentation for details. - returned: when supported - type: dict - sample: { - "Details": [ - "None" - ], - "Health": [ - { - "ID": 5, - "Name": "OK" - } - ], - "State": { - "ID": 16, - "Name": "In service" - } - } + description: Dictionary containing status information. See OCAPI documentation for details. 
+ returned: when supported + type: dict + sample: + { + "Details": [ + "None" + ], + "Health": [ + { + "ID": 5, + "Name": "OK" + } + ], + "State": { + "ID": 16, + "Name": "In service" + } + } """ from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/oci_vcn.py b/plugins/modules/oci_vcn.py index 9503698fd7..56a637ac2c 100644 --- a/plugins/modules/oci_vcn.py +++ b/plugins/modules/oci_vcn.py @@ -12,8 +12,8 @@ DOCUMENTATION = r""" module: oci_vcn short_description: Manage Virtual Cloud Networks(VCN) in OCI description: - - This module allows the user to create, delete and update virtual cloud networks(VCNs) in OCI. The complete Oracle Cloud Infrastructure Ansible - Modules can be downloaded from U(https://github.com/oracle/oci-ansible-modules/releases). + - This module allows the user to create, delete and update virtual cloud networks(VCNs) in OCI. The complete Oracle Cloud + Infrastructure Ansible Modules can be downloaded from U(https://github.com/oracle/oci-ansible-modules/releases). attributes: check_mode: support: none @@ -25,18 +25,18 @@ options: type: str required: false compartment_id: - description: The OCID of the compartment to contain the VCN. Required when creating a VCN with O(state=present). This option is mutually exclusive - with O(vcn_id). + description: The OCID of the compartment to contain the VCN. Required when creating a VCN with O(state=present). This + option is mutually exclusive with O(vcn_id). type: str display_name: description: A user-friendly name. Does not have to be unique, and it is changeable. type: str aliases: ['name'] dns_label: - description: A DNS label for the VCN, used in conjunction with the VNIC's hostname and subnet's DNS label to form a fully qualified domain - name (FQDN) for each VNIC within this subnet (for example, V(bminstance-1.subnet123.vcn1.oraclevcn.com)). Not required to be unique, but it is - a best practice to set unique DNS labels for VCNs in your tenancy. Must be an alphanumeric string that begins with a letter. The value cannot - be changed. + description: A DNS label for the VCN, used in conjunction with the VNIC's hostname and subnet's DNS label to form a fully + qualified domain name (FQDN) for each VNIC within this subnet (for example, V(bminstance-1.subnet123.vcn1.oraclevcn.com)). + Not required to be unique, but it is a best practice to set unique DNS labels for VCNs in your tenancy. Must be an alphanumeric + string that begins with a letter. The value cannot be changed. type: str state: description: Create or update a VCN with O(state=present). Use O(state=absent) to delete a VCN. @@ -44,8 +44,8 @@ options: default: present choices: ['present', 'absent'] vcn_id: - description: The OCID of the VCN. Required when deleting a VCN with O(state=absent) or updating a VCN with O(state=present). This option is - mutually exclusive with O(compartment_id). + description: The OCID of the VCN. Required when deleting a VCN with O(state=absent) or updating a VCN with O(state=present). + This option is mutually exclusive with O(compartment_id). 
type: str
aliases: ['id']
author: "Rohit Chaware (@rohitChaware)"
@@ -78,22 +78,23 @@ EXAMPLES = r"""
RETURN = r"""
vcn:
- description: Information about the VCN
- returned: On successful create and update operation
- type: dict
- sample: {
- "cidr_block": "10.0.0.0/16",
- compartment_id": "ocid1.compartment.oc1..xxxxxEXAMPLExxxxx",
- "default_dhcp_options_id": "ocid1.dhcpoptions.oc1.phx.xxxxxEXAMPLExxxxx",
- "default_route_table_id": "ocid1.routetable.oc1.phx.xxxxxEXAMPLExxxxx",
- "default_security_list_id": "ocid1.securitylist.oc1.phx.xxxxxEXAMPLExxxxx",
- "display_name": "ansible_vcn",
- "dns_label": "ansiblevcn",
- "id": "ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx",
- "lifecycle_state": "AVAILABLE",
- "time_created": "2017-11-13T20:22:40.626000+00:00",
- "vcn_domain_name": "ansiblevcn.oraclevcn.com"
- }
+ description: Information about the VCN.
+ returned: On successful create and update operation
+ type: dict
+ sample:
+ {
+ "cidr_block": "10.0.0.0/16",
+ "compartment_id": "ocid1.compartment.oc1..xxxxxEXAMPLExxxxx",
+ "default_dhcp_options_id": "ocid1.dhcpoptions.oc1.phx.xxxxxEXAMPLExxxxx",
+ "default_route_table_id": "ocid1.routetable.oc1.phx.xxxxxEXAMPLExxxxx",
+ "default_security_list_id": "ocid1.securitylist.oc1.phx.xxxxxEXAMPLExxxxx",
+ "display_name": "ansible_vcn",
+ "dns_label": "ansiblevcn",
+ "id": "ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx",
+ "lifecycle_state": "AVAILABLE",
+ "time_created": "2017-11-13T20:22:40.626000+00:00",
+ "vcn_domain_name": "ansiblevcn.oraclevcn.com"
+ }
"""
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
@@ -161,17 +162,12 @@ def main():
)
module_args.update(
dict(
- cidr_block=dict(type="str", required=False),
- compartment_id=dict(type="str", required=False),
- display_name=dict(type="str", required=False, aliases=["name"]),
- dns_label=dict(type="str", required=False),
- state=dict(
- type="str",
- required=False,
- default="present",
- choices=["absent", "present"],
- ),
- vcn_id=dict(type="str", required=False, aliases=["id"]),
+ cidr_block=dict(type="str"),
+ compartment_id=dict(type="str"),
+ display_name=dict(type="str", aliases=["name"]),
+ dns_label=dict(type="str"),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ vcn_id=dict(type="str", aliases=["id"]),
)
)
diff --git a/plugins/modules/odbc.py b/plugins/modules/odbc.py
index 54c923cf1e..41b5df4f08 100644
--- a/plugins/modules/odbc.py
+++ b/plugins/modules/odbc.py
@@ -66,6 +66,7 @@ EXAMPLES = r"""
changed_when: false
"""
+# @FIXME RV 'results' is meant to be used when 'loop:' was used with the module.
RETURN = r"""
results:
description: List of lists of strings containing selected rows, likely empty for DDL statements.
diff --git a/plugins/modules/office_365_connector_card.py b/plugins/modules/office_365_connector_card.py
index 949fdcdce4..6b8384a7ca 100644
--- a/plugins/modules/office_365_connector_card.py
+++ b/plugins/modules/office_365_connector_card.py
@@ -13,10 +13,11 @@ module: office_365_connector_card
short_description: Use webhooks to create Connector Card messages within an Office 365 group
description:
- Creates Connector Card messages through Office 365 Connectors.
- - See U(https://learn.microsoft.com/en-us/microsoftteams/platform/task-modules-and-cards/cards/cards-reference#connector-card-for-microsoft-365-groups).
+ - See
+ U(https://learn.microsoft.com/en-us/microsoftteams/platform/task-modules-and-cards/cards/cards-reference#connector-card-for-microsoft-365-groups).
author: "Marc Sensenich (@marc-sensenich)" notes: - - This module is not idempotent, therefore if the same task is run twice there will be two Connector Cards created. + - This module is not idempotent, therefore if you run the same task twice then you create two Connector Cards. extends_documentation_fragment: - community.general.attributes attributes: @@ -34,7 +35,7 @@ options: type: str description: - A string used for summarizing card content. - - This will be shown as the message subject. + - This is the message subject. - This is required if the text parameter is not populated. color: type: str @@ -48,13 +49,13 @@ options: type: str description: - The main text of the card. - - This will be rendered below the sender information and optional title, + - This is rendered below the sender information and optional title, - And above any sections or actions present. actions: type: list elements: dict description: - - This array of objects will power the action links found at the bottom of the card. + - This array of objects is used to power the action links found at the bottom of the card. sections: type: list elements: dict @@ -74,8 +75,8 @@ EXAMPLES = r""" webhook: https://outlook.office.com/webhook/GUID/IncomingWebhook/GUID/GUID summary: This is the summary property title: This is the **card's title** property - text: This is the **card's text** property. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut - labore et dolore magna aliqua. + text: This is the **card's text** property. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod + tempor incididunt ut labore et dolore magna aliqua. color: E81123 sections: - title: This is the **section's title** property @@ -86,8 +87,8 @@ EXAMPLES = r""" hero_image: image: http://connectorsdemo.azurewebsites.net/images/WIN12_Scene_01.jpg title: This is the image's alternate text - text: This is the section's text property. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt - ut labore et dolore magna aliqua. + text: This is the section's text property. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod + tempor incididunt ut labore et dolore magna aliqua. facts: - name: This is a fact name value: This is a fact value diff --git a/plugins/modules/ohai.py b/plugins/modules/ohai.py index 9a182e99a5..32b14b2e81 100644 --- a/plugins/modules/ohai.py +++ b/plugins/modules/ohai.py @@ -13,8 +13,8 @@ DOCUMENTATION = r""" module: ohai short_description: Returns inventory data from I(Ohai) description: - - Similar to the M(community.general.facter) module, this runs the I(Ohai) discovery program (U(https://docs.chef.io/ohai.html)) on the remote - host and returns JSON inventory data. I(Ohai) data is a bit more verbose and nested than I(facter). + - Similar to the M(community.general.facter) module, this runs the I(Ohai) discovery program (U(https://docs.chef.io/ohai.html)) + on the remote host and returns JSON inventory data. I(Ohai) data is a bit more verbose and nested than I(facter). extends_documentation_fragment: - community.general.attributes attributes: diff --git a/plugins/modules/one_host.py b/plugins/modules/one_host.py index 8e7e4b92ad..7c43da3058 100644 --- a/plugins/modules/one_host.py +++ b/plugins/modules/one_host.py @@ -36,8 +36,8 @@ options: state: description: - Takes the host to the desired lifecycle state. - - If V(absent) the host will be deleted from the cluster. 
- - If V(present) the host will be created in the cluster (includes V(enabled), V(disabled) and V(offline) states).
+ - If V(absent) the host is deleted from the cluster.
+ - If V(present) the host is created in the cluster (includes V(enabled), V(disabled) and V(offline) states).
- If V(enabled) the host is fully operational.
- V(disabled), for example to perform maintenance operations.
- V(offline), host is totally offline.
@@ -56,7 +56,8 @@
type: str
vmm_mad_name:
description:
- - The name of the virtual machine manager mad name, this values are taken from the oned.conf with the tag name VM_MAD (name).
+ - The name of the virtual machine manager mad name, these values are taken from the oned.conf with the tag name VM_MAD
+ (name).
default: kvm
type: str
cluster_id:
diff --git a/plugins/modules/one_image.py b/plugins/modules/one_image.py
index 399cfadec8..d9a21f86b7 100644
--- a/plugins/modules/one_image.py
+++ b/plugins/modules/one_image.py
@@ -48,8 +48,8 @@ options:
type: bool
new_name:
description:
- - A name that will be assigned to the existing or new image.
- - In the case of cloning, by default O(new_name) will take the name of the origin image with the prefix 'Copy of'.
+ - A name that is assigned to the existing or new image.
+ - In the case of cloning, by default O(new_name) is set to the name of the origin image with the prefix 'Copy of'.
type: str
persistent:
description:
@@ -157,7 +157,7 @@ EXAMPLES = r"""
RETURN = r"""
id:
- description: Image id.
+ description: Image ID.
type: int
returned: when O(state=present), O(state=cloned), or O(state=renamed)
sample: 153
@@ -167,7 +167,7 @@ name:
returned: when O(state=present), O(state=cloned), or O(state=renamed)
sample: app1
group_id:
- description: Image's group id.
+ description: Image's group ID.
type: int
returned: when O(state=present), O(state=cloned), or O(state=renamed)
sample: 1
@@ -177,7 +177,7 @@ group_name:
returned: when O(state=present), O(state=cloned), or O(state=renamed)
sample: one-users
owner_id:
- description: Image's owner id.
+ description: Image's owner ID.
type: int
returned: when O(state=present), O(state=cloned), or O(state=renamed)
sample: 143
@@ -325,7 +325,7 @@ datastore:
returned: when O(state=present), O(state=cloned), or O(state=renamed)
version_added: 9.5.0
vms:
- description: The image's list of vm ID's.
+ description: The image's list of VM IDs.
type: list
elements: int
returned: when O(state=present), O(state=cloned), or O(state=renamed)
diff --git a/plugins/modules/one_image_info.py b/plugins/modules/one_image_info.py
index af47e16bdb..f940444cad 100644
--- a/plugins/modules/one_image_info.py
+++ b/plugins/modules/one_image_info.py
@@ -22,17 +22,17 @@ extends_documentation_fragment:
options:
ids:
description:
- - A list of images ids whose facts you want to gather.
+ - A list of image IDs whose facts you want to gather.
- Module can use integers too.
aliases: ['id']
type: list
elements: str
name:
description:
- - A O(name) of the image whose facts will be gathered.
- - If the O(name) begins with V(~) the O(name) will be used as regex pattern,
- which restricts the list of images (whose facts will be returned) whose names match specified regex.
- - Also, if the O(name) begins with V(~*) case-insensitive matching will be performed.
+ - A O(name) of the image whose facts are gathered.
+ - If the O(name) begins with V(~) the O(name) is used as regex pattern, which restricts the list of images (whose facts
+ are returned) whose names match specified regex.
+ - Also, if the O(name) begins with V(~*) case-insensitive matching is performed. - See examples for more details. type: str author: @@ -84,7 +84,7 @@ images: returned: success contains: id: - description: The image's id. + description: The image's ID. type: int sample: 153 name: @@ -92,7 +92,7 @@ images: type: str sample: app1 group_id: - description: The image's group id. + description: The image's group ID. type: int sample: 1 group_name: @@ -100,7 +100,7 @@ images: type: str sample: one-users owner_id: - description: The image's owner id. + description: The image's owner ID. type: int sample: 143 owner_name: @@ -231,7 +231,7 @@ images: sample: image_datastore version_added: 9.5.0 vms: - description: The image's list of vm ID's. + description: The image's list of VM ID's. type: list elements: int version_added: 9.5.0 @@ -281,8 +281,8 @@ IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', class ImageInfoModule(OpenNebulaModule): def __init__(self): argument_spec = dict( - ids=dict(type='list', aliases=['id'], elements='str', required=False), - name=dict(type='str', required=False), + ids=dict(type='list', aliases=['id'], elements='str'), + name=dict(type='str'), ) mutually_exclusive = [ ['ids', 'name'], diff --git a/plugins/modules/one_service.py b/plugins/modules/one_service.py index b9146eca2a..88ccd29d74 100644 --- a/plugins/modules/one_service.py +++ b/plugins/modules/one_service.py @@ -29,13 +29,13 @@ options: type: str api_username: description: - - Name of the user to login into the OpenNebula OneFlow API server. If not set then the value of the E(ONEFLOW_USERNAME) environment variable - is used. + - Name of the user to login into the OpenNebula OneFlow API server. If not set then the value of the E(ONEFLOW_USERNAME) + environment variable is used. type: str api_password: description: - - Password of the user to login into OpenNebula OneFlow API server. If not set then the value of the E(ONEFLOW_PASSWORD) environment variable - is used. + - Password of the user to login into OpenNebula OneFlow API server. If not set then the value of the E(ONEFLOW_PASSWORD) + environment variable is used. type: str template_name: description: @@ -55,8 +55,8 @@ options: type: str unique: description: - - Setting O(unique=true) will make sure that there is only one service instance running with a name set with O(service_name) when instantiating - a service from a template specified with O(template_id) or O(template_name). Check examples below. + - Setting O(unique=true) ensures that there is only one service instance running with a name set with O(service_name) + when instantiating a service from a template specified with O(template_id) or O(template_name). Check examples below. type: bool default: false state: @@ -68,16 +68,16 @@ options: type: str mode: description: - - Set permission mode of a service instance in octet format, for example V(0600) to give owner C(use) and C(manage) and nothing to group - and others. + - Set permission mode of a service instance in octet format, for example V(0600) to give owner C(use) and C(manage) + and nothing to group and others. type: str owner_id: description: - - ID of the user which will be set as the owner of the service. + - ID of the user which is set as the owner of the service. type: int group_id: description: - - ID of the group which will be set as the group of the service. + - ID of the group which is set as the group of the service. 
type: int wait: description: @@ -91,7 +91,7 @@ options: type: int custom_attrs: description: - - Dictionary of key/value custom attributes which will be used when instantiating a new service. + - Dictionary of key/value custom attributes which is used when instantiating a new service. default: {} type: dict role: @@ -184,7 +184,7 @@ EXAMPLES = r""" RETURN = r""" service_id: - description: Service id. + description: Service ID. type: int returned: success sample: 153 @@ -194,7 +194,7 @@ service_name: returned: success sample: app1 group_id: - description: Service's group id. + description: Service's group ID. type: int returned: success sample: 1 @@ -204,7 +204,7 @@ group_name: returned: success sample: one-users owner_id: - description: Service's owner id. + description: Service's owner ID. type: int returned: success sample: 143 @@ -224,7 +224,7 @@ mode: returned: success sample: 660 roles: - description: List of dictionaries of roles, each role is described by name, cardinality, state and nodes ids. + description: List of dictionaries of roles, each role is described by name, cardinality, state and nodes IDs. type: list returned: success sample: diff --git a/plugins/modules/one_template.py b/plugins/modules/one_template.py index 28f4f14cd3..091c4c55a7 100644 --- a/plugins/modules/one_template.py +++ b/plugins/modules/one_template.py @@ -25,19 +25,20 @@ attributes: check_mode: support: partial details: - - Note that check mode always returns C(changed=true) for existing templates, even if the template would not actually change. + - Note that check mode always returns C(changed=true) for existing templates, even if the template would not actually + change. diff_mode: support: none options: id: description: - - A O(id) of the template you would like to manage. If not set then a new template will be created with the given O(name). + - A O(id) of the template you would like to manage. If not set then a new template is created with the given O(name). type: int name: description: - - A O(name) of the template you would like to manage. - If a template with the given name does not exist it will be created, otherwise it will be managed by this module. + - A O(name) of the template you would like to manage. If a template with the given name does not exist it is created, + otherwise it is managed by this module. type: str template: description: @@ -50,6 +51,16 @@ options: choices: ["present", "absent"] default: present type: str + filter: + description: + - V(user_primary_group) - Resources belonging to the user's primary group. + - V(user) - Resources belonging to the user. + - V(all) - All resources. + - V(user_groups) - Resources belonging to the user and any of his groups. + choices: [user_primary_group, user, all, user_groups] + default: user + type: str + version_added: 10.3.0 extends_documentation_fragment: - community.general.opennebula @@ -110,7 +121,7 @@ EXAMPLES = r""" RETURN = r""" id: - description: Template id. + description: Template ID. type: int returned: when O(state=present) sample: 153 @@ -124,7 +135,7 @@ template: type: dict returned: when O(state=present) group_id: - description: Template's group id. + description: Template's group ID. type: int returned: when O(state=present) sample: 1 @@ -134,7 +145,7 @@ group_name: returned: when O(state=present) sample: one-users owner_id: - description: Template's owner id. + description: Template's owner ID. 
type: int returned: when O(state=present) sample: 143 @@ -152,10 +163,11 @@ from ansible_collections.community.general.plugins.module_utils.opennebula impor class TemplateModule(OpenNebulaModule): def __init__(self): argument_spec = dict( - id=dict(type='int', required=False), - name=dict(type='str', required=False), + id=dict(type='int'), + name=dict(type='str'), state=dict(type='str', choices=['present', 'absent'], default='present'), - template=dict(type='str', required=False), + template=dict(type='str'), + filter=dict(type='str', choices=['user_primary_group', 'user', 'all', 'user_groups'], default='user'), ) mutually_exclusive = [ @@ -181,10 +193,11 @@ class TemplateModule(OpenNebulaModule): name = params.get('name') desired_state = params.get('state') template_data = params.get('template') + filter = params.get('filter') self.result = {} - template = self.get_template_instance(id, name) + template = self.get_template_instance(id, name, filter) needs_creation = False if not template and desired_state != 'absent': if id: @@ -196,16 +209,19 @@ class TemplateModule(OpenNebulaModule): self.result = self.delete_template(template) else: if needs_creation: - self.result = self.create_template(name, template_data) + self.result = self.create_template(name, template_data, filter) else: - self.result = self.update_template(template, template_data) + self.result = self.update_template(template, template_data, filter) self.exit() - def get_template(self, predicate): - # -3 means "Resources belonging to the user" + def get_template(self, predicate, filter): + # filter was included, for discussions see: + # Issue: https://github.com/ansible-collections/community.general/issues/9278 + # PR: https://github.com/ansible-collections/community.general/pull/9547 # the other two parameters are used for pagination, -1 for both essentially means "return all" - pool = self.one.templatepool.info(-3, -1, -1) + filter_values = {'user_primary_group': -4, 'user': -3, 'all': -2, 'user_groups': -1} + pool = self.one.templatepool.info(filter_values[filter], -1, -1) for template in pool.VMTEMPLATE: if predicate(template): @@ -213,17 +229,17 @@ class TemplateModule(OpenNebulaModule): return None - def get_template_by_id(self, template_id): - return self.get_template(lambda template: (template.ID == template_id)) + def get_template_by_id(self, template_id, filter): + return self.get_template(lambda template: (template.ID == template_id), filter) - def get_template_by_name(self, name): - return self.get_template(lambda template: (template.NAME == name)) + def get_template_by_name(self, name, filter): + return self.get_template(lambda template: (template.NAME == name), filter) - def get_template_instance(self, requested_id, requested_name): + def get_template_instance(self, requested_id, requested_name, filter): if requested_id: - return self.get_template_by_id(requested_id) + return self.get_template_by_id(requested_id, filter) else: - return self.get_template_by_name(requested_name) + return self.get_template_by_name(requested_name, filter) def get_template_info(self, template): info = { @@ -238,21 +254,21 @@ class TemplateModule(OpenNebulaModule): return info - def create_template(self, name, template_data): + def create_template(self, name, template_data, filter): if not self.module.check_mode: self.one.template.allocate("NAME = \"" + name + "\"\n" + template_data) - result = self.get_template_info(self.get_template_by_name(name)) + result = self.get_template_info(self.get_template_by_name(name, filter)) 
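# A create request is always reported as a change; note that in check mode the allocate call above is skipped.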
result['changed'] = True
return result
- def update_template(self, template, template_data):
+ def update_template(self, template, template_data, filter):
if not self.module.check_mode:
# 0 = replace the whole template
self.one.template.update(template.ID, template_data, 0)
- result = self.get_template_info(self.get_template_by_id(template.ID))
+ result = self.get_template_info(self.get_template_by_id(template.ID, filter))
if self.module.check_mode:
# Unfortunately it is not easy to detect if the template would have changed, therefore always report a change here.
result['changed'] = True
diff --git a/plugins/modules/one_vm.py b/plugins/modules/one_vm.py
index 10e4b324ee..3d23efa036 100644
--- a/plugins/modules/one_vm.py
+++ b/plugins/modules/one_vm.py
@@ -2,6 +2,7 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Milan Ilic
# Copyright (c) 2019, Jan Meerkamp
+# Copyright (c) 2025, Tom Paine
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
@@ -32,12 +33,14 @@ options:
type: str
api_username:
description:
- - Name of the user to login into the OpenNebula RPC server. If not set then the value of the E(ONE_USERNAME) environment variable is used.
+ - Name of the user to login into the OpenNebula RPC server. If not set then the value of the E(ONE_USERNAME) environment
+ variable is used.
type: str
api_password:
description:
- - Password of the user to login into OpenNebula RPC server. If not set then the value of the E(ONE_PASSWORD) environment variable is used.
- if both O(api_username) or O(api_password) are not set, then it will try authenticate with ONE auth file. Default path is "~/.one/one_auth".
+ - Password of the user to login into OpenNebula RPC server. If not set then the value of the E(ONE_PASSWORD) environment
+ variable is used. If neither O(api_username) nor O(api_password) is set, then it tries to authenticate with the ONE
+ auth file. Default path is C(~/.one/one_auth).
- Set environment variable E(ONE_AUTH) to override this path.
type: str
template_name:
@@ -50,12 +53,12 @@
type: int
vm_start_on_hold:
description:
- - Set to true to put vm on hold while creating.
+ - Set to true to put VM on hold while creating.
default: false
type: bool
instance_ids:
description:
- - 'A list of instance ids used for states: V(absent), V(running), V(rebooted), V(poweredoff).'
+ - 'A list of instance IDs used for states: V(absent), V(running), V(rebooted), V(poweredoff).'
aliases: ['ids']
type: list
elements: int
@@ -76,9 +79,9 @@
type: bool
wait:
description:
- - Wait for the instance to reach its desired state before returning. Keep in mind if you are waiting for instance to be in running state
- it does not mean that you will be able to SSH on that machine only that boot process have started on that instance, see 'wait_for' example
- for details.
+ - Wait for the instance to reach its desired state before returning. Keep in mind if you are waiting for the instance to
+ be in the running state, it does not mean that you are able to SSH into that machine, only that the boot process has
+ started on that instance. See the example using the M(ansible.builtin.wait_for) module for details.
default: true
type: bool
wait_timeout:
@@ -91,9 +94,9 @@
- A dictionary of key/value attributes to add to new instances, or for setting C(state) of instances with these attributes.
- Keys are case insensitive and OpenNebula automatically converts them to upper case.
- Be aware V(NAME) is a special attribute which sets the name of the VM when it is deployed. - - C(#) character(s) can be appended to the C(NAME) and the module will automatically add indexes to the names of VMs. + - C(#) character(s) can be appended to the C(NAME) and the module automatically adds indexes to the names of VMs. - 'For example: V(NAME: foo-###) would create VMs with names V(foo-000), V(foo-001),...' - - When used with O(count_attributes) and O(exact_count) the module will match the base name without the index part. + - When used with O(count_attributes) and O(exact_count) the module matches the base name without the index part. default: {} type: dict labels: @@ -104,13 +107,14 @@ options: elements: str count_attributes: description: - - A dictionary of key/value attributes that can only be used with O(exact_count) to determine how many nodes based on a specific attributes - criteria should be deployed. This can be expressed in multiple ways and is shown in the EXAMPLES section. + - A dictionary of key/value attributes that can only be used with O(exact_count) to determine how many nodes based on + a specific attributes criteria should be deployed. This can be expressed in multiple ways and is shown in the EXAMPLES + section. type: dict count_labels: description: - - A list of labels that can only be used with O(exact_count) to determine how many nodes based on a specific labels criteria should be deployed. - This can be expressed in multiple ways and is shown in the EXAMPLES section. + - A list of labels that can only be used with O(exact_count) to determine how many nodes based on a specific labels + criteria should be deployed. This can be expressed in multiple ways and is shown in the EXAMPLES section. type: list elements: str count: @@ -120,21 +124,22 @@ options: type: int exact_count: description: - - Indicates how many instances that match O(count_attributes) and O(count_labels) parameters should be deployed. Instances are either created - or terminated based on this value. - - B(NOTE:) Instances with the least IDs will be terminated first. + - Indicates how many instances that match O(count_attributes) and O(count_labels) parameters should be deployed. Instances + are either created or terminated based on this value. + - B(NOTE:) Instances with the least IDs are terminated first. type: int mode: description: - - Set permission mode of the instance in octet format, for example V(0600) to give owner C(use) and C(manage) and nothing to group and others. + - Set permission mode of the instance in octet format, for example V(0600) to give owner C(use) and C(manage) and nothing + to group and others. type: str owner_id: description: - - ID of the user which will be set as the owner of the instance. + - ID of the user which is set as the owner of the instance. type: int group_id: description: - - ID of the group which will be set as the group of the instance. + - ID of the group which is set as the group of the instance. type: int memory: description: @@ -152,7 +157,7 @@ options: type: float vcpu: description: - - Number of CPUs (cores) new VM will have. + - Number of CPUs (cores) the new VM uses. type: int networks: description: @@ -165,9 +170,9 @@ options: - Creates an image from a VM disk. - It is a dictionary where you have to specify C(name) of the new image. - Optionally you can specify C(disk_id) of the disk you want to save. By default C(disk_id) is 0. 
- - B(NOTE:) This operation will only be performed on the first VM (if more than one VM ID is passed) and the VM has to be in the C(poweredoff) - state. - - Also this operation will fail if an image with specified C(name) already exists. + - B(NOTE:) This operation is only performed on the first VM (if more than one VM ID is passed) and the VM has to be + in the C(poweredoff) state. + - Also this operation fails if an image with specified C(name) already exists. type: dict persistent: description: @@ -190,6 +195,18 @@ options: - When O(instance_ids) is provided, updates running VMs with the C(updateconf) API call. - When new VMs are being created, emulates the C(updateconf) API call using direct template merge. - Allows for complete modifications of the C(CONTEXT) attribute. + - 'Supported attributes include:' + - B(BACKUP_CONFIG:) V(BACKUP_VOLATILE), V(FS_FREEZE), V(INCREMENT_MODE), V(KEEP_LAST), V(MODE); + - B(CONTEXT:) (Any value, except V(ETH*). Variable substitutions are made); + - B(CPU_MODEL:) V(FEATURES), V(MODEL); + - B(FEATURES:) V(ACPI), V(APIC), V(GUEST_AGENT), V(HYPERV), V(IOTHREADS), V(LOCALTIME), V(PAE), V(VIRTIO_BLK_QUEUES), + V(VIRTIO_SCSI_QUEUES); + - B(GRAPHICS:) V(COMMAND), V(KEYMAP), V(LISTEN), V(PASSWD), V(PORT), V(TYPE); + - B(INPUT:) V(BUS), V(TYPE); + - B(OS:) V(ARCH), V(BOOT), V(BOOTLOADER), V(FIRMWARE), V(INITRD), V(KERNEL), V(KERNEL_CMD), V(MACHINE), V(ROOT), V(SD_DISK_BUS), + V(UUID); + - B(RAW:) V(DATA), V(DATA_VMX), V(TYPE), V(VALIDATE); + - B(VIDEO:) V(ATS), V(IOMMU), V(RESOLUTION), V(TYPE), V(VRAM). type: dict version_added: 6.3.0 author: @@ -338,8 +355,9 @@ EXAMPLES = r""" register: vm - name: Wait for SSH to come up - ansible.builtin.wait_for_connection: - delegate_to: '{{ vm.instances[0].networks[0].ip }}' + ansible.builtin.wait_for: + port: 22 + host: '{{ vm.instances[0].networks[0].ip }}' - name: Terminate VMs by ids community.general.one_vm: @@ -427,7 +445,7 @@ EXAMPLES = r""" RETURN = r""" instances_ids: - description: A list of instances ids whose state is changed or which are fetched with O(instance_ids) option. + description: A list of instances IDs whose state is changed or which are fetched with O(instance_ids) option. type: list returned: success sample: [1234, 1235] @@ -437,35 +455,35 @@ instances: returned: success contains: vm_id: - description: Vm id. + description: VM ID. type: int sample: 153 vm_name: - description: Vm name. + description: VM name. type: str sample: foo template_id: - description: Vm's template id. + description: VM's template ID. type: int sample: 153 group_id: - description: Vm's group id. + description: VM's group ID. type: int sample: 1 group_name: - description: Vm's group name. + description: VM's group name. type: str sample: one-users owner_id: - description: Vm's owner id. + description: VM's owner ID. type: int sample: 143 owner_name: - description: Vm's owner name. + description: VM's owner name. type: str sample: app-user mode: - description: Vm's mode. + description: VM's mode. type: str returned: success sample: 660 @@ -496,20 +514,21 @@ instances: networks: description: A list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC. 
type: list - sample: [ - { - "ip": "10.120.5.33", - "mac": "02:00:0a:78:05:21", - "name": "default-test-private", - "security_groups": "0,10" - }, - { - "ip": "10.120.5.34", - "mac": "02:00:0a:78:05:22", - "name": "default-test-private", - "security_groups": "0" - } - ] + sample: + [ + { + "ip": "10.120.5.33", + "mac": "02:00:0a:78:05:21", + "name": "default-test-private", + "security_groups": "0,10" + }, + { + "ip": "10.120.5.34", + "mac": "02:00:0a:78:05:22", + "name": "default-test-private", + "security_groups": "0" + } + ] uptime_h: description: Uptime of the instance in hours. type: int @@ -521,59 +540,64 @@ instances: attributes: description: A dictionary of key/values attributes that are associated with the instance. type: dict - sample: { - "HYPERVISOR": "kvm", - "LOGO": "images/logos/centos.png", - "TE_GALAXY": "bar", - "USER_INPUTS": null - } + sample: + { + "HYPERVISOR": "kvm", + "LOGO": "images/logos/centos.png", + "TE_GALAXY": "bar", + "USER_INPUTS": null + } updateconf: description: A dictionary of key/values attributes that are set with the updateconf API call. type: dict version_added: 6.3.0 - sample: { - "OS": { "ARCH": "x86_64" }, - "CONTEXT": { - "START_SCRIPT": "ip r r 169.254.16.86/32 dev eth0", - "SSH_PUBLIC_KEY": "ssh-rsa ...\\nssh-ed25519 ..." + sample: + { + "OS": { + "ARCH": "x86_64" + }, + "CONTEXT": { + "START_SCRIPT": "ip r r 169.254.16.86/32 dev eth0", + "SSH_PUBLIC_KEY": "ssh-rsa ...\\nssh-ed25519 ..." + } } - } tagged_instances: description: - - A list of instances info based on a specific attributes and/or labels that are specified with O(count_attributes) and O(count_labels) options. + - A list of instances info based on a specific attributes and/or labels that are specified with O(count_attributes) and + O(count_labels) options. type: complex returned: success contains: vm_id: - description: Vm id. + description: VM ID. type: int sample: 153 vm_name: - description: Vm name. + description: VM name. type: str sample: foo template_id: - description: Vm's template id. + description: VM's template ID. type: int sample: 153 group_id: - description: Vm's group id. + description: VM's group ID. type: int sample: 1 group_name: - description: Vm's group name. + description: VM's group name. type: str sample: one-users owner_id: - description: Vm's user id. + description: VM's user ID. type: int sample: 143 owner_name: - description: Vm's user name. + description: VM's user name. type: str sample: app-user mode: - description: Vm's mode. + description: VM's mode. type: str returned: success sample: 660 @@ -604,20 +628,21 @@ tagged_instances: networks: description: A list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC. type: list - sample: [ - { - "ip": "10.120.5.33", - "mac": "02:00:0a:78:05:21", - "name": "default-test-private", - "security_groups": "0,10" - }, - { - "ip": "10.120.5.34", - "mac": "02:00:0a:78:05:22", - "name": "default-test-private", - "security_groups": "0" - } - ] + sample: + [ + { + "ip": "10.120.5.33", + "mac": "02:00:0a:78:05:21", + "name": "default-test-private", + "security_groups": "0,10" + }, + { + "ip": "10.120.5.34", + "mac": "02:00:0a:78:05:22", + "name": "default-test-private", + "security_groups": "0" + } + ] uptime_h: description: Uptime of the instance in hours. type: int @@ -629,12 +654,27 @@ tagged_instances: attributes: description: A dictionary of key/values attributes that are associated with the instance. 
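Relatedly, a sketch of how RV(tagged_instances) pairs with O(exact_count) and O(count_attributes)/O(count_labels) (the template ID, base name, and label are illustrative):

- name: Keep exactly three indexed VMs for the base name app with label foo
  community.general.one_vm:
    template_id: 53
    exact_count: 3
    count_attributes:
      NAME: app-###
    count_labels:
      - foo
    attributes:
      NAME: app-###
    labels:
      - foo
  register: result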
type: dict
- sample: {"HYPERVISOR": "kvm", "LOGO": "images/logos/centos.png", "TE_GALAXY": "bar", "USER_INPUTS": null}
+ sample:
+ {
+ "HYPERVISOR": "kvm",
+ "LOGO": "images/logos/centos.png",
+ "TE_GALAXY": "bar",
+ "USER_INPUTS": null
+ }
updateconf:
description: A dictionary of key/values attributes that are set with the updateconf API call.
type: dict
version_added: 6.3.0
- sample: {"OS": {"ARCH": "x86_64"}, "CONTEXT": {"START_SCRIPT": "ip r r 169.254.16.86/32 dev eth0", "SSH_PUBLIC_KEY": "ssh-rsa ...\\nssh-ed25519 ..."}}
+ sample:
+ {
+ "OS": {
+ "ARCH": "x86_64"
+ },
+ "CONTEXT": {
+ "START_SCRIPT": "ip r r 169.254.16.86/32 dev eth0",
+ "SSH_PUBLIC_KEY": "ssh-rsa ...\\nssh-ed25519 ..."
+ }
+ }
"""

try:
@@ -652,13 +692,17 @@ from ansible.module_utils.common.dict_transformations import dict_merge

from ansible_collections.community.general.plugins.module_utils.opennebula import flatten, render

+# Updateconf attributes documentation: https://docs.opennebula.io/6.10/integration_and_development/system_interfaces/api.html#one-vm-updateconf
UPDATECONF_ATTRIBUTES = {
- "OS": ["ARCH", "MACHINE", "KERNEL", "INITRD", "BOOTLOADER", "BOOT", "SD_DISK_BUS", "UUID"],
- "FEATURES": ["ACPI", "PAE", "APIC", "LOCALTIME", "HYPERV", "GUEST_AGENT"],
+ "OS": ["ARCH", "MACHINE", "KERNEL", "INITRD", "BOOTLOADER", "BOOT", "SD_DISK_BUS", "UUID", "FIRMWARE"],
+ "CPU_MODEL": ["MODEL", "FEATURES"],
+ "FEATURES": ["ACPI", "PAE", "APIC", "LOCALTIME", "HYPERV", "GUEST_AGENT", "VIRTIO_BLK_QUEUES", "VIRTIO_SCSI_QUEUES", "IOTHREADS"],
"INPUT": ["TYPE", "BUS"],
- "GRAPHICS": ["TYPE", "LISTEN", "PASSWD", "KEYMAP"],
- "RAW": ["DATA", "DATA_VMX", "TYPE"],
+ "GRAPHICS": ["TYPE", "LISTEN", "PORT", "PASSWD", "KEYMAP", "COMMAND"],
+ "VIDEO": ["ATS", "IOMMU", "RESOLUTION", "TYPE", "VRAM"],
+ "RAW": ["DATA", "DATA_VMX", "TYPE", "VALIDATE"],
"CONTEXT": [],
+ "BACKUP_CONFIG": ["FS_FREEZE", "KEEP_LAST", "BACKUP_VOLATILE", "MODE", "INCREMENT_MODE"],
}

diff --git a/plugins/modules/one_vnet.py b/plugins/modules/one_vnet.py
index baafc39fbb..b77530e756 100644
--- a/plugins/modules/one_vnet.py
+++ b/plugins/modules/one_vnet.py
@@ -22,19 +22,20 @@ attributes:
check_mode:
support: partial
details:
- - Note that check mode always returns C(changed=true) for existing networks, even if the network would not actually change.
+ - Note that check mode always returns C(changed=true) for existing networks, even if the network would not actually
+ change.
diff_mode:
support: none
options:
id:
description:
- A O(id) of the network you would like to manage.
- - If not set then a new network will be created with the given O(name).
+ - If not set then a new network is created with the given O(name).
type: int
name:
description:
- - A O(name) of the network you would like to manage. If a network with the given name does not exist it will be created, otherwise it will
- be managed by this module.
+ - A O(name) of the network you would like to manage. If a network with the given name does not exist, then it is created;
+ otherwise it is managed by this module.
type: str
template:
description:
@@ -89,7 +90,7 @@ EXAMPLES = r"""

RETURN = r"""
id:
- description: The network id.
+ description: The network ID.
type: int
returned: when O(state=present)
sample: 153
@@ -116,12 +117,12 @@ user_id:
returned: when O(state=present)
sample: 1
user_name:
- description: The network's user id.
+ description: The network's user ID.
type: str
returned: when O(state=present)
sample: oneadmin
group_id:
- description: The network's group id.
+ description: The network's group ID.
type: int
returned: when O(state=present)
sample: 1
@@ -131,7 +132,7 @@ group_name:
returned: when O(state=present)
sample: one-users
owner_id:
- description: The network's owner id.
+ description: The network's owner ID.
type: int
returned: when O(state=present)
sample: 143
@@ -207,7 +208,7 @@ bridge_type:
returned: when O(state=present)
sample: linux
parent_network_id:
- description: The network's parent network id.
+ description: The network's parent network ID.
type: int
returned: when O(state=present)
sample: 1
@@ -262,10 +263,10 @@ class NetworksModule(OpenNebulaModule):

    def __init__(self):
        argument_spec = dict(
- id=dict(type='int', required=False),
- name=dict(type='str', required=False),
+ id=dict(type='int'),
+ name=dict(type='str'),
            state=dict(type='str', choices=['present', 'absent'], default='present'),
- template=dict(type='str', required=False),
+ template=dict(type='str'),
        )

        mutually_exclusive = [
diff --git a/plugins/modules/oneandone_firewall_policy.py b/plugins/modules/oneandone_firewall_policy.py
index 72ed4e5594..eca9a8ed70 100644
--- a/plugins/modules/oneandone_firewall_policy.py
+++ b/plugins/modules/oneandone_firewall_policy.py
@@ -46,36 +46,36 @@ options:
type: str
rules:
description:
- - A list of rules that will be set for the firewall policy. Each rule must contain protocol parameter, in addition to three optional parameters
- (port_from, port_to, and source).
+ - List of rules that are set for the firewall policy. Each rule must contain the protocol parameter, in addition to three
+ optional parameters (port_from, port_to, and source).
type: list
elements: dict
default: []
add_server_ips:
description:
- - A list of server identifiers (id or name) to be assigned to a firewall policy. Used in combination with update state.
+ - A list of server identifiers (ID or name) to be assigned to a firewall policy. Used in combination with update state.
type: list
elements: str
required: false
default: []
remove_server_ips:
description:
- - A list of server IP ids to be unassigned from a firewall policy. Used in combination with update state.
+ - A list of server IP IDs to be unassigned from a firewall policy. Used in combination with update state.
type: list
elements: str
required: false
default: []
add_rules:
description:
- - A list of rules that will be added to an existing firewall policy. It is syntax is the same as the one used for rules parameter. Used
- in combination with update state.
+ - List of rules that are added to an existing firewall policy. Its syntax is the same as the one used for the rules parameter.
+ Used in combination with update state.
type: list
elements: dict
required: false
default: []
remove_rules:
description:
- - A list of rule ids that will be removed from an existing firewall policy. Used in combination with update state.
+ - List of rule IDs that are removed from an existing firewall policy. Used in combination with update state.
type: list
elements: str
required: false
@@ -144,7 +144,7 @@ EXAMPLES = r"""
firewall_policy: ansible-firewall-policy-updated
add_server_ips:
- server_identifier (id or name)
- - server_identifier #2 (id or name)
+ - "server_identifier #2 (id or name)"
wait: true
wait_timeout: 500
state: update
@@ -182,8 +182,8 @@ EXAMPLES = r"""
auth_token: oneandone_private_api_key
firewall_policy: ansible-firewall-policy-updated
remove_rules:
- - rule_id #1
- - rule_id #2
+ - "rule_id #1"
+ - "rule_id #2"
- '...'
wait: true
wait_timeout: 500

@@ -194,7 +194,7 @@ RETURN = r"""
firewall_policy:
description: Information about the firewall policy that was processed.
type: dict
- sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}'
+ sample: {"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}
returned: always
"""

@@ -288,7 +288,7 @@ def _add_firewall_rules(module, oneandone_conn, firewall_id, rules):

    if module.check_mode:
        firewall_policy_id = get_firewall_policy(oneandone_conn, firewall_id)
- if (firewall_rules and firewall_policy_id):
+ if firewall_rules and firewall_policy_id:
            return True
        return False
diff --git a/plugins/modules/oneandone_load_balancer.py b/plugins/modules/oneandone_load_balancer.py
index 45ed6157fa..5a8ce7b8f0 100644
--- a/plugins/modules/oneandone_load_balancer.py
+++ b/plugins/modules/oneandone_load_balancer.py
@@ -42,7 +42,7 @@ options:
required: false
name:
description:
- - Load balancer name used with present state. Used as identifier (id or name) when used with absent state. maxLength=128.
+ - Load balancer name used with present state. Used as identifier (ID or name) when used with absent state. maxLength=128.
type: str
health_check_test:
description:
@@ -55,7 +55,7 @@ options:
type: str
health_check_path:
description:
- - Url to call for checking. Required for HTTP health check. maxLength=1000.
+ - URL to call for checking. Required for HTTP health check. maxLength=1000.
type: str
required: false
health_check_parse:
@@ -78,15 +78,15 @@ options:
choices: ["ROUND_ROBIN", "LEAST_CONNECTIONS"]
datacenter:
description:
- - ID or country code of the datacenter where the load balancer will be created.
+ - ID or country code of the datacenter where the load balancer is created.
- If not specified, it defaults to V(US).
type: str
choices: ["US", "ES", "DE", "GB"]
required: false
rules:
description:
- - A list of rule objects that will be set for the load balancer. Each rule must contain protocol, port_balancer, and port_server parameters,
- in addition to source parameter, which is optional.
+ - A list of rule objects that are set for the load balancer. Each rule must contain protocol, port_balancer, and port_server
+ parameters, in addition to source parameter, which is optional.
type: list
elements: dict
default: []
@@ -97,29 +97,29 @@ options:
required: false
add_server_ips:
description:
- - A list of server identifiers (id or name) to be assigned to a load balancer. Used in combination with update state.
+ - A list of server identifiers (id or name) to be assigned to a load balancer. Used in combination with O(state=update).
type: list
elements: str
required: false
default: []
remove_server_ips:
description:
- - A list of server IP ids to be unassigned from a load balancer. Used in combination with update state.
+ - A list of server IP IDs to be unassigned from a load balancer. Used in combination with O(state=update).
type: list
elements: str
required: false
default: []
add_rules:
description:
- - A list of rules that will be added to an existing load balancer. It is syntax is the same as the one used for rules parameter. Used in
- combination with update state.
+ - A list of rules that are added to an existing load balancer. Its syntax is the same as the one used for the rules parameter.
+ Used in combination with O(state=update).
type: list
elements: dict
required: false
default: []
remove_rules:
description:
- - A list of rule ids that will be removed from an existing load balancer. Used in combination with update state.
+ - A list of rule IDs that are removed from an existing load balancer. Used in combination with O(state=update).
type: list
elements: str
required: false
@@ -233,8 +233,8 @@ EXAMPLES = r"""
load_balancer: ansible load balancer updated
description: Adding rules to a load balancer with ansible
remove_rules:
- - rule_id #1
- - rule_id #2
+ - "rule_id #1"
+ - "rule_id #2"
- '...'
wait: true
wait_timeout: 500
@@ -245,7 +245,7 @@ RETURN = r"""
load_balancer:
description: Information about the load balancer that was processed.
type: dict
- sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Balancer"}'
+ sample: {"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Balancer"}
returned: always
"""

@@ -344,7 +344,7 @@ def _add_load_balancer_rules(module, oneandone_conn, load_balancer_id, rules):

    if module.check_mode:
        lb_id = get_load_balancer(oneandone_conn, load_balancer_id)
- if (load_balancer_rules and lb_id):
+ if load_balancer_rules and lb_id:
            return True
        return False
diff --git a/plugins/modules/oneandone_monitoring_policy.py b/plugins/modules/oneandone_monitoring_policy.py
index 0ba653b414..2d8693156c 100644
--- a/plugins/modules/oneandone_monitoring_policy.py
+++ b/plugins/modules/oneandone_monitoring_policy.py
@@ -11,7 +11,8 @@ DOCUMENTATION = r"""
module: oneandone_monitoring_policy
short_description: Configure 1&1 monitoring policy
description:
- - Create, remove, update monitoring policies (and add/remove ports, processes, and servers). This module has a dependency on 1and1 >= 1.0.
+ - Create, remove, update monitoring policies (and add/remove ports, processes, and servers). This module has a dependency
+ on 1and1 >= 1.0.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -59,9 +60,9 @@ options:
required: false
thresholds:
description:
- - Monitoring policy thresholds. Each of the suboptions have warning and critical, which both have alert and value suboptions. Warning is
- used to set limits for warning alerts, critical is used to set critical alerts. alert enables alert, and value is used to advise when
- the value is exceeded.
+ - Monitoring policy thresholds. Each of the suboptions has warning and critical, which both have alert and value suboptions.
+ Warning is used to set limits for warning alerts, critical is used to set critical alerts. C(alert) enables the alert,
+ and C(value) sets the value that triggers it when exceeded.
type: list
elements: dict
default: []
@@ -88,7 +89,7 @@ options:
required: true
ports:
description:
- - Array of ports that will be monitoring.
+ - Array of ports that are to be monitored.
type: list
elements: dict
default: []
@@ -113,7 +114,7 @@ options:
required: true
processes:
description:
- - Array of processes that will be monitoring.
+ - Array of processes that are to be monitored.
type: list
elements: dict
default: []
@@ -412,7 +413,7 @@ RETURN = r"""
monitoring_policy:
description: Information about the monitoring policy that was processed.
type: dict - sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}' + sample: {"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"} returned: always """ @@ -536,7 +537,7 @@ def _add_processes(module, oneandone_conn, monitoring_policy_id, processes): if module.check_mode: mp_id = get_monitoring_policy(oneandone_conn, monitoring_policy_id) - if (monitoring_policy_processes and mp_id): + if monitoring_policy_processes and mp_id: return True return False diff --git a/plugins/modules/oneandone_private_network.py b/plugins/modules/oneandone_private_network.py index 1a56fe345c..f39c464f96 100644 --- a/plugins/modules/oneandone_private_network.py +++ b/plugins/modules/oneandone_private_network.py @@ -50,7 +50,7 @@ options: type: str datacenter: description: - - The identifier of the datacenter where the private network will be created. + - The identifier of the datacenter where the private network is created. type: str choices: [US, ES, DE, GB] network_address: @@ -143,7 +143,7 @@ RETURN = r""" private_network: description: Information about the private network. type: dict - sample: '{"name": "backup_network", "id": "55726DEDA20C99CF6F2AF8F18CAC9963"}' + sample: {"name": "backup_network", "id": "55726DEDA20C99CF6F2AF8F18CAC9963"} returned: always """ diff --git a/plugins/modules/oneandone_public_ip.py b/plugins/modules/oneandone_public_ip.py index c30c0bbdc7..b6b49c5b4a 100644 --- a/plugins/modules/oneandone_public_ip.py +++ b/plugins/modules/oneandone_public_ip.py @@ -43,7 +43,7 @@ options: required: false datacenter: description: - - ID of the datacenter where the IP will be created (only for unassigned IPs). + - ID of the datacenter where the IP is created (only for unassigned IPs). type: str choices: [US, ES, DE, GB] default: US @@ -110,7 +110,7 @@ RETURN = r""" public_ip: description: Information about the public IP that was processed. type: dict - sample: '{"id": "F77CC589EBC120905B4F4719217BFF6D", "ip": "10.5.132.106"}' + sample: {"id": "F77CC589EBC120905B4F4719217BFF6D", "ip": "10.5.132.106"} returned: always """ diff --git a/plugins/modules/oneandone_server.py b/plugins/modules/oneandone_server.py index e24ed4f69d..7683ea1480 100644 --- a/plugins/modules/oneandone_server.py +++ b/plugins/modules/oneandone_server.py @@ -11,8 +11,8 @@ DOCUMENTATION = r""" module: oneandone_server short_description: Create, destroy, start, stop, and reboot a 1&1 Host server description: - - Create, destroy, update, start, stop, and reboot a 1&1 Host server. When the server is created it can optionally wait for it to be 'running' - before returning. + - Create, destroy, update, start, stop, and reboot a 1&1 Host server. When the server is created it can optionally wait + for it to be 'running' before returning. extends_documentation_fragment: - community.general.attributes attributes: @@ -55,25 +55,26 @@ options: type: str fixed_instance_size: description: - - The instance size name or ID of the server. It is required only for 'present' state, and it is mutually exclusive with vcore, cores_per_processor, - ram, and hdds parameters. + - The instance size name or ID of the server. It is required only for 'present' state, and it is mutually exclusive + with vcore, cores_per_processor, ram, and hdds parameters. - 'The available choices are: V(S), V(M), V(L), V(XL), V(XXL), V(3XL), V(4XL), V(5XL).' type: str vcore: description: - - The total number of processors. It must be provided with cores_per_processor, ram, and hdds parameters. + - The total number of processors. 
It must be provided with O(cores_per_processor), O(ram), and O(hdds) parameters.
type: int
cores_per_processor:
description:
- - The number of cores per processor. It must be provided with vcore, ram, and hdds parameters.
+ - The number of cores per processor. It must be provided with O(vcore), O(ram), and O(hdds) parameters.
type: int
ram:
description:
- - The amount of RAM memory. It must be provided with with vcore, cores_per_processor, and hdds parameters.
+ - The amount of RAM memory. It must be provided with O(vcore), O(cores_per_processor), and O(hdds) parameters.
type: float
hdds:
description:
- - A list of hard disks with nested "size" and "is_main" properties. It must be provided with vcore, cores_per_processor, and ram parameters.
+ - A list of hard disks with nested O(ignore:hdds[].size) and O(ignore:hdds[].is_main) properties. It must be provided
+ with O(vcore), O(cores_per_processor), and O(ram) parameters.
type: list
elements: dict
private_network:
@@ -113,8 +114,8 @@ options:
choices: ["cloud", "baremetal", "k8s_node"]
wait:
description:
- - Wait for the server to be in state 'running' before returning. Also used for delete operation (set to V(false) if you do not want to wait
- for each individual server to be deleted before moving on with other tasks).
+ - Wait for the server to be in state 'running' before returning. Also used for delete operation (set to V(false) if
+ you do not want to wait for each individual server to be deleted before moving on with other tasks).
type: bool
default: true
wait_timeout:
@@ -129,8 +130,8 @@ options:
default: 5
auto_increment:
description:
- - When creating multiple servers at once, whether to differentiate hostnames by appending a count after them or substituting the count where
- there is a %02d or %03d in the hostname string.
+ - When creating multiple servers at once, whether to differentiate hostnames by appending a count after them or substituting
+ the count where there is a %02d or %03d in the hostname string.
type: bool
default: true

diff --git a/plugins/modules/onepassword_info.py b/plugins/modules/onepassword_info.py
index c49dd15e17..5689d28fe6 100644
--- a/plugins/modules/onepassword_info.py
+++ b/plugins/modules/onepassword_info.py
@@ -46,30 +46,32 @@ options:
field:
type: str
description:
- - The name of the field to search for within this item (optional, defaults to V(password), or V(document) if the item has an attachment).
+ - The name of the field to search for within this item (optional, defaults to V(password), or V(document) if the
+ item has an attachment).
section:
type: str
description:
- - The name of a section within this item containing the specified field (optional, will search all sections if not specified).
+ - The name of a section within this item containing the specified field (optional, it searches all sections if not
+ specified).
vault:
type: str
description:
- - The name of the particular 1Password vault to search, useful if your 1Password user has access to multiple vaults (optional).
+ - The name of the particular 1Password vault to search, useful if your 1Password user has access to multiple vaults
+ (optional).
required: true
auto_login:
type: dict
description:
- - A dictionary containing authentication details. If this is set, M(community.general.onepassword_info) will attempt to sign in to 1Password
- automatically.
+ - A dictionary containing authentication details. If this is set, the module attempts to sign in to 1Password automatically.
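For instance, a minimal sketch of O(auto_login) combined with a simple search (all values are placeholders; per the recommendation that follows, keep the real credentials in Ansible Vault):

- name: Look up a password, signing in to 1Password automatically
  community.general.onepassword_info:
    auto_login:
      subdomain: example
      username: user@example.com
      master_password: "{{ vaulted_master_password }}"
      secret_key: "{{ vaulted_secret_key }}"
    search_terms:
      - name: KITT
        field: password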
- Without this option, you must have already logged in using the 1Password CLI before running Ansible. - - It is B(highly) recommended to store 1Password credentials in an Ansible Vault. Ensure that the key used to encrypt the Ansible Vault - is equal to or greater in strength than the 1Password master password. + - It is B(highly) recommended to store 1Password credentials in an Ansible Vault. Ensure that the key used to encrypt + the Ansible Vault is equal to or greater in strength than the 1Password master password. suboptions: subdomain: type: str description: - 1Password subdomain name (V(subdomain).1password.com). - - If this is not specified, the most recent subdomain will be used. + - If this is not specified, the most recent subdomain is used. username: type: str description: @@ -141,7 +143,8 @@ RETURN = r""" # One or more dictionaries for each matching item from 1Password, along with the appropriate fields. # This shows the response you would expect to receive from the third example documented above. onepassword: - description: Dictionary of each 1password item matching the given search terms, shows what would be returned from the third example above. + description: Dictionary of each 1password item matching the given search terms, shows what would be returned from the third + example above. returned: success type: dict sample: @@ -205,7 +208,7 @@ class OnePasswordInfo(object): def _parse_field(self, data_json, item_id, field_name, section_title=None): data = json.loads(data_json) - if ('documentAttributes' in data['details']): + if 'documentAttributes' in data['details']: # This is actually a document, let's fetch the document data instead! document = self._run(["get", "document", data['overview']['title']]) return {'document': document[1].strip()} @@ -215,7 +218,7 @@ class OnePasswordInfo(object): # Some types of 1Password items have a 'password' field directly alongside the 'fields' attribute, # not inside it, so we need to check there first. - if (field_name in data['details']): + if field_name in data['details']: return {field_name: data['details'][field_name]} # Otherwise we continue looking inside the 'fields' attribute for the specified field. @@ -371,7 +374,7 @@ def main(): username=dict(type='str'), master_password=dict(required=True, type='str', no_log=True), secret_key=dict(type='str', no_log=True), - ), default=None), + )), search_terms=dict(required=True, type='list', elements='dict'), ), supports_check_mode=True diff --git a/plugins/modules/oneview_datacenter_info.py b/plugins/modules/oneview_datacenter_info.py index 831bd59f61..1ca33023db 100644 --- a/plugins/modules/oneview_datacenter_info.py +++ b/plugins/modules/oneview_datacenter_info.py @@ -30,7 +30,7 @@ options: type: str options: description: - - "Retrieve additional information. Options available: V(visualContent)." + - 'Retrieve additional information. Options available: V(visualContent).' type: list elements: str diff --git a/plugins/modules/oneview_enclosure_info.py b/plugins/modules/oneview_enclosure_info.py index 21feee769b..05992ee501 100644 --- a/plugins/modules/oneview_enclosure_info.py +++ b/plugins/modules/oneview_enclosure_info.py @@ -30,8 +30,8 @@ options: type: str options: description: - - 'List with options to gather additional information about an Enclosure and related resources. Options allowed: V(script), V(environmentalConfiguration), - and V(utilization). For the option V(utilization), you can provide specific parameters.' 
+ - 'List with options to gather additional information about an Enclosure and related resources. Options allowed: V(script), + V(environmentalConfiguration), and V(utilization). For the option V(utilization), you can provide specific parameters.' type: list elements: raw @@ -122,11 +122,12 @@ EXAMPLES = r""" ansible.builtin.debug: msg: "{{ result.enclosure_utilization }}" -- name: "Gather information about an Enclosure with temperature data at a resolution of one sample per day, between two specified dates" +- name: "Gather information about an Enclosure with temperature data at a resolution of one sample per day, between two + specified dates" community.general.oneview_enclosure_info: name: Test-Enclosure options: - - utilization: # optional + - utilization: # optional fields: AmbientTemperature filter: - startDate=2016-07-01T14:29:42.000Z diff --git a/plugins/modules/oneview_ethernet_network.py b/plugins/modules/oneview_ethernet_network.py index 823fea3b2c..7ba3abb6e4 100644 --- a/plugins/modules/oneview_ethernet_network.py +++ b/plugins/modules/oneview_ethernet_network.py @@ -27,9 +27,9 @@ options: state: description: - Indicates the desired state for the Ethernet Network resource. - - V(present) will ensure data properties are compliant with OneView. - - V(absent) will remove the resource from OneView, if it exists. - - V(default_bandwidth_reset) will reset the network connection template to the default. + - V(present) ensures data properties are compliant with OneView. + - V(absent) removes the resource from OneView, if it exists. + - V(default_bandwidth_reset) resets the network connection template to the default. type: str default: present choices: [present, absent, default_bandwidth_reset] diff --git a/plugins/modules/oneview_ethernet_network_info.py b/plugins/modules/oneview_ethernet_network_info.py index 6eb4f46a19..c1c0a327fe 100644 --- a/plugins/modules/oneview_ethernet_network_info.py +++ b/plugins/modules/oneview_ethernet_network_info.py @@ -29,8 +29,8 @@ options: type: str options: description: - - 'List with options to gather additional information about an Ethernet Network and related resources. Options allowed: V(associatedProfiles) - and V(associatedUplinkGroups).' + - 'List with options to gather additional information about an Ethernet Network and related resources. Options allowed: + V(associatedProfiles) and V(associatedUplinkGroups).' type: list elements: str extends_documentation_fragment: diff --git a/plugins/modules/oneview_fc_network.py b/plugins/modules/oneview_fc_network.py index 312a5dc893..3063e80757 100644 --- a/plugins/modules/oneview_fc_network.py +++ b/plugins/modules/oneview_fc_network.py @@ -24,8 +24,8 @@ options: state: description: - Indicates the desired state for the Fibre Channel Network resource. - - V(present) will ensure data properties are compliant with OneView. - - V(absent) will remove the resource from OneView, if it exists. + - V(present) ensures data properties are compliant with OneView. + - V(absent) removes the resource from OneView, if it exists. 
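An illustrative task for the V(present) state just described (the config path and network name are placeholders):

- name: Ensure the Fibre Channel network exists
  community.general.oneview_fc_network:
    config: /etc/oneview/oneview_config.json
    state: present
    data:
      name: New FC Network
  delegate_to: localhost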
type: str choices: ['present', 'absent'] required: true diff --git a/plugins/modules/oneview_fc_network_info.py b/plugins/modules/oneview_fc_network_info.py index af20869dc3..9de22ef55c 100644 --- a/plugins/modules/oneview_fc_network_info.py +++ b/plugins/modules/oneview_fc_network_info.py @@ -87,8 +87,8 @@ class FcNetworkInfoModule(OneViewModuleBase): def __init__(self): argument_spec = dict( - name=dict(required=False, type='str'), - params=dict(required=False, type='dict') + name=dict(type='str'), + params=dict(type='dict') ) super(FcNetworkInfoModule, self).__init__( diff --git a/plugins/modules/oneview_fcoe_network.py b/plugins/modules/oneview_fcoe_network.py index 15128bd372..37fbff9ef4 100644 --- a/plugins/modules/oneview_fcoe_network.py +++ b/plugins/modules/oneview_fcoe_network.py @@ -25,8 +25,8 @@ options: state: description: - Indicates the desired state for the FCoE Network resource. - - V(present) will ensure data properties are compliant with OneView. - - V(absent) will remove the resource from OneView, if it exists. + - V(present) ensures data properties are compliant with OneView. + - V(absent) removes the resource from OneView, if it exists. type: str default: present choices: ['present', 'absent'] diff --git a/plugins/modules/oneview_logical_interconnect_group.py b/plugins/modules/oneview_logical_interconnect_group.py index 866dabc6b8..2683fc5468 100644 --- a/plugins/modules/oneview_logical_interconnect_group.py +++ b/plugins/modules/oneview_logical_interconnect_group.py @@ -28,8 +28,8 @@ options: state: description: - Indicates the desired state for the Logical Interconnect Group resource. - - V(absent) will remove the resource from OneView, if it exists. - - V(present) will ensure data properties are compliant with OneView. + - V(absent) removes the resource from OneView, if it exists. + - V(present) ensures data properties are compliant with OneView. type: str choices: [absent, present] default: present @@ -63,7 +63,7 @@ EXAMPLES = r""" - relativeValue: 1 type: Enclosure permittedInterconnectTypeName: HP VC Flex-10/10D Module - # Alternatively you can inform permittedInterconnectTypeUri + # Alternatively you can inform permittedInterconnectTypeUri delegate_to: localhost - name: Ensure that the Logical Interconnect Group has the specified scopes diff --git a/plugins/modules/oneview_network_set.py b/plugins/modules/oneview_network_set.py index a7fae51f21..ee5d3560a7 100644 --- a/plugins/modules/oneview_network_set.py +++ b/plugins/modules/oneview_network_set.py @@ -27,8 +27,8 @@ options: state: description: - Indicates the desired state for the Network Set resource. - - V(present) will ensure data properties are compliant with OneView. - - V(absent) will remove the resource from OneView, if it exists. + - V(present) ensures data properties are compliant with OneView. + - V(absent) removes the resource from OneView, if it exists. type: str default: present choices: ['present', 'absent'] diff --git a/plugins/modules/oneview_network_set_info.py b/plugins/modules/oneview_network_set_info.py index f3a4ace3da..51e7d0b510 100644 --- a/plugins/modules/oneview_network_set_info.py +++ b/plugins/modules/oneview_network_set_info.py @@ -30,8 +30,8 @@ options: options: description: - - 'List with options to gather information about Network Set. Option allowed: V(withoutEthernet). The option V(withoutEthernet) retrieves - the list of network_sets excluding Ethernet networks.' + - 'List with options to gather information about Network Set. Option allowed: V(withoutEthernet). 
The option V(withoutEthernet) + retrieves the list of network_sets excluding Ethernet networks.' type: list elements: str diff --git a/plugins/modules/oneview_san_manager.py b/plugins/modules/oneview_san_manager.py index 8c03bda463..23732cdaaf 100644 --- a/plugins/modules/oneview_san_manager.py +++ b/plugins/modules/oneview_san_manager.py @@ -29,8 +29,7 @@ options: - Indicates the desired state for the Uplink Set resource. - V(present) ensures data properties are compliant with OneView. - V(absent) removes the resource from OneView, if it exists. - - V(connection_information_set) updates the connection information for the SAN Manager. - This operation is non-idempotent. + - V(connection_information_set) updates the connection information for the SAN Manager. This operation is non-idempotent. type: str default: present choices: [present, absent, connection_information_set] diff --git a/plugins/modules/online_server_info.py b/plugins/modules/online_server_info.py index e36c78ef0e..a06dae1926 100644 --- a/plugins/modules/online_server_info.py +++ b/plugins/modules/online_server_info.py @@ -41,92 +41,92 @@ online_server_info: type: list elements: dict sample: - "online_server_info": [ - { - "abuse": "abuse@example.com", - "anti_ddos": false, - "bmc": { - "session_key": null - }, - "boot_mode": "normal", - "contacts": { - "owner": "foobar", - "tech": "foobar" - }, + [ + { + "abuse": "abuse@example.com", + "anti_ddos": false, + "bmc": { + "session_key": null + }, + "boot_mode": "normal", + "contacts": { + "owner": "foobar", + "tech": "foobar" + }, + "disks": [ + { + "$ref": "/api/v1/server/hardware/disk/68452" + }, + { + "$ref": "/api/v1/server/hardware/disk/68453" + } + ], + "drive_arrays": [ + { "disks": [ - { - "$ref": "/api/v1/server/hardware/disk/68452" - }, - { - "$ref": "/api/v1/server/hardware/disk/68453" - } + { + "$ref": "/api/v1/server/hardware/disk/68452" + }, + { + "$ref": "/api/v1/server/hardware/disk/68453" + } ], - "drive_arrays": [ - { - "disks": [ - { - "$ref": "/api/v1/server/hardware/disk/68452" - }, - { - "$ref": "/api/v1/server/hardware/disk/68453" - } - ], - "raid_controller": { - "$ref": "/api/v1/server/hardware/raidController/9910" - }, - "raid_level": "RAID1" - } - ], - "hardware_watch": true, - "hostname": "sd-42", - "id": 42, - "ip": [ - { - "address": "195.154.172.149", - "mac": "28:92:4a:33:5e:c6", - "reverse": "195-154-172-149.rev.poneytelecom.eu.", - "switch_port_state": "up", - "type": "public" - }, - { - "address": "10.90.53.212", - "mac": "28:92:4a:33:5e:c7", - "reverse": null, - "switch_port_state": "up", - "type": "private" - } - ], - "last_reboot": "2018-08-23T08:32:03.000Z", - "location": { - "block": "A", - "datacenter": "DC3", - "position": 19, - "rack": "A23", - "room": "4 4-4" + "raid_controller": { + "$ref": "/api/v1/server/hardware/raidController/9910" }, - "network": { - "ip": [ - "195.154.172.149" - ], - "ipfo": [], - "private": [ - "10.90.53.212" - ] - }, - "offer": "Pro-1-S-SATA", - "os": { - "name": "FreeBSD", - "version": "11.1-RELEASE" - }, - "power": "ON", - "proactive_monitoring": false, - "raid_controllers": [ - { - "$ref": "/api/v1/server/hardware/raidController/9910" - } - ], - "support": "Basic service level" - } + "raid_level": "RAID1" + } + ], + "hardware_watch": true, + "hostname": "sd-42", + "id": 42, + "ip": [ + { + "address": "195.154.172.149", + "mac": "28:92:4a:33:5e:c6", + "reverse": "195-154-172-149.rev.poneytelecom.eu.", + "switch_port_state": "up", + "type": "public" + }, + { + "address": "10.90.53.212", + "mac": 
"28:92:4a:33:5e:c7", + "reverse": null, + "switch_port_state": "up", + "type": "private" + } + ], + "last_reboot": "2018-08-23T08:32:03.000Z", + "location": { + "block": "A", + "datacenter": "DC3", + "position": 19, + "rack": "A23", + "room": "4 4-4" + }, + "network": { + "ip": [ + "195.154.172.149" + ], + "ipfo": [], + "private": [ + "10.90.53.212" + ] + }, + "offer": "Pro-1-S-SATA", + "os": { + "name": "FreeBSD", + "version": "11.1-RELEASE" + }, + "power": "ON", + "proactive_monitoring": false, + "raid_controllers": [ + { + "$ref": "/api/v1/server/hardware/raidController/9910" + } + ], + "support": "Basic service level" + } ] """ diff --git a/plugins/modules/online_user_info.py b/plugins/modules/online_user_info.py index 60e0763267..5b1628adad 100644 --- a/plugins/modules/online_user_info.py +++ b/plugins/modules/online_user_info.py @@ -37,13 +37,13 @@ online_user_info: returned: success type: dict sample: - "online_user_info": { - "company": "foobar LLC", - "email": "foobar@example.com", - "first_name": "foo", - "id": 42, - "last_name": "bar", - "login": "foobar" + { + "company": "foobar LLC", + "email": "foobar@example.com", + "first_name": "foo", + "id": 42, + "last_name": "bar", + "login": "foobar" } """ diff --git a/plugins/modules/open_iscsi.py b/plugins/modules/open_iscsi.py index 15e20a241a..80360833a2 100644 --- a/plugins/modules/open_iscsi.py +++ b/plugins/modules/open_iscsi.py @@ -14,7 +14,8 @@ author: - Serge van Ginderachter (@srvg) short_description: Manage iSCSI targets with Open-iSCSI description: - - Discover targets on given portal, (dis)connect targets, mark targets to manually or auto start, return device nodes of connected targets. + - Discover targets on given portal, (dis)connect targets, mark targets to manually or auto start, return device nodes of + connected targets. requirements: - open_iscsi library and tools (iscsiadm) extends_documentation_fragment: @@ -43,7 +44,7 @@ options: login: description: - Whether the target node should be connected. - - When O(target) is omitted, will login to all available. + - When O(target) is omitted, it logins to all available. type: bool aliases: [state] node_auth: @@ -82,8 +83,8 @@ options: discover: description: - Whether the list of target nodes on the portal should be (re)discovered and added to the persistent iSCSI database. - - Keep in mind that C(iscsiadm) discovery resets configuration, like C(node.startup) to manual, hence combined with O(auto_node_startup=true) - will always return a changed state. + - Keep in mind that C(iscsiadm) discovery resets configuration, like C(node.startup) to manual, hence combined with + O(auto_node_startup=true) always returns a changed state. type: bool default: false show_nodes: @@ -94,7 +95,7 @@ options: rescan: description: - Rescan an established session for discovering new targets. - - When O(target) is omitted, will rescan all sessions. + - When O(target) is omitted, it rescans all sessions. type: bool default: false version_added: 4.1.0 diff --git a/plugins/modules/openbsd_pkg.py b/plugins/modules/openbsd_pkg.py index 385fdb9870..e81fce3018 100644 --- a/plugins/modules/openbsd_pkg.py +++ b/plugins/modules/openbsd_pkg.py @@ -36,16 +36,16 @@ options: elements: str state: description: - - V(present) will make sure the package is installed. - - V(latest) will make sure the latest version of the package is installed. - - V(absent) will make sure the specified package is not installed. + - V(present) ensures the package is installed. 
+ - V(latest) ensures the latest version of the package is installed.
+ - V(absent) ensures the specified package is not installed.
choices: [absent, latest, present, installed, removed]
default: present
type: str
build:
description:
- - Build the package from source instead of downloading and installing a binary. Requires that the port source tree is already installed.
- Automatically builds and installs the C(sqlports) package, if it is not already installed.
+ - Build the package from source instead of downloading and installing a binary. Requires that the port source tree is
+ already installed. Automatically builds and installs the C(sqlports) package, if it is not already installed.
- Mutually exclusive with O(snapshot).
type: bool
default: false
@@ -63,8 +63,8 @@ options:
type: path
clean:
description:
- - When updating or removing packages, delete the extra configuration file(s) in the old packages which are annotated with @extra in the
- packaging-list.
+ - When updating or removing packages, delete the extra configuration file(s) in the old packages which are annotated
+ with C(@extra) in the packaging-list.
type: bool
default: false
quick:
@@ -73,8 +73,8 @@ options:
type: bool
default: false
notes:
- - When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly to the O(name)
- option.
+ - When used with a C(loop:), each package is processed individually; it is much more efficient to pass the list directly
+ to the O(name) option.
"""

EXAMPLES = r"""
diff --git a/plugins/modules/opendj_backendprop.py b/plugins/modules/opendj_backendprop.py
index 7b48587faf..be4edac125 100644
--- a/plugins/modules/opendj_backendprop.py
+++ b/plugins/modules/opendj_backendprop.py
@@ -10,10 +10,10 @@ __metaclass__ = type

DOCUMENTATION = r"""
module: opendj_backendprop
-short_description: Will update the backend configuration of OpenDJ using the dsconfig set-backend-prop command
+short_description: Update the backend configuration of OpenDJ using the dsconfig set-backend-prop command
description:
- - This module will update settings for OpenDJ with the command set-backend-prop.
- - It will check first using de get-backend-prop if configuration needs to be applied.
+ - This module updates settings for OpenDJ with the command C(set-backend-prop).
+ - It checks first using C(get-backend-prop) if configuration needs to be applied.
author:
- Werner Dijkerman (@dj-wasabi)
extends_documentation_fragment:
@@ -81,16 +81,16 @@ options:
type: str
"""

-EXAMPLES = """
- - name: Add or update OpenDJ backend properties
- opendj_backendprop:
- hostname: localhost
- port: 4444
- username: "cn=Directory Manager"
- password: password
- backend: userRoot
- name: index-entry-limit
- value: 5000
+EXAMPLES = r"""
+- name: Add or update OpenDJ backend properties
+ opendj_backendprop:
+ hostname: localhost
+ port: 4444
+ username: "cn=Directory Manager"
+ password: password
+ backend: userRoot
+ name: index-entry-limit
+ value: 5000
"""

RETURN = r"""
@@ -153,9 +153,9 @@ def main():
opendj_bindir=dict(default="/opt/opendj/bin", type="path"),
hostname=dict(required=True),
port=dict(required=True),
- username=dict(default="cn=Directory Manager", required=False),
- password=dict(required=False, no_log=True),
- passwordfile=dict(required=False, type="path"),
+ username=dict(default="cn=Directory Manager"),
+ password=dict(no_log=True),
+ passwordfile=dict(type="path"),
backend=dict(required=True),
name=dict(required=True),
value=dict(required=True),
diff --git a/plugins/modules/openwrt_init.py b/plugins/modules/openwrt_init.py
index aa708f08f8..c8c98f2d39 100644
--- a/plugins/modules/openwrt_init.py
+++ b/plugins/modules/openwrt_init.py
@@ -32,19 +32,19 @@ options:
state:
type: str
description:
- - V(started)/V(stopped) are idempotent actions that will not run commands unless necessary.
- - V(restarted) will always bounce the service.
- - V(reloaded) will always reload.
+ - V(started)/V(stopped) are idempotent actions that do not run commands unless necessary.
+ - V(restarted) always bounces the service.
+ - V(reloaded) always reloads.
choices: ['started', 'stopped', 'restarted', 'reloaded']
enabled:
description:
- - Whether the service should start on boot. B(At least one of state and enabled are required).
+ - Whether the service should start on boot. B(At least one) of O(state) and O(enabled) is required.
type: bool
pattern:
type: str
description:
- - If the service does not respond to the 'running' command, name a substring to look for as would be found in the output of the C(ps) command
- as a stand-in for a 'running' result. If the string is found, the service will be assumed to be running.
+ - If the service does not respond to the C(running) command, name a substring to look for as would be found in the output
+ of the C(ps) command as a stand-in for a C(running) result. If the string is found, the service is assumed to be running.
notes:
- One option other than O(name) is required.
requirements:
diff --git a/plugins/modules/opkg.py b/plugins/modules/opkg.py
index 40c48f3800..b57fbd7df7 100644
--- a/plugins/modules/opkg.py
+++ b/plugins/modules/opkg.py
@@ -28,8 +28,8 @@ options:
name:
description:
- Name of package(s) to install/remove.
- - C(NAME=VERSION) syntax is also supported to install a package in a certain version. See the examples. This only works on Yocto based Linux
- distributions (opkg>=0.3.2) and not for OpenWrt. This is supported since community.general 6.2.0.
+ - V(NAME=VERSION) syntax is also supported to install a package in a certain version. See the examples. This only works
+ on Yocto based Linux distributions (opkg>=0.3.2) and not for OpenWrt. This is supported since community.general 6.2.0.
aliases: [pkg]
required: true
type: list
@@ -43,7 +43,8 @@ options:
force:
description:
- The C(opkg --force) parameter used.
- - State V("") is deprecated and will be removed in community.general 12.0.0.
Please omit the parameter O(force) to obtain the same behavior.
+ - State V("") is deprecated and will be removed in community.general 12.0.0. Please omit the parameter O(force) to obtain
+ the same behavior.
choices:
- ""
- "depends"
@@ -133,7 +134,6 @@ class Opkg(StateModuleHelper):
executable=dict(type="path"),
),
)
- use_old_vardict = False

def __init_module__(self):
self.vars.set("install_c", 0, output=False, change=True)
diff --git a/plugins/modules/osx_defaults.py b/plugins/modules/osx_defaults.py
index 1cc541377c..56ff6e1ac1 100644
--- a/plugins/modules/osx_defaults.py
+++ b/plugins/modules/osx_defaults.py
@@ -18,8 +18,9 @@ author:
short_description: Manage macOS user defaults
description:
- This module allows users to read, write, and delete macOS user defaults from Ansible scripts.
- - MacOS applications and other programs use the defaults system to record user preferences and other information that must be maintained when
- the applications are not running (such as default font for new documents, or the position of an Info panel).
+ - MacOS applications and other programs use the defaults system to record user preferences and other information that must
+ be maintained when the applications are not running (such as default font for new documents, or the position of an Info
+ panel).
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -68,7 +69,7 @@ options:
state:
description:
- The state of the user defaults.
- - If set to V(list) will query the given parameter specified by O(key). Returns V(null) is nothing found or mis-spelled.
+ - If set to V(list), it queries the given parameter specified by O(key). Returns V(null) if nothing is found or the key is misspelled.
type: str
choices: [absent, list, present]
default: present
@@ -190,7 +191,7 @@ class OSXDefaults(object):

    @staticmethod
    def is_int(value):
        as_str = str(value)
- if (as_str.startswith("-")):
+ if as_str.startswith("-"):
            return as_str[1:].isdigit()
        else:
            return as_str.isdigit()
diff --git a/plugins/modules/ovh_ip_failover.py b/plugins/modules/ovh_ip_failover.py
index 4d6071cc18..425ee614f5 100644
--- a/plugins/modules/ovh_ip_failover.py
+++ b/plugins/modules/ovh_ip_failover.py
@@ -13,12 +13,12 @@ DOCUMENTATION = r"""
module: ovh_ip_failover
short_description: Manage OVH IP failover address
description:
- - Manage OVH (French European hosting provider) IP Failover Address. For now, this module can only be used to move an IP failover (or failover
- block) between services.
+ - Manage OVH (French European hosting provider) IP Failover Address. For now, this module can only be used to move an IP
+ failover (or failover block) between services.
author: "Pascal HERAUD (@pascalheraud)"
notes:
- - Uses the python OVH API U(https://github.com/ovh/python-ovh). You have to create an application (a key and secret) with a consumer key as
- described into U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/).
+ - Uses the Python OVH API U(https://github.com/ovh/python-ovh). You have to create an application (a key and secret) with
+ a consumer key as described into U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/).
requirements:
- ovh >= 0.4.8
extends_documentation_fragment:
@@ -49,14 +49,15 @@ options:
default: true
type: bool
description:
- - If true, the module will wait for the IP address to be moved. If false, exit without waiting. The taskId will be returned in module output.
+ - If V(true), the module waits for the IP address to be moved. If V(false), it exits without waiting. 
The C(taskId) is returned + in module output. wait_task_completion: required: false default: 0 description: - - If not 0, the module will wait for this task id to be completed. Use O(wait_task_completion) if you want to wait for completion of a previously - executed task with O(wait_completion=false). You can execute this module repeatedly on a list of failover IPs using O(wait_completion=false) - (see examples). + - If not V(0), the module waits for this task ID to be completed. Use O(wait_task_completion) if you want to wait for + completion of a previously executed task with O(wait_completion=false). You can execute this module repeatedly on + a list of failover IPs using O(wait_completion=false) (see examples). type: int application_key: required: true diff --git a/plugins/modules/ovh_ip_loadbalancing_backend.py b/plugins/modules/ovh_ip_loadbalancing_backend.py index 174ebffa22..8bf294a1d5 100644 --- a/plugins/modules/ovh_ip_loadbalancing_backend.py +++ b/plugins/modules/ovh_ip_loadbalancing_backend.py @@ -16,10 +16,10 @@ description: - Manage OVH (French European hosting provider) LoadBalancing IP backends. author: Pascal Heraud (@pascalheraud) notes: - - Uses the Python OVH API U(https://github.com/ovh/python-ovh). You have to create an application (a key and secret) with a consumer key as - described into U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/). + - Uses the Python OVH API U(https://github.com/ovh/python-ovh). You have to create an application (a key and secret) with + a consumer key as described into U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/). requirements: - - ovh > 0.3.5 + - ovh > 0.3.5 extends_documentation_fragment: - community.general.attributes attributes: @@ -244,7 +244,7 @@ def main(): 'parameters. Error returned by OVH api was : {0}' .format(apiError)) - if (backendProperties['weight'] != weight): + if backendProperties['weight'] != weight: # Change weight try: client.post( @@ -263,7 +263,7 @@ def main(): .format(apiError)) moduleChanged = True - if (backendProperties['probe'] != probe): + if backendProperties['probe'] != probe: # Change probe backendProperties['probe'] = probe try: diff --git a/plugins/modules/ovh_monthly_billing.py b/plugins/modules/ovh_monthly_billing.py index 438bf7db7f..912b697517 100644 --- a/plugins/modules/ovh_monthly_billing.py +++ b/plugins/modules/ovh_monthly_billing.py @@ -98,10 +98,10 @@ def main(): argument_spec=dict( project_id=dict(required=True), instance_id=dict(required=True), - endpoint=dict(required=False), - application_key=dict(required=False, no_log=True), - application_secret=dict(required=False, no_log=True), - consumer_key=dict(required=False, no_log=True), + endpoint=dict(), + application_key=dict(no_log=True), + application_secret=dict(no_log=True), + consumer_key=dict(no_log=True), ), supports_check_mode=True ) diff --git a/plugins/modules/pacemaker_cluster.py b/plugins/modules/pacemaker_cluster.py index fdae7fc367..ffed13f9c5 100644 --- a/plugins/modules/pacemaker_cluster.py +++ b/plugins/modules/pacemaker_cluster.py @@ -13,6 +13,7 @@ module: pacemaker_cluster short_description: Manage pacemaker clusters author: - Mathieu Bultel (@matbu) + - Dexter Le (@munchtoast) description: - This module can manage a pacemaker cluster and nodes from Ansible using the pacemaker CLI. extends_documentation_fragment: @@ -26,17 +27,20 @@ options: state: description: - Indicate desired state of the cluster. 
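A hedged sketch of the V(maintenance) state that the next lines introduce (it can be run against any cluster node):

- name: Put the whole cluster into maintenance mode
  community.general.pacemaker_cluster:
    state: maintenance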
- choices: [cleanup, offline, online, restart] + - The value V(maintenance) has been added in community.general 11.1.0. + choices: [cleanup, offline, online, restart, maintenance] type: str - node: + name: description: - - Specify which node of the cluster you want to manage. V(null) == the cluster status itself, V(all) == check the status of all nodes. + - Specify which node of the cluster you want to manage. V(null) == the cluster status itself, V(all) == check the status + of all nodes. type: str + aliases: ['node'] timeout: description: - - Timeout when the module should considered that the action has failed. - default: 300 + - Timeout period (in seconds) for polling the cluster operation. type: int + default: 300 force: description: - Force the change of the cluster state. @@ -62,132 +66,104 @@ out: returned: always """ -import time - -from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper +from ansible_collections.community.general.plugins.module_utils.pacemaker import pacemaker_runner, get_pacemaker_maintenance_mode -_PCS_CLUSTER_DOWN = "Error: cluster is not currently running on this node" +class PacemakerCluster(StateModuleHelper): + module = dict( + argument_spec=dict( + state=dict(type='str', choices=[ + 'cleanup', 'offline', 'online', 'restart', 'maintenance']), + name=dict(type='str', aliases=['node']), + timeout=dict(type='int', default=300), + force=dict(type='bool', default=True) + ), + supports_check_mode=True, + ) + default_state = "" + def __init_module__(self): + self.runner = pacemaker_runner(self.module) + self.vars.set('apply_all', True if not self.module.params['name'] else False) + get_args = dict([('cli_action', 'cluster'), ('state', 'status'), ('name', None), ('apply_all', self.vars.apply_all)]) + if self.module.params['state'] == "maintenance": + get_args['cli_action'] = "property" + get_args['state'] = "config" + get_args['name'] = "maintenance-mode" + elif self.module.params['state'] == "cleanup": + get_args['cli_action'] = "resource" + get_args['name'] = self.module.params['name'] -def get_cluster_status(module): - cmd = ["pcs", "cluster", "status"] - rc, out, err = module.run_command(cmd) - if out in _PCS_CLUSTER_DOWN: - return 'offline' - else: - return 'online' + self.vars.set('get_args', get_args) + self.vars.set('previous_value', self._get()['out']) + self.vars.set('value', self.vars.previous_value, change=True, diff=True) + if not self.module.params['state']: + self.module.deprecate( + 'Parameter "state" values not set is being deprecated. 
Make sure to provide a value for "state"', + version='12.0.0', + collection_name='community.general' + ) -def get_node_status(module, node='all'): - node_l = ["all"] if node == "all" else [] - cmd = ["pcs", "cluster", "pcsd-status"] + node_l - rc, out, err = module.run_command(cmd) - if rc == 1: - module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err)) - status = [] - for o in out.splitlines(): - status.append(o.split(':')) - return status + def __quit_module__(self): + self.vars.set('value', self._get()['out']) + def _process_command_output(self, fail_on_err, ignore_err_msg=""): + def process(rc, out, err): + if fail_on_err and rc != 0 and err and ignore_err_msg not in err: + self.do_raise('pcs failed with error (rc={0}): {1}'.format(rc, err)) + out = out.rstrip() + return None if out == "" else out + return process -def clean_cluster(module, timeout): - cmd = ["pcs", "resource", "cleanup"] - rc, out, err = module.run_command(cmd) - if rc == 1: - module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err)) + def _get(self): + with self.runner('cli_action state name') as ctx: + result = ctx.run(cli_action=self.vars.get_args['cli_action'], state=self.vars.get_args['state'], name=self.vars.get_args['name']) + return dict([('rc', result[0]), + ('out', result[1] if result[1] != "" else None), + ('err', result[2])]) + def state_cleanup(self): + with self.runner('cli_action state name', output_process=self._process_command_output(True, "Fail"), check_mode_skip=True) as ctx: + ctx.run(cli_action='resource') -def set_cluster(module, state, timeout, force): - if state == 'online': - cmd = ["pcs", "cluster", "start"] - if state == 'offline': - cmd = ["pcs", "cluster", "stop"] - if force: - cmd = cmd + ["--force"] - rc, out, err = module.run_command(cmd) - if rc == 1: - module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err)) + def state_offline(self): + with self.runner('cli_action state name apply_all wait', + output_process=self._process_command_output(True, "not currently running"), + check_mode_skip=True) as ctx: + ctx.run(cli_action='cluster', apply_all=self.vars.apply_all, wait=self.module.params['timeout']) - t = time.time() - ready = False - while time.time() < t + timeout: - cluster_state = get_cluster_status(module) - if cluster_state == state: - ready = True - break - if not ready: - module.fail_json(msg="Failed to set the state `%s` on the cluster\n" % (state)) + def state_online(self): + with self.runner('cli_action state name apply_all wait', + output_process=self._process_command_output(True, "currently running"), + check_mode_skip=True) as ctx: + ctx.run(cli_action='cluster', apply_all=self.vars.apply_all, wait=self.module.params['timeout']) + + if get_pacemaker_maintenance_mode(self.runner): + with self.runner('cli_action state name', output_process=self._process_command_output(True, "Fail"), check_mode_skip=True) as ctx: + ctx.run(cli_action='property', state='maintenance', name='maintenance-mode=false') + + def state_maintenance(self): + with self.runner('cli_action state name', + output_process=self._process_command_output(True, "Fail"), + check_mode_skip=True) as ctx: + ctx.run(cli_action='property', name='maintenance-mode=true') + + def state_restart(self): + with self.runner('cli_action state name apply_all wait', + output_process=self._process_command_output(True, "not currently running"), + check_mode_skip=True) as ctx: + ctx.run(cli_action='cluster', state='offline', 
apply_all=self.vars.apply_all, wait=self.module.params['timeout']) + ctx.run(cli_action='cluster', state='online', apply_all=self.vars.apply_all, wait=self.module.params['timeout']) + + if get_pacemaker_maintenance_mode(self.runner): + with self.runner('cli_action state name', output_process=self._process_command_output(True, "Fail"), check_mode_skip=True) as ctx: + ctx.run(cli_action='property', state='maintenance', name='maintenance-mode=false') def main(): - argument_spec = dict( - state=dict(type='str', choices=['online', 'offline', 'restart', 'cleanup']), - node=dict(type='str'), - timeout=dict(type='int', default=300), - force=dict(type='bool', default=True), - ) - - module = AnsibleModule( - argument_spec, - supports_check_mode=True, - ) - changed = False - state = module.params['state'] - node = module.params['node'] - force = module.params['force'] - timeout = module.params['timeout'] - - if state in ['online', 'offline']: - # Get cluster status - if node is None: - cluster_state = get_cluster_status(module) - if cluster_state == state: - module.exit_json(changed=changed, out=cluster_state) - else: - if module.check_mode: - module.exit_json(changed=True) - set_cluster(module, state, timeout, force) - cluster_state = get_cluster_status(module) - if cluster_state == state: - module.exit_json(changed=True, out=cluster_state) - else: - module.fail_json(msg="Fail to bring the cluster %s" % state) - else: - cluster_state = get_node_status(module, node) - # Check cluster state - for node_state in cluster_state: - if node_state[1].strip().lower() == state: - module.exit_json(changed=changed, out=cluster_state) - else: - if module.check_mode: - module.exit_json(changed=True) - # Set cluster status if needed - set_cluster(module, state, timeout, force) - cluster_state = get_node_status(module, node) - module.exit_json(changed=True, out=cluster_state) - - elif state == 'restart': - if module.check_mode: - module.exit_json(changed=True) - set_cluster(module, 'offline', timeout, force) - cluster_state = get_cluster_status(module) - if cluster_state == 'offline': - set_cluster(module, 'online', timeout, force) - cluster_state = get_cluster_status(module) - if cluster_state == 'online': - module.exit_json(changed=True, out=cluster_state) - else: - module.fail_json(msg="Failed during the restart of the cluster, the cluster cannot be started") - else: - module.fail_json(msg="Failed during the restart of the cluster, the cluster cannot be stopped") - - elif state == 'cleanup': - if module.check_mode: - module.exit_json(changed=True) - clean_cluster(module, timeout) - cluster_state = get_cluster_status(module) - module.exit_json(changed=True, out=cluster_state) + PacemakerCluster.execute() if __name__ == '__main__': diff --git a/plugins/modules/pacemaker_resource.py b/plugins/modules/pacemaker_resource.py new file mode 100644 index 0000000000..2fdf785487 --- /dev/null +++ b/plugins/modules/pacemaker_resource.py @@ -0,0 +1,217 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2025, Dexter Le +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r""" +module: pacemaker_resource +short_description: Manage pacemaker resources +author: + - Dexter Le (@munchtoast) +version_added: 10.5.0 +description: + - This module can manage resources in a Pacemaker cluster using the 
pacemaker CLI. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Indicate desired state for cluster resource. + choices: [present, absent, enabled, disabled] + default: present + type: str + name: + description: + - Specify the resource name to create. + required: true + type: str + resource_type: + description: + - Resource type to create. + type: dict + suboptions: + resource_name: + description: + - Specify the resource type name. + type: str + resource_standard: + description: + - Specify the resource type standard. + type: str + resource_provider: + description: + - Specify the resource type providers. + type: str + resource_option: + description: + - Specify the resource option to create. + type: list + elements: str + default: [] + resource_operation: + description: + - List of operations to associate with resource. + type: list + elements: dict + default: [] + suboptions: + operation_action: + description: + - Operation action to associate with resource. + type: str + operation_option: + description: + - Operation option to associate with action. + type: list + elements: str + resource_meta: + description: + - List of meta to associate with resource. + type: list + elements: str + resource_argument: + description: + - Action to associate with resource. + type: dict + suboptions: + argument_action: + description: + - Action to apply to resource. + type: str + choices: [clone, master, group, promotable] + argument_option: + description: + - Options to associate with resource action. + type: list + elements: str + wait: + description: + - Timeout period for polling the resource creation. + type: int + default: 300 +""" + +EXAMPLES = r""" +--- +- name: Create pacemaker resource + hosts: localhost + gather_facts: false + tasks: + - name: Create virtual-ip resource + community.general.pacemaker_resource: + state: present + name: virtual-ip + resource_type: + resource_name: IPaddr2 + resource_option: + - "ip=[192.168.2.1]" + resource_argument: + argument_action: group + argument_option: + - master + resource_operation: + - operation_action: monitor + operation_option: + - interval=20 +""" + +RETURN = r""" +cluster_resources: + description: The cluster resource output message. 
+ type: str + sample: "Assumed agent name ocf:heartbeat:IPaddr2 (deduced from IPaddr2)" + returned: always +""" + +from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper +from ansible_collections.community.general.plugins.module_utils.pacemaker import pacemaker_runner, get_pacemaker_maintenance_mode + + +class PacemakerResource(StateModuleHelper): + module = dict( + argument_spec=dict( + state=dict(type='str', default='present', choices=[ + 'present', 'absent', 'enabled', 'disabled']), + name=dict(type='str', required=True), + resource_type=dict(type='dict', options=dict( + resource_name=dict(type='str'), + resource_standard=dict(type='str'), + resource_provider=dict(type='str'), + )), + resource_option=dict(type='list', elements='str', default=list()), + resource_operation=dict(type='list', elements='dict', default=list(), options=dict( + operation_action=dict(type='str'), + operation_option=dict(type='list', elements='str'), + )), + resource_meta=dict(type='list', elements='str'), + resource_argument=dict(type='dict', options=dict( + argument_action=dict(type='str', choices=['clone', 'master', 'group', 'promotable']), + argument_option=dict(type='list', elements='str'), + )), + wait=dict(type='int', default=300), + ), + required_if=[('state', 'present', ['resource_type', 'resource_option'])], + supports_check_mode=True, + ) + + def __init_module__(self): + self.runner = pacemaker_runner(self.module) + self.vars.set('previous_value', self._get()['out']) + self.vars.set('value', self.vars.previous_value, change=True, diff=True) + self.module.params['name'] = self.module.params['name'] or None + + def __quit_module__(self): + self.vars.set('value', self._get()['out']) + + def _process_command_output(self, fail_on_err, ignore_err_msg=""): + def process(rc, out, err): + if fail_on_err and rc != 0 and err and ignore_err_msg not in err: + self.do_raise('pcs failed with error (rc={0}): {1}'.format(rc, err)) + out = out.rstrip() + return None if out == "" else out + return process + + def _get(self): + with self.runner('cli_action state name') as ctx: + result = ctx.run(cli_action="resource", state='status') + return dict([('rc', result[0]), + ('out', result[1] if result[1] != "" else None), + ('err', result[2])]) + + def state_absent(self): + force = get_pacemaker_maintenance_mode(self.runner) + with self.runner('cli_action state name force', output_process=self._process_command_output(True, "does not exist"), check_mode_skip=True) as ctx: + ctx.run(cli_action='resource', force=force) + + def state_present(self): + with self.runner( + 'cli_action state name resource_type resource_option resource_operation resource_meta resource_argument wait', + output_process=self._process_command_output(not get_pacemaker_maintenance_mode(self.runner), "already exists"), + check_mode_skip=True) as ctx: + ctx.run(cli_action='resource') + + def state_enabled(self): + with self.runner('cli_action state name', output_process=self._process_command_output(True, "Starting"), check_mode_skip=True) as ctx: + ctx.run(cli_action='resource') + + def state_disabled(self): + with self.runner('cli_action state name', output_process=self._process_command_output(True, "Stopped"), check_mode_skip=True) as ctx: + ctx.run(cli_action='resource') + + +def main(): + PacemakerResource.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packet_device.py b/plugins/modules/packet_device.py index 59851ea74b..f17db56c8c 100644 --- a/plugins/modules/packet_device.py 
+++ b/plugins/modules/packet_device.py
@@ -111,8 +111,10 @@ options:
   state:
     description:
       - Desired state of the device.
-      - If set to V(present) (the default), the module call will return immediately after the device-creating HTTP request successfully returns.
-      - If set to V(active), the module call will block until all the specified devices are in state active due to the Packet API, or until O(wait_timeout).
+      - If set to V(present) (the default), the module call returns immediately after the device-creating HTTP request successfully
+        returns.
+      - If set to V(active), the module call blocks until all the specified devices are in state active due to the Packet
+        API, or until O(wait_timeout).
     choices: [present, absent, active, inactive, rebooted]
     default: present
     type: str
@@ -125,15 +127,16 @@ options:
   wait_for_public_IPv:
     description:
       - Whether to wait for the instance to be assigned a public IPv4/IPv6 address.
-      - If set to 4, it will wait until IPv4 is assigned to the instance.
-      - If set to 6, wait until public IPv6 is assigned to the instance.
+      - If set to V(4), it waits until IPv4 is assigned to the instance.
+      - If set to V(6), it waits until public IPv6 is assigned to the instance.
     choices: [4, 6]
     type: int
   wait_timeout:
     description:
       - How long (seconds) to wait either for automatic IP address assignment, or for the device to reach the V(active) state.
-      - If O(wait_for_public_IPv) is set and O(state=active), the module will wait for both events consequently, applying the timeout twice.
+      - If O(wait_for_public_IPv) is set and O(state=active), the module waits for both events consecutively, applying the
+        timeout twice.
     default: 900
     type: int

@@ -255,26 +258,18 @@ EXAMPLES = r"""
 """

 RETURN = r"""
-changed:
-  description: True if a device was altered in any way (created, modified or removed).
-  type: bool
-  sample: true
-  returned: success
-
 devices:
-  description: Information about each device that was processed
+  description: Information about each device that was processed.
   type: list
   sample:
-    - {
-        "hostname": "my-server.com",
-        "id": "2a5122b9-c323-4d5c-b53c-9ad3f54273e7",
-        "public_ipv4": "147.229.15.12",
-        "private-ipv4": "10.0.15.12",
-        "tags": [],
-        "locked": false,
-        "state": "provisioning",
-        "public_ipv6": "2604:1380:2:5200::3"
-      }
+    - "hostname": "my-server.com"
+      "id": "2a5122b9-c323-4d5c-b53c-9ad3f54273e7"
+      "public_ipv4": "147.229.15.12"
+      "private-ipv4": "10.0.15.12"
+      "tags": []
+      "locked": false
+      "state": "provisioning"
+      "public_ipv6": "2604:1380:2:5200::3"
   returned: success
 """

@@ -419,12 +414,12 @@ def get_hostname_list(module):
     # at this point, hostnames is a list
     hostnames = [h.strip() for h in hostnames]

-    if (len(hostnames) > 1) and (count > 1):
+    if len(hostnames) > 1 and count > 1:
         _msg = ("If you set count>1, you should only specify one hostname "
                 "with the %d formatter, not a list of hostnames.")
         raise Exception(_msg)

-    if (len(hostnames) == 1) and (count > 0):
+    if len(hostnames) == 1 and count > 0:
         hostname_spec = hostnames[0]
         count_range = range(count_offset, count_offset + count)
         if re.search(r"%\d{0,2}d", hostname_spec):
diff --git a/plugins/modules/packet_ip_subnet.py b/plugins/modules/packet_ip_subnet.py
index 04d4aede11..0029623a10 100644
--- a/plugins/modules/packet_ip_subnet.py
+++ b/plugins/modules/packet_ip_subnet.py
@@ -75,10 +75,11 @@ options:
   state:
     description:
       - Desired state of the IP subnet on the specified device.
-      - With O(state=present), you must specify either O(hostname) or O(device_id). 
Subnet with given CIDR will then be assigned to the specified - device. - - With O(state=absent), you can specify either O(hostname) or O(device_id). The subnet will be removed from specified devices. - - If you leave both O(hostname) and O(device_id) empty, the subnet will be removed from any device it is assigned to. + - With O(state=present), you must specify either O(hostname) or O(device_id). Subnet with given CIDR is then assigned + to the specified device. + - With O(state=absent), you can specify either O(hostname) or O(device_id). The subnet is then removed from specified + devices. + - If you leave both O(hostname) and O(device_id) empty, the subnet is then removed from any device it is assigned to. choices: ['present', 'absent'] default: 'present' type: str @@ -122,12 +123,6 @@ EXAMPLES = r""" """ RETURN = r""" -changed: - description: True if an IP address assignments were altered in any way (created or removed). - type: bool - sample: true - returned: success - device_id: type: str description: UUID of the device associated with the specified IP address. diff --git a/plugins/modules/packet_project.py b/plugins/modules/packet_project.py index d61c9e598b..afadec36be 100644 --- a/plugins/modules/packet_project.py +++ b/plugins/modules/packet_project.py @@ -110,12 +110,6 @@ EXAMPLES = r""" """ RETURN = r""" -changed: - description: True if a project was created or removed. - type: bool - sample: true - returned: success - name: description: Name of addressed project. type: str diff --git a/plugins/modules/packet_sshkey.py b/plugins/modules/packet_sshkey.py index 8172482108..ec76a17b4c 100644 --- a/plugins/modules/packet_sshkey.py +++ b/plugins/modules/packet_sshkey.py @@ -35,7 +35,7 @@ options: type: str label: description: - - Label for the key. If you keep it empty, it will be read from key string. + - Label for the key. If you keep it empty, it is read from key string. type: str aliases: [name] id: @@ -85,15 +85,11 @@ EXAMPLES = r""" """ RETURN = r""" -changed: - description: True if a sshkey was created or removed. - type: bool - sample: true - returned: always sshkeys: - description: Information about sshkeys that were created/removed. - type: list - sample: [ + description: Information about sshkeys that were created/removed. + type: list + sample: + [ { "fingerprint": "5c:93:74:7c:ed:07:17:62:28:75:79:23:d6:08:93:46", "id": "41d61bd8-3342-428b-a09c-e67bdd18a9b7", @@ -101,7 +97,7 @@ sshkeys: "label": "mynewkey33" } ] - returned: always + returned: always """ import os diff --git a/plugins/modules/packet_volume_attachment.py b/plugins/modules/packet_volume_attachment.py index 0423cc879d..7537c1c3fe 100644 --- a/plugins/modules/packet_volume_attachment.py +++ b/plugins/modules/packet_volume_attachment.py @@ -18,8 +18,8 @@ short_description: Attach/detach a volume to a device in the Packet host description: - Attach/detach a volume to a device in the Packet host. - API is documented at U(https://www.packet.com/developers/api/volumes/). - - This module creates the attachment route in the Packet API. In order to discover the block devices on the server, you have to run the Attach - Scripts, as documented at U(https://help.packet.net/technical/storage/packet-block-storage-linux). + - This module creates the attachment route in the Packet API. In order to discover the block devices on the server, you + have to run the Attach Scripts, as documented at U(https://help.packet.net/technical/storage/packet-block-storage-linux). 
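The packet_device wait semantics documented above are easiest to see in a task: with O(state=active) and O(wait_for_public_IPv) both set, the timeout is applied to each wait in turn. A hedged sketch; the project ID and the other values are placeholders, not from this patch:

```yaml
# Illustrative only: block until the device is active, then until it has a
# public IPv4 address; each wait may take up to wait_timeout seconds.
- name: Create a device and wait for active state plus a public IPv4
  community.general.packet_device:
    project_id: 00000000-0000-0000-0000-000000000000  # placeholder
    hostnames: my-server
    operating_system: ubuntu_24_04
    plan: baremetal_0
    facility: sjc1
    state: active
    wait_for_public_IPv: 4
    wait_timeout: 600
```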
 version_added: '0.2.0'

 author:
diff --git a/plugins/modules/pacman.py b/plugins/modules/pacman.py
index a4a9370ae0..359cbc51d1 100644
--- a/plugins/modules/pacman.py
+++ b/plugins/modules/pacman.py
@@ -32,7 +32,8 @@ attributes:
 options:
   name:
     description:
-      - Name or list of names of the package(s) or file(s) to install, upgrade, or remove. Cannot be used in combination with O(upgrade).
+      - Name or list of names of the package(s) or file(s) to install, upgrade, or remove. Cannot be used in combination with
+        O(upgrade).
     aliases: [package, pkg]
     type: list
     elements: str
@@ -40,9 +41,9 @@ options:
   state:
     description:
       - Whether to install (V(present) or V(installed), V(latest)), or remove (V(absent) or V(removed)) a package.
-      - V(present) and V(installed) will simply ensure that a desired package is installed.
-      - V(latest) will update the specified package if it is not of the latest available version.
-      - V(absent) and V(removed) will remove the specified package.
+      - V(present) and V(installed) simply ensure that a desired package is installed.
+      - V(latest) updates the specified package if it is not of the latest available version.
+      - V(absent) and V(removed) remove the specified package.
     default: present
     choices: [absent, installed, latest, present, removed]
     type: str
@@ -50,7 +51,8 @@ options:
   force:
     description:
       - When removing packages, forcefully remove them, without any checks. Same as O(extra_args="--nodeps --nodeps").
-      - When combined with O(update_cache), force a refresh of all package databases. Same as O(update_cache_extra_args="--refresh --refresh").
+      - When combined with O(update_cache), force a refresh of all package databases. Same as O(update_cache_extra_args="--refresh
+        --refresh").
     default: false
     type: bool

@@ -64,8 +66,8 @@ options:
   executable:
     description:
       - Path of the binary to use. This can either be C(pacman) or a pacman compatible AUR helper.
-      - Pacman compatibility is unfortunately ill defined, in particular, this modules makes extensive use of the C(--print-format) directive
-        which is known not to be implemented by some AUR helpers (notably, C(yay)).
+      - Pacman compatibility is unfortunately ill-defined; in particular, this module makes extensive use of the C(--print-format)
+        directive which is known not to be implemented by some AUR helpers (notably, C(yay)).
       - Beware that AUR helpers might behave unexpectedly and are therefore not recommended.
     default: pacman
     type: str
@@ -82,8 +84,8 @@ options:
       - Whether or not to refresh the master package lists.
       - This can be run as part of a package installation or as a separate step.
       - If not specified, it defaults to V(false).
-      - Please note that this option only had an influence on the module's C(changed) state if O(name) and O(upgrade) are not specified before
-        community.general 5.0.0. See the examples for how to keep the old behavior.
+      - Please note that this option only had an influence on the module's C(changed) state if O(name) and O(upgrade) are
+        not specified before community.general 5.0.0. See the examples for how to keep the old behavior.
     type: bool

   update_cache_extra_args:
@@ -114,31 +116,33 @@ options:
   reason_for:
     description:
       - Set the install reason for V(all) packages or only for V(new) packages.
-      - In case of O(state=latest) already installed packages which will be updated to a newer version are not counted as V(new).
+      - In case of O(state=latest), already installed packages that are updated to a newer version are not counted as V(new).
     default: new
     choices: [all, new]
     type: str
     version_added: 5.4.0

 notes:
-  - When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly to the O(name)
-    option.
-  - To use an AUR helper (O(executable) option), a few extra setup steps might be required beforehand. For example, a dedicated build user with
-    permissions to install packages could be necessary.
-  - 'In the tests, while using C(yay) as the O(executable) option, the module failed to install AUR packages with the error: C(error: target not
-    found: <pkg>).'
+  - When used with a C(loop:), each package is processed individually; it is much more efficient to pass the list directly
+    to the O(name) option.
+  - To use an AUR helper (O(executable) option), a few extra setup steps might be required beforehand. For example, a dedicated
+    build user with permissions to install packages could be necessary.
+  - 'In the tests, while using C(yay) as the O(executable) option, the module failed to install AUR packages with the error:
+    C(error: target not found: <pkg>).'
+  - The common return values C(stdout) and C(stderr) are returned upon success, when needed, since community.general 4.1.0.
 """

 RETURN = r"""
 packages:
   description:
     - A list of packages that have been changed.
-    - Before community.general 4.5.0 this was only returned when O(upgrade=true). In community.general 4.5.0, it was sometimes omitted when the
-      package list is empty, but since community.general 4.6.0 it is always returned when O(name) is specified or O(upgrade=true).
+    - Before community.general 4.5.0 this was only returned when O(upgrade=true). In community.general 4.5.0, it was sometimes
+      omitted when the package list is empty, but since community.general 4.6.0 it is always returned when O(name) is specified
+      or O(upgrade=true).
   returned: success and O(name) is specified or O(upgrade=true)
   type: list
   elements: str
-  sample: [package, other-package]
+  sample: ["package", "other-package"]

 cache_updated:
   description:
@@ -148,22 +152,6 @@ cache_updated:
   type: bool
   sample: false
   version_added: 4.6.0
-
-stdout:
-  description:
-    - Output from pacman.
-  returned: success, when needed
-  type: str
-  sample: ":: Synchronizing package databases... core is up to date :: Starting full system upgrade..."
-  version_added: 4.1.0
-
-stderr:
-  description:
-    - Error output from pacman.
-  returned: success, when needed
-  type: str
-  sample: "warning: libtool: local (2.4.6+44+gb9b44533-14) is newer than core (2.4.6+42+gb88cebd5-15)\nwarning ..."
-  version_added: 4.1.0
 """

 EXAMPLES = r"""
diff --git a/plugins/modules/pacman_key.py b/plugins/modules/pacman_key.py
index f98fb6f8a3..851655f9fc 100644
--- a/plugins/modules/pacman_key.py
+++ b/plugins/modules/pacman_key.py
@@ -18,9 +18,9 @@ description:
   - Add or remove gpg keys from the pacman keyring.
 notes:
   - Use full-length key ID (40 characters).
-  - Keys will be verified when using O(data), O(file), or O(url) unless O(verify) is overridden.
-  - Keys will be locally signed after being imported into the keyring.
-  - If the key ID exists in the keyring, the key will not be added unless O(force_update) is specified.
+  - Keys are verified when using O(data), O(file), or O(url) unless O(verify) is overridden.
+  - Keys are locally signed after being imported into the keyring.
+  - If the key ID exists in the keyring, the key is not added unless O(force_update) is specified.
   - O(data), O(file), O(url), and O(keyserver) are mutually exclusive.
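Since O(data), O(file), O(url), and O(keyserver) are mutually exclusive, a typical pacman_key task supplies exactly one source together with the full-length key ID used for verification. A minimal sketch; the key ID and URL are illustrative, not from this patch:

```yaml
# Illustrative only: fetch a key from a URL, verify it against the given
# full-length (40-character) key ID, import it, and locally sign it.
- name: Import and locally sign a packager key
  community.general.pacman_key:
    id: 4AA4767BBC9C4B1D18AE28B77F2D434B9741E8AC  # example key ID
    url: https://example.com/keys/packager.asc
    state: present
```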
requirements: - gpg @@ -72,16 +72,22 @@ options: keyring: description: - The full path to the keyring folder on the remote server. - - If not specified, module will use pacman's default (V(/etc/pacman.d/gnupg)). + - If not specified, module uses pacman's default (V(/etc/pacman.d/gnupg)). - Useful if the remote system requires an alternative gnupg directory. type: path default: /etc/pacman.d/gnupg state: description: - - Ensures that the key is present (added) or absent (revoked). + - Ensures that the key is V(present) (added) or V(absent) (revoked). default: present choices: [absent, present] type: str + ensure_trusted: + description: + - Ensure that the key is trusted (signed by the Pacman machine key and not expired). + type: bool + default: false + version_added: 11.0.0 """ EXAMPLES = r""" @@ -129,12 +135,55 @@ from ansible.module_utils.urls import fetch_url from ansible.module_utils.common.text.converters import to_native +class GpgListResult(object): + """Wraps gpg --list-* output.""" + + def __init__(self, line): + self._parts = line.split(':') + + @property + def kind(self): + return self._parts[0] + + @property + def valid(self): + return self._parts[1] + + @property + def is_fully_valid(self): + return self.valid == 'f' + + @property + def key(self): + return self._parts[4] + + @property + def user_id(self): + return self._parts[9] + + +def gpg_get_first_attr_of_kind(lines, kind, attr): + for line in lines: + glr = GpgListResult(line) + if glr.kind == kind: + return getattr(glr, attr) + + +def gpg_get_all_attrs_of_kind(lines, kind, attr): + result = [] + for line in lines: + glr = GpgListResult(line) + if glr.kind == kind: + result.append(getattr(glr, attr)) + return result + + class PacmanKey(object): def __init__(self, module): self.module = module # obtain binary paths for gpg & pacman-key - self.gpg = module.get_bin_path('gpg', required=True) - self.pacman_key = module.get_bin_path('pacman-key', required=True) + self.gpg_binary = module.get_bin_path('gpg', required=True) + self.pacman_key_binary = module.get_bin_path('pacman-key', required=True) # obtain module parameters keyid = module.params['id'] @@ -146,47 +195,71 @@ class PacmanKey(object): force_update = module.params['force_update'] keyring = module.params['keyring'] state = module.params['state'] + ensure_trusted = module.params['ensure_trusted'] self.keylength = 40 # sanitise key ID & check if key exists in the keyring keyid = self.sanitise_keyid(keyid) - key_present = self.key_in_keyring(keyring, keyid) + key_validity = self.key_validity(keyring, keyid) + key_present = len(key_validity) > 0 + key_valid = any(key_validity) # check mode if module.check_mode: - if state == "present": + if state == 'present': changed = (key_present and force_update) or not key_present + if not changed and ensure_trusted: + changed = not (key_valid and self.key_is_trusted(keyring, keyid)) module.exit_json(changed=changed) - elif state == "absent": - if key_present: - module.exit_json(changed=True) - module.exit_json(changed=False) + if state == 'absent': + module.exit_json(changed=key_present) - if state == "present": - if key_present and not force_update: + if state == 'present': + trusted = key_valid and self.key_is_trusted(keyring, keyid) + if not force_update and key_present and (not ensure_trusted or trusted): module.exit_json(changed=False) - + changed = False if data: file = self.save_key(data) self.add_key(keyring, file, keyid, verify) - module.exit_json(changed=True) + changed = True elif file: self.add_key(keyring, file, keyid, 
verify) - module.exit_json(changed=True) + changed = True elif url: data = self.fetch_key(url) file = self.save_key(data) self.add_key(keyring, file, keyid, verify) - module.exit_json(changed=True) + changed = True elif keyserver: self.recv_key(keyring, keyid, keyserver) - module.exit_json(changed=True) - elif state == "absent": + changed = True + if changed or (ensure_trusted and not trusted): + self.lsign_key(keyring=keyring, keyid=keyid) + changed = True + module.exit_json(changed=changed) + elif state == 'absent': if key_present: self.remove_key(keyring, keyid) module.exit_json(changed=True) module.exit_json(changed=False) + def gpg(self, args, keyring=None, **kwargs): + cmd = [self.gpg_binary] + if keyring: + cmd.append('--homedir={keyring}'.format(keyring=keyring)) + cmd.extend(['--no-permission-warning', '--with-colons', '--quiet', '--batch', '--no-tty']) + return self.module.run_command(cmd + args, **kwargs) + + def pacman_key(self, args, keyring, **kwargs): + return self.module.run_command( + [self.pacman_key_binary, '--gpgdir', keyring] + args, + **kwargs) + + def pacman_machine_key(self, keyring): + unused_rc, stdout, unused_stderr = self.gpg(['--list-secret-key'], keyring=keyring) + return gpg_get_first_attr_of_kind(stdout.splitlines(), 'sec', 'key') + def is_hexadecimal(self, string): """Check if a given string is valid hexadecimal""" try: @@ -216,14 +289,11 @@ class PacmanKey(object): def recv_key(self, keyring, keyid, keyserver): """Receives key via keyserver""" - cmd = [self.pacman_key, '--gpgdir', keyring, '--keyserver', keyserver, '--recv-keys', keyid] - self.module.run_command(cmd, check_rc=True) - self.lsign_key(keyring, keyid) + self.pacman_key(['--keyserver', keyserver, '--recv-keys', keyid], keyring=keyring, check_rc=True) def lsign_key(self, keyring, keyid): """Locally sign key""" - cmd = [self.pacman_key, '--gpgdir', keyring] - self.module.run_command(cmd + ['--lsign-key', keyid], check_rc=True) + self.pacman_key(['--lsign-key', keyid], keyring=keyring, check_rc=True) def save_key(self, data): "Saves key data to a temporary file" @@ -238,14 +308,11 @@ class PacmanKey(object): """Add key to pacman's keyring""" if verify: self.verify_keyfile(keyfile, keyid) - cmd = [self.pacman_key, '--gpgdir', keyring, '--add', keyfile] - self.module.run_command(cmd, check_rc=True) - self.lsign_key(keyring, keyid) + self.pacman_key(['--add', keyfile], keyring=keyring, check_rc=True) def remove_key(self, keyring, keyid): """Remove key from pacman's keyring""" - cmd = [self.pacman_key, '--gpgdir', keyring, '--delete', keyid] - self.module.run_command(cmd, check_rc=True) + self.pacman_key(['--delete', keyid], keyring=keyring, check_rc=True) def verify_keyfile(self, keyfile, keyid): """Verify that keyfile matches the specified key ID""" @@ -254,48 +321,29 @@ class PacmanKey(object): elif keyid is None: self.module.fail_json(msg="expected a key ID, got none") - rc, stdout, stderr = self.module.run_command( - [ - self.gpg, - '--with-colons', - '--with-fingerprint', - '--batch', - '--no-tty', - '--show-keys', - keyfile - ], + rc, stdout, stderr = self.gpg( + ['--with-fingerprint', '--show-keys', keyfile], check_rc=True, ) - extracted_keyid = None - for line in stdout.splitlines(): - if line.startswith('fpr:'): - extracted_keyid = line.split(':')[9] - break - + extracted_keyid = gpg_get_first_attr_of_kind(stdout.splitlines(), 'fpr', 'user_id') if extracted_keyid != keyid: self.module.fail_json(msg="key ID does not match. 
expected %s, got %s" % (keyid, extracted_keyid)) - def key_in_keyring(self, keyring, keyid): - "Check if the key ID is in pacman's keyring" - rc, stdout, stderr = self.module.run_command( - [ - self.gpg, - '--with-colons', - '--batch', - '--no-tty', - '--no-default-keyring', - '--keyring=%s/pubring.gpg' % keyring, - '--list-keys', keyid - ], - check_rc=False, - ) + def key_validity(self, keyring, keyid): + "Check if the key ID is in pacman's keyring and not expired" + rc, stdout, stderr = self.gpg(['--no-default-keyring', '--list-keys', keyid], keyring=keyring, check_rc=False) if rc != 0: if stderr.find("No public key") >= 0: - return False + return [] else: self.module.fail_json(msg="gpg returned an error: %s" % stderr) - return True + return gpg_get_all_attrs_of_kind(stdout.splitlines(), 'uid', 'is_fully_valid') + + def key_is_trusted(self, keyring, keyid): + """Check if key is signed and not expired.""" + unused_rc, stdout, unused_stderr = self.gpg(['--check-signatures', keyid], keyring=keyring) + return self.pacman_machine_key(keyring) in gpg_get_all_attrs_of_kind(stdout.splitlines(), 'sig', 'key') def main(): @@ -309,6 +357,7 @@ def main(): verify=dict(type='bool', default=True), force_update=dict(type='bool', default=False), keyring=dict(type='path', default='/etc/pacman.d/gnupg'), + ensure_trusted=dict(type='bool', default=False), state=dict(type='str', default='present', choices=['absent', 'present']), ), supports_check_mode=True, diff --git a/plugins/modules/pagerduty.py b/plugins/modules/pagerduty.py index 8d83374c34..78443e8410 100644 --- a/plugins/modules/pagerduty.py +++ b/plugins/modules/pagerduty.py @@ -13,7 +13,7 @@ DOCUMENTATION = r""" module: pagerduty short_description: Create PagerDuty maintenance windows description: - - This module will let you create PagerDuty maintenance windows. + - This module lets you create PagerDuty maintenance windows. author: - "Andrew Newdigate (@suprememoocow)" - "Dylan Silva (@thaumos)" @@ -79,7 +79,8 @@ options: default: Created by Ansible validate_certs: description: - - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed + certificates. type: bool default: true """ @@ -202,7 +203,7 @@ class PagerDutyRequest(object): return False, json_out, True def _create_services_payload(self, service): - if (isinstance(service, list)): + if isinstance(service, list): return [{'id': s, 'type': 'service_reference'} for s in service] else: return [{'id': service, 'type': 'service_reference'}] @@ -241,15 +242,15 @@ def main(): module = AnsibleModule( argument_spec=dict( state=dict(required=True, choices=['running', 'started', 'ongoing', 'absent']), - name=dict(required=False), - user=dict(required=False), + name=dict(), + user=dict(), token=dict(required=True, no_log=True), - service=dict(required=False, type='list', elements='str', aliases=["services"]), - window_id=dict(required=False), - requester_id=dict(required=False), - hours=dict(default='1', required=False), # @TODO change to int? - minutes=dict(default='0', required=False), # @TODO change to int? - desc=dict(default='Created by Ansible', required=False), + service=dict(type='list', elements='str', aliases=["services"]), + window_id=dict(), + requester_id=dict(), + hours=dict(default='1'), # @TODO change to int? + minutes=dict(default='0'), # @TODO change to int? 
+ desc=dict(default='Created by Ansible'), validate_certs=dict(default=True, type='bool'), ) ) diff --git a/plugins/modules/pagerduty_alert.py b/plugins/modules/pagerduty_alert.py index 050dcd17e9..e3d93e8718 100644 --- a/plugins/modules/pagerduty_alert.py +++ b/plugins/modules/pagerduty_alert.py @@ -12,7 +12,7 @@ DOCUMENTATION = r""" module: pagerduty_alert short_description: Trigger, acknowledge or resolve PagerDuty incidents description: - - This module will let you trigger, acknowledge or resolve a PagerDuty incident by sending events. + - This module lets you trigger, acknowledge or resolve a PagerDuty incident by sending events. author: - "Amanpreet Singh (@ApsOps)" - "Xiao Shen (@xshen1)" @@ -43,7 +43,7 @@ options: service_id: type: str description: - - ID of PagerDuty service when incidents will be triggered, acknowledged or resolved. + - ID of PagerDuty service when incidents are triggered, acknowledged or resolved. - Required if O(api_version=v1). service_key: type: str @@ -73,68 +73,58 @@ options: type: str description: - The name of the monitoring client that is triggering this event. - required: false client_url: type: str description: - The URL of the monitoring client that is triggering this event. - required: false component: type: str description: - Component of the source machine that is responsible for the event, for example C(mysql) or C(eth0). - required: false version_added: 7.4.0 custom_details: type: dict description: - Additional details about the event and affected system. - A dictionary with custom keys and values. - required: false version_added: 7.4.0 desc: type: str description: - - For O(state=triggered) - Required. Short description of the problem that led to this trigger. This field (or a truncated version) will - be used when generating phone calls, SMS messages and alert emails. It will also appear on the incidents tables in the PagerDuty UI. The - maximum length is 1024 characters. - - For O(state=acknowledged) or O(state=resolved) - Text that will appear in the incident's log associated with this event. - required: false + - For O(state=triggered) - Required. Short description of the problem that led to this trigger. This field (or a truncated + version) is used when generating phone calls, SMS messages and alert emails. It also appears on the incidents tables + in the PagerDuty UI. The maximum length is 1024 characters. + - For O(state=acknowledged) or O(state=resolved) - Text that appears in the incident's log associated with this event. default: Created via Ansible incident_class: type: str description: - The class/type of the event, for example C(ping failure) or C(cpu load). - required: false version_added: 7.4.0 incident_key: type: str description: - Identifies the incident to which this O(state) should be applied. - - For O(state=triggered) - If there is no open (in other words unresolved) incident with this key, a new one will be created. If there is already an - open incident with a matching key, this event will be appended to that incident's log. The event key provides an easy way to 'de-dup' - problem reports. If no O(incident_key) is provided, then it will be generated by PagerDuty. - - For O(state=acknowledged) or O(state=resolved) - This should be the incident_key you received back when the incident was first opened - by a trigger event. Acknowledge events referencing resolved or nonexistent incidents will be discarded. 
-    required: false
+      - For O(state=triggered) - If there is no open (in other words unresolved) incident with this key, a new one is created.
+        If there is already an open incident with a matching key, this event is appended to that incident's log. The event
+        key provides an easy way to 'de-dup' problem reports. If no O(incident_key) is provided, then it is generated by PagerDuty.
+      - For O(state=acknowledged) or O(state=resolved) - This should be the incident_key you received back when the incident
+        was first opened by a trigger event. Acknowledge events referencing resolved or nonexistent incidents are discarded.
   link_url:
     type: str
     description:
       - Relevant link URL to the alert. For example, the website or the job link.
-    required: false
     version_added: 7.4.0
   link_text:
     type: str
     description:
       - A short description of the O(link_url).
-    required: false
     version_added: 7.4.0
   source:
     type: str
     description:
       - The unique location of the affected system, preferably a hostname or FQDN.
       - Required in case of O(state=trigger) and O(api_version=v2).
-    required: false
     version_added: 7.4.0
   severity:
     type: str
@@ -330,25 +320,25 @@ def send_event_v2(module, service_key, event_type, payload, link,
 def main():
     module = AnsibleModule(
         argument_spec=dict(
-            name=dict(required=False),
-            api_key=dict(required=False, no_log=True),
-            integration_key=dict(required=False, no_log=True),
-            service_id=dict(required=False),
-            service_key=dict(required=False, no_log=True),
+            name=dict(),
+            api_key=dict(no_log=True),
+            integration_key=dict(no_log=True),
+            service_id=dict(),
+            service_key=dict(no_log=True),
             state=dict(
                 required=True,
                 choices=['triggered', 'acknowledged', 'resolved']
             ),
             api_version=dict(type='str', default='v1', choices=['v1', 'v2']),
-            client=dict(required=False),
-            client_url=dict(required=False),
-            component=dict(required=False),
-            custom_details=dict(required=False, type='dict'),
-            desc=dict(required=False, default='Created via Ansible'),
-            incident_class=dict(required=False),
-            incident_key=dict(required=False, no_log=False),
-            link_url=dict(required=False),
-            link_text=dict(required=False),
-            source=dict(required=False),
+            client=dict(),
+            client_url=dict(),
+            component=dict(),
+            custom_details=dict(type='dict'),
+            desc=dict(default='Created via Ansible'),
+            incident_class=dict(),
+            incident_key=dict(no_log=False),
+            link_url=dict(),
+            link_text=dict(),
+            source=dict(),
             severity=dict(
                 default='critical',
                 choices=['critical', 'warning', 'error', 'info']
             ),
diff --git a/plugins/modules/pagerduty_change.py b/plugins/modules/pagerduty_change.py
index 39353f7575..de77016969 100644
--- a/plugins/modules/pagerduty_change.py
+++ b/plugins/modules/pagerduty_change.py
@@ -13,8 +13,8 @@ module: pagerduty_change
 short_description: Track a code or infrastructure change as a PagerDuty change event
 version_added: 1.3.0
 description:
-  - This module will let you create a PagerDuty change event each time the module is run.
-  - This is not an idempotent action and a new change event will be created each time it is run.
+  - This module lets you create a PagerDuty change event each time the module is run.
+  - This is not an idempotent action and a new change event is created each time it is run.
 author:
   - Adam Vaughan (@adamvaughan)
 requirements:
@@ -31,7 +31,8 @@ attributes:
 options:
   integration_key:
     description:
-      - The integration key that identifies the service the change was made to. This can be found by adding an integration to a service in PagerDuty. 
+ - The integration key that identifies the service the change was made to. This can be found by adding an integration + to a service in PagerDuty. required: true type: str summary: @@ -81,8 +82,8 @@ options: type: str validate_certs: description: - - If V(false), SSL certificates for the target URL will not be validated. This should only be used on personally controlled sites using - self-signed certificates. + - If V(false), SSL certificates for the target URL are not validated. This should only be used on personally controlled + sites using self-signed certificates. required: false default: true type: bool @@ -120,15 +121,14 @@ def main(): argument_spec=dict( integration_key=dict(required=True, type='str', no_log=True), summary=dict(required=True, type='str'), - source=dict(required=False, default='Ansible', type='str'), - user=dict(required=False, type='str'), - repo=dict(required=False, type='str'), - revision=dict(required=False, type='str'), - environment=dict(required=False, type='str'), - link_url=dict(required=False, type='str'), - link_text=dict(required=False, type='str'), - url=dict(required=False, - default='https://events.pagerduty.com/v2/change/enqueue', type='str'), + source=dict(default='Ansible', type='str'), + user=dict(type='str'), + repo=dict(type='str'), + revision=dict(type='str'), + environment=dict(type='str'), + link_url=dict(type='str'), + link_text=dict(type='str'), + url=dict(default='https://events.pagerduty.com/v2/change/enqueue', type='str'), validate_certs=dict(default=True, type='bool') ), supports_check_mode=True diff --git a/plugins/modules/pagerduty_user.py b/plugins/modules/pagerduty_user.py index e03342c792..0830af97f3 100644 --- a/plugins/modules/pagerduty_user.py +++ b/plugins/modules/pagerduty_user.py @@ -188,7 +188,7 @@ def main(): state=dict(type='str', default='present', choices=['present', 'absent']), pd_role=dict(type='str', default='responder', choices=['global_admin', 'manager', 'responder', 'observer', 'stakeholder', 'limited_stakeholder', 'restricted_access']), - pd_teams=dict(type='list', elements='str', required=False)), + pd_teams=dict(type='list', elements='str')), required_if=[['state', 'present', ['pd_teams']], ], supports_check_mode=True, ) diff --git a/plugins/modules/pam_limits.py b/plugins/modules/pam_limits.py index 516b61fec1..536ba59662 100644 --- a/plugins/modules/pam_limits.py +++ b/plugins/modules/pam_limits.py @@ -73,21 +73,24 @@ options: required: true backup: description: - - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly. + - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered + it incorrectly. required: false type: bool default: false use_min: description: - - If set to V(true), the minimal value will be used or conserved. - - If the specified value is inferior to the value in the file, file content is replaced with the new value, else content is not modified. + - If set to V(true), the minimal value is used or conserved. + - If the specified value is inferior to the value in the file, file content is replaced with the new value, else content + is not modified. required: false type: bool default: false use_max: description: - - If set to V(true), the maximal value will be used or conserved. - - If the specified value is superior to the value in the file, file content is replaced with the new value, else content is not modified. 
+        is not modified.
     required: false
     type: bool
     default: false
@@ -180,7 +183,7 @@ def main():
             use_min=dict(default=False, type='bool'),
             backup=dict(default=False, type='bool'),
             dest=dict(default=limits_conf, type='str'),
-            comment=dict(required=False, default='', type='str')
+            comment=dict(default='', type='str')
         ),
         supports_check_mode=True,
     )
diff --git a/plugins/modules/pamd.py b/plugins/modules/pamd.py
index 6502922bc1..327316aa37 100644
--- a/plugins/modules/pamd.py
+++ b/plugins/modules/pamd.py
@@ -16,7 +16,8 @@ author:
 short_description: Manage PAM Modules
 description:
   - Edit PAM service's type, control, module path and module arguments.
-  - In order for a PAM rule to be modified, the type, control and module_path must match an existing rule. See man(5) pam.d for details.
+  - In order for a PAM rule to be modified, the type, control and module_path must match an existing rule. See man(5) pam.d
+    for details.
 notes:
   - This module does not handle authselect profiles.
 extends_documentation_fragment:
@@ -67,20 +68,20 @@ options:
     type: str
   module_arguments:
     description:
-      - When O(state=updated), the O(module_arguments) will replace existing module_arguments.
-      - When O(state=args_absent) args matching those listed in O(module_arguments) will be removed.
+      - When O(state=updated), the O(module_arguments) replace existing module_arguments.
+      - When O(state=args_absent), args matching those listed in O(module_arguments) are removed.
       - When O(state=args_present) any args listed in O(module_arguments) are added if missing from the existing rule.
-      - Furthermore, if the module argument takes a value denoted by C(=), the value will be changed to that specified in module_arguments.
+      - Furthermore, if the module argument takes a value denoted by C(=), the value changes to that specified in module_arguments.
     type: list
     elements: str
   state:
     description:
-      - The default of V(updated) will modify an existing rule if type, control and module_path all match an existing rule.
-      - With V(before), the new rule will be inserted before a rule matching type, control and module_path.
-      - Similarly, with V(after), the new rule will be inserted after an existing rulematching type, control and module_path.
+      - The default of V(updated) modifies an existing rule if type, control and module_path all match an existing rule.
+      - With V(before), the new rule is inserted before a rule matching type, control and module_path.
+      - Similarly, with V(after), the new rule is inserted after an existing rule matching type, control and module_path.
       - With either V(before) or V(after) O(new_type), O(new_control), and O(new_module_path) must all be specified.
-      - If state is V(args_absent) or V(args_present), O(new_type), O(new_control), and O(new_module_path) will be ignored.
-      - State V(absent) will remove the rule.
+      - If state is V(args_absent) or V(args_present), O(new_type), O(new_control), and O(new_module_path) are ignored.
+      - State V(absent) removes the rule.
     type: str
     choices: [absent, before, after, args_absent, args_present, updated]
     default: updated
@@ -91,7 +92,8 @@ options:
     default: /etc/pam.d
   backup:
     description:
-      - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly.
+      - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered
+        it incorrectly.
     type: bool
     default: false
 """
@@ -151,11 +153,7 @@ EXAMPLES = r"""
     type: auth
     control: required
     module_path: pam_faillock.so
-    module_arguments: 'preauth
-        silent
-        deny=3
-        unlock_time=604800
-        fail_interval=900'
+    module_arguments: 'preauth silent deny=3 unlock_time=604800 fail_interval=900'
     state: updated

 - name: Remove specific arguments from a rule
diff --git a/plugins/modules/parted.py b/plugins/modules/parted.py
index 43c34ff9e5..4bf0897afc 100644
--- a/plugins/modules/parted.py
+++ b/plugins/modules/parted.py
@@ -15,8 +15,8 @@ author:
 module: parted
 short_description: Configure block device partitions
 description:
-  - This module allows configuring block device partition using the C(parted) command line tool. For a full description of the fields and the
-    options check the GNU parted manual.
+  - This module allows configuring block device partitions using the C(parted) command line tool. For a full description of
+    the fields and the options, check the GNU parted manual.
 requirements:
   - This module requires C(parted) version 1.8.3 and above.
   - Option O(align) (except V(undefined)) requires C(parted) 2.1 or above.
@@ -33,7 +33,8 @@ options:
   device:
     description:
       - The block device (disk) where to operate.
-      - Regular files can also be partitioned, but it is recommended to create a loopback device using C(losetup) to easily access its partitions.
+      - Regular files can also be partitioned, but it is recommended to create a loopback device using C(losetup) to easily
+        access its partitions.
     type: str
     required: true
   align:
@@ -49,8 +50,8 @@ options:
     type: int
   unit:
     description:
-      - Selects the current default unit that Parted will use to display locations and capacities on the disk and to interpret those given by
-        the user if they are not suffixed by an unit.
+      - Selects the current default unit that Parted uses to display locations and capacities on the disk and to interpret
+        those given by the user if they are not suffixed by a unit.
       - When fetching information about a disk, it is recommended to always specify a unit.
     type: str
     choices: [s, B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, '%', cyl, chs, compact]
@@ -58,7 +59,7 @@ options:
   label:
     description:
       - Disk label type or partition table to use.
-      - If O(device) already contains a different label, it will be changed to O(label) and any previous partitions will be lost.
+      - If O(device) already contains a different label, it is changed to O(label) and any previous partitions are lost.
       - A O(name) must be specified for a V(gpt) partition table.
     type: str
     choices: [aix, amiga, bsd, dvh, gpt, loop, mac, msdos, pc98, sun]
@@ -72,17 +73,19 @@ options:
     default: primary
   part_start:
     description:
-      - Where the partition will start as offset from the beginning of the disk, that is, the "distance" from the start of the disk. Negative
-        numbers specify distance from the end of the disk.
-      - The distance can be specified with all the units supported by parted (except compat) and it is case sensitive, for example V(10GiB), V(15%).
+      - Where the partition starts as offset from the beginning of the disk, that is, the "distance" from the start of the
+        disk. Negative numbers specify distance from the end of the disk.
+      - The distance can be specified with all the units supported by parted (except compat) and it is case sensitive, for
+        example V(10GiB), V(15%).
- Using negative values may require setting of O(fs_type) (see notes). type: str default: 0% part_end: description: - - Where the partition will end as offset from the beginning of the disk, that is, the "distance" from the start of the disk. Negative numbers - specify distance from the end of the disk. - - The distance can be specified with all the units supported by parted (except compat) and it is case sensitive, for example V(10GiB), V(15%). + - Where the partition ends as offset from the beginning of the disk, that is, the "distance" from the start of the disk. + Negative numbers specify distance from the end of the disk. + - The distance can be specified with all the units supported by parted (except compat) and it is case sensitive, for + example V(10GiB), V(15%). type: str default: 100% name: @@ -96,13 +99,13 @@ options: state: description: - Whether to create or delete a partition. - - If set to V(info) the module will only return the device information. + - If set to V(info) the module only returns the device information. type: str choices: [absent, present, info] default: info fs_type: description: - - If specified and the partition does not exist, will set filesystem type to given partition. + - If specified and the partition does not exist, sets filesystem type to given partition. - Parameter optional, but see notes below about negative O(part_start) values. type: str version_added: '0.2.0' @@ -114,10 +117,11 @@ options: version_added: '1.3.0' notes: - - When fetching information about a new disk and when the version of parted installed on the system is before version 3.1, the module queries - the kernel through C(/sys/) to obtain disk information. In this case the units CHS and CYL are not supported. - - Negative O(part_start) start values were rejected if O(fs_type) was not given. This bug was fixed in parted 3.2.153. If you want to use negative - O(part_start), specify O(fs_type) as well or make sure your system contains newer parted. + - When fetching information about a new disk and when the version of parted installed on the system is before version 3.1, + the module queries the kernel through C(/sys/) to obtain disk information. In this case the units CHS and CYL are not + supported. + - Negative O(part_start) start values were rejected if O(fs_type) was not given. This bug was fixed in parted 3.2.153. If + you want to use negative O(part_start), specify O(fs_type) as well or make sure your system contains newer parted. """ RETURN = r""" @@ -135,35 +139,31 @@ partition_info: script: description: Parted script executed by module. 
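The note above about negative O(part_start) values deserves a concrete task: the offset counts back from the end of the disk, and O(fs_type) is supplied so that older parted releases (before 3.2.153) accept the negative value. A hedged sketch with an illustrative device and sizes, not taken from this patch:

```yaml
# Illustrative only: carve a 1 GiB partition at the end of the disk; fs_type
# is set so parted releases before 3.2.153 accept the negative part_start.
- name: Create a 1GiB partition at the end of /dev/sdb
  community.general.parted:
    device: /dev/sdb
    number: 2
    state: present
    fs_type: ext4
    part_start: -1GiB
    part_end: 100%
```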
type: str - sample: { - "disk": { - "dev": "/dev/sdb", - "logical_block": 512, - "model": "VMware Virtual disk", - "physical_block": 512, - "size": 5.0, - "table": "msdos", - "unit": "gib" - }, - "partitions": [{ - "begin": 0.0, - "end": 1.0, - "flags": ["boot", "lvm"], - "fstype": "", - "name": "", - "num": 1, + sample: + "disk": + "dev": "/dev/sdb" + "logical_block": 512 + "model": "VMware Virtual disk" + "physical_block": 512 + "size": 5.0 + "table": "msdos" + "unit": "gib" + "partitions": + - "begin": 0.0 + "end": 1.0 + "flags": ["boot", "lvm"] + "fstype": "" + "name": "" + "num": 1 "size": 1.0 - }, { - "begin": 1.0, - "end": 5.0, - "flags": [], - "fstype": "", - "name": "", - "num": 2, + - "begin": 1.0 + "end": 5.0 + "flags": [] + "fstype": "" + "name": "" + "num": 2 "size": 4.0 - }], - "script": "unit KiB print " - } + "script": "unit KiB print " """ EXAMPLES = r""" @@ -583,11 +583,8 @@ def read_record(file_path, default=None): Reads the first line of a file and returns it. """ try: - f = open(file_path, 'r') - try: + with open(file_path, 'r') as f: return f.readline().strip() - finally: - f.close() except IOError: return default diff --git a/plugins/modules/pear.py b/plugins/modules/pear.py index e8f3d96305..5eb84b509d 100644 --- a/plugins/modules/pear.py +++ b/plugins/modules/pear.py @@ -45,14 +45,16 @@ options: - Path to the pear executable. prompts: description: - - List of regular expressions that can be used to detect prompts during pear package installation to answer the expected question. - - Prompts will be processed in the same order as the packages list. + - List of regular expressions that can be used to detect prompts during pear package installation to answer the expected + question. + - Prompts are processed in the same order as the packages list. - You can optionally specify an answer to any question in the list. - - If no answer is provided, the list item will only contain the regular expression. - - "To specify an answer, the item will be a dict with the regular expression as key and the answer as value C(my_regular_expression: 'an_answer')." + - If no answer is provided, the list item must contain only the regular expression. + - "To specify an answer, the item must be a dictionary with the regular expression as key and the answer as value C(my_regular_expression: + 'an_answer')." - You can provide a list containing items with or without answer. - - A prompt list can be shorter or longer than the packages list but will issue a warning. - - If you want to specify that a package will not need prompts in the middle of a list, V(null). + - A prompt list can be shorter or longer than the packages list but it issues a warning. + - If you want to specify that a package does not need prompts in the middle of a list, V(null). type: list elements: raw version_added: 0.2.0 @@ -83,9 +85,9 @@ EXAMPLES = r""" prompts: - (.*)Enable internal debugging in APCu \[no\]: "yes" -- name: Install multiple pear/pecl packages at once with prompts. Prompts will be processed on the same order as the packages order. If there - is more prompts than packages, packages without prompts will be installed without any prompt expected. If there is more packages than prompts, - additional prompts will be ignored. +- name: Install multiple pear/pecl packages at once with prompts. Prompts will be processed on the same order as the packages + order. If there is more prompts than packages, packages without prompts will be installed without any prompt expected. 
diff --git a/plugins/modules/pear.py b/plugins/modules/pear.py
index e8f3d96305..5eb84b509d 100644
--- a/plugins/modules/pear.py
+++ b/plugins/modules/pear.py
@@ -45,14 +45,16 @@ options:
       - Path to the pear executable.
   prompts:
     description:
-      - List of regular expressions that can be used to detect prompts during pear package installation to answer the expected question.
-      - Prompts will be processed in the same order as the packages list.
+      - List of regular expressions that can be used to detect prompts during pear package installation to answer the expected
+        question.
+      - Prompts are processed in the same order as the packages list.
       - You can optionally specify an answer to any question in the list.
-      - If no answer is provided, the list item will only contain the regular expression.
-      - "To specify an answer, the item will be a dict with the regular expression as key and the answer as value C(my_regular_expression: 'an_answer')."
+      - If no answer is provided, the list item must contain only the regular expression.
+      - "To specify an answer, the item must be a dictionary with the regular expression as key and the answer as value, for
+        example C(my_regular_expression: 'an_answer')."
       - You can provide a list containing items with or without answer.
-      - A prompt list can be shorter or longer than the packages list but will issue a warning.
-      - If you want to specify that a package will not need prompts in the middle of a list, V(null).
+      - A prompt list can be shorter or longer than the packages list, but a length mismatch issues a warning.
+      - To specify that a package in the middle of the list does not need prompts, use V(null) as its list item.
     type: list
     elements: raw
     version_added: 0.2.0
@@ -83,9 +85,9 @@ EXAMPLES = r"""
     prompts:
      - (.*)Enable internal debugging in APCu \[no\]: "yes"

-- name: Install multiple pear/pecl packages at once with prompts. Prompts will be processed on the same order as the packages order. If there
-    is more prompts than packages, packages without prompts will be installed without any prompt expected. If there is more packages than prompts,
-    additional prompts will be ignored.
+- name: Install multiple pear/pecl packages at once with prompts. Prompts will be processed in the same order as the packages
+    list. If there are more prompts than packages, packages without prompts will be installed without any prompt expected.
+    If there are more packages than prompts, additional prompts will be ignored.
   community.general.pear:
     name: pecl/gnupg, pecl/apcu
     state: present
@@ -93,9 +95,9 @@ EXAMPLES = r"""
     - I am a test prompt because gnupg doesnt asks anything
     - (.*)Enable internal debugging in APCu \[no\]: "yes"

-- name: Install multiple pear/pecl packages at once skipping the first prompt. Prompts will be processed on the same order as the packages order.
-    If there is more prompts than packages, packages without prompts will be installed without any prompt expected. If there is more packages
-    than prompts, additional prompts will be ignored.
+- name: Install multiple pear/pecl packages at once skipping the first prompt. Prompts will be processed in the same order
+    as the packages list. If there are more prompts than packages, packages without prompts will be installed without any
+    prompt expected. If there are more packages than prompts, additional prompts will be ignored.
   community.general.pear:
     name: pecl/gnupg, pecl/apcu
     state: present
@@ -291,8 +293,8 @@ def main():
         argument_spec=dict(
             name=dict(aliases=['pkg'], required=True),
             state=dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed']),
-            executable=dict(default=None, required=False, type='path'),
-            prompts=dict(default=None, required=False, type='list', elements='raw'),
+            executable=dict(type='path'),
+            prompts=dict(type='list', elements='raw'),
         ),
         supports_check_mode=True)
diff --git a/plugins/modules/pids.py b/plugins/modules/pids.py
index aea4d82d37..2db5dbfa23 100644
--- a/plugins/modules/pids.py
+++ b/plugins/modules/pids.py
@@ -9,8 +9,8 @@ __metaclass__ = type

 DOCUMENTATION = r"""
 module: pids
-description: "Retrieves a list of PIDs of given process name in Ansible controller/controlled machines.Returns an empty list if no process in
-  that name exists."
+description: "Retrieves a list of PIDs of a given process name in Ansible controller/controlled machines. Returns an empty
+  list if no process with that name exists."
 short_description: Retrieves a list of process IDs if the process is running, otherwise returns an empty list
 author:
   - Saranya Sridharan (@saranyasridharan)
diff --git a/plugins/modules/pingdom.py b/plugins/modules/pingdom.py
index 192dd244f2..7c82063ab9 100644
--- a/plugins/modules/pingdom.py
+++ b/plugins/modules/pingdom.py
@@ -12,7 +12,7 @@ DOCUMENTATION = r"""
 module: pingdom
 short_description: Pause/unpause Pingdom alerts
 description:
-  - This module will let you pause/unpause Pingdom alerts.
+  - This module lets you pause/unpause Pingdom alerts.
 author:
   - "Dylan Silva (@thaumos)"
   - "Justin Johns (!UNKNOWN)"
@@ -132,10 +132,10 @@ def main():
     passwd = module.params['passwd']
     key = module.params['key']

-    if (state == "paused" or state == "stopped"):
+    if state == "paused" or state == "stopped":
         (rc, name, result) = pause(checkid, uid, passwd, key)

-    if (state == "running" or state == "started"):
+    if state == "running" or state == "started":
         (rc, name, result) = unpause(checkid, uid, passwd, key)

     if rc != 0:
diff --git a/plugins/modules/pip_package_info.py b/plugins/modules/pip_package_info.py
index 0bc08e7ce1..80bdedf7fe 100644
--- a/plugins/modules/pip_package_info.py
+++ b/plugins/modules/pip_package_info.py
@@ -20,8 +20,8 @@ extends_documentation_fragment:
 options:
   clients:
     description:
-      - A list of the pip executables that will be used to get the packages. They can be supplied with the full path or just the executable name,
-        for example V(pip3.7).
+ - A list of the pip executables that are used to get the packages. They can be supplied with the full path or just the + executable name, for example V(pip3.7). default: ['pip'] required: false type: list @@ -59,37 +59,39 @@ packages: returned: always type: dict sample: - "packages": { + { + "packages": { "pip": { - "Babel": [ - { - "name": "Babel", - "source": "pip", - "version": "2.6.0" - } - ], - "Flask": [ - { - "name": "Flask", - "source": "pip", - "version": "1.0.2" - } - ], - "Flask-SQLAlchemy": [ - { - "name": "Flask-SQLAlchemy", - "source": "pip", - "version": "2.3.2" - } - ], - "Jinja2": [ - { - "name": "Jinja2", - "source": "pip", - "version": "2.10" - } - ], - }, + "Babel": [ + { + "name": "Babel", + "source": "pip", + "version": "2.6.0" + } + ], + "Flask": [ + { + "name": "Flask", + "source": "pip", + "version": "1.0.2" + } + ], + "Flask-SQLAlchemy": [ + { + "name": "Flask-SQLAlchemy", + "source": "pip", + "version": "2.3.2" + } + ], + "Jinja2": [ + { + "name": "Jinja2", + "source": "pip", + "version": "2.10" + } + ] + } + } } """ diff --git a/plugins/modules/pipx.py b/plugins/modules/pipx.py index aa4309ce6c..778810be0c 100644 --- a/plugins/modules/pipx.py +++ b/plugins/modules/pipx.py @@ -47,24 +47,35 @@ options: description: - Desired state for the application. - The states V(present) and V(absent) are aliases to V(install) and V(uninstall), respectively. - - The state V(latest) is equivalent to executing the task twice, with state V(install) and then V(upgrade). It was added in community.general - 5.5.0. - - The states V(install_all), V(uninject), V(upgrade_shared), V(pin) and V(unpin) are only available in C(pipx>=1.6.0), make sure to have - a compatible version when using this option. These states have been added in community.general 9.4.0. + - The state V(latest) is equivalent to executing the task twice, with state V(install) and then V(upgrade). It was added + in community.general 5.5.0. + - The states V(install_all), V(uninject), V(upgrade_shared), V(pin) and V(unpin) are only available in C(pipx>=1.6.0), + make sure to have a compatible version when using this option. These states have been added in community.general 9.4.0. name: type: str description: - - The name of the application. In C(pipx) documentation it is also referred to as the name of the virtual environment where the application - will be installed. - - If O(name) is a simple package name without version specifiers, then that name is used as the Python package name to be installed. - - Use O(source) for passing package specifications or installing from URLs or directories. + - The name of the application and also the name of the Python package being installed. + - In C(pipx) documentation it is also referred to as the name of the virtual environment where the application is installed. + - If O(name) is a simple package name without version specifiers, then that name is used as the Python package name + to be installed. + - Starting in community.general 10.7.0, you can use package specifiers when O(state=present) or O(state=install). For + example, O(name=tox<4.0.0) or O(name=tox>3.0.27). + - Please note that when you use O(state=present) and O(name) with version specifiers, contrary to the behavior of C(pipx), + this module honors the version specifier and installs a version of the application that satisfies it. If you want + to ensure the reinstallation of the application even when the version specifier is met, then you must use O(force=true), + or perhaps use O(state=upgrade) instead. 
+ - Use O(source) for installing from URLs or directories. source: type: str description: - - Source for the package. This option is used when O(state=install) or O(state=latest), and it is ignored with other states. - - Use O(source) when installing a Python package with version specifier, or from a local path, from a VCS URL or compressed file. + - Source for the package. This option is used when O(state=install) or O(state=latest), and it is ignored with other + states. + - Use O(source) when installing a Python package with version specifier, or from a local path, from a VCS URL or compressed + file. - The value of this option is passed as-is to C(pipx). - - O(name) is still required when using O(source) to establish the application name without fetching the package from a remote source. + - O(name) is still required when using O(source) to establish the application name without fetching the package from + a remote source. + - The module is not idempotent when using O(source). install_apps: description: - Add apps from the injected packages. @@ -88,6 +99,7 @@ options: description: - Force modification of the application's virtual environment. See C(pipx) for details. - Only used when O(state=install), O(state=upgrade), O(state=upgrade_all), O(state=latest), or O(state=inject). + - The module is not idempotent when O(force=true). type: bool default: false include_injected: @@ -136,14 +148,14 @@ options: spec_metadata: description: - Spec metadata file for O(state=install_all). - - This content of the file is usually generated with C(pipx list --json), and it can be obtained with M(community.general.pipx_info) with - O(community.general.pipx_info#module:include_raw=true) and obtaining the content from the RV(community.general.pipx_info#module:raw_output). + - This content of the file is usually generated with C(pipx list --json), and it can be obtained with M(community.general.pipx_info) + with O(community.general.pipx_info#module:include_raw=true) and obtaining the content from the RV(community.general.pipx_info#module:raw_output). type: path version_added: 9.4.0 -notes: - - This first implementation does not verify whether a specified version constraint has been installed or not. Hence, when using version operators, - C(pipx) module will always try to execute the operation, even when the application was previously installed. This feature will be added in - the future. +requirements: + - When using O(name) with version specifiers, the Python package C(packaging) is required. + - If the package C(packaging) is at a version lesser than C(22.0.0), it fails silently when processing invalid specifiers, + like C(tox<<<<4.0). 
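The new O(name) and O(source) semantics described above are easiest to check with two small tasks. A sketch of the documented behavior (the specifier and the URL are only illustrations): the first task is a no-op whenever the installed tox already satisfies C(<4.0.0); the second keeps O(name) as the application label while passing the source string to C(pipx) as-is, and per the note above is not idempotent:

- name: Ensure a tox matching the version specifier is installed
  community.general.pipx:
    name: tox<4.0.0
    state: install

- name: Install black from a VCS URL (name only establishes the application name)
  community.general.pipx:
    name: black
    source: git+https://github.com/psf/black.git  # illustrative URL
    state: install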
author: - "Alexei Znamensky (@russoz)" """ @@ -163,6 +175,12 @@ EXAMPLES = r""" name: tox state: upgrade +- name: Install or upgrade tox with dependency group 'docs' + community.general.pipx: + name: tox + source: tox[docs] + state: latest + - name: Reinstall black with specific Python version community.general.pipx: name: black @@ -197,7 +215,9 @@ version: from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper -from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner, pipx_common_argspec, make_process_list +from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner, pipx_common_argspec, make_process_dict +from ansible_collections.community.general.plugins.module_utils.pkg_req import PackageRequirement +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion from ansible.module_utils.facts.compat import ansible_facts @@ -251,21 +271,15 @@ class PipX(StateModuleHelper): ), supports_check_mode=True, ) - use_old_vardict = False def _retrieve_installed(self): - name = _make_name(self.vars.name, self.vars.suffix) - output_process = make_process_list(self, include_injected=True, name=name) - installed = self.runner('_list global', output_process=output_process).run() + output_process = make_process_dict(include_injected=True) + installed, dummy = self.runner('_list global', output_process=output_process).run() - if name is not None: - app_list = [app for app in installed if app['name'] == name] - if app_list: - return {name: app_list[0]} - else: - return {} + if self.app_name is None: + return installed - return installed + return {k: v for k, v in installed.items() if k == self.app_name} def __init_module__(self): if self.vars.executable: @@ -275,12 +289,20 @@ class PipX(StateModuleHelper): self.command = [facts['python']['executable'], '-m', 'pipx'] self.runner = pipx_runner(self.module, self.command) + pkg_req = PackageRequirement(self.module, self.vars.name) + self.parsed_name = pkg_req.parsed_name + self.parsed_req = pkg_req.requirement + self.app_name = _make_name(self.parsed_name, self.vars.suffix) + self.vars.set('application', self._retrieve_installed(), change=True, diff=True) with self.runner("version") as ctx: rc, out, err = ctx.run() self.vars.version = out.strip() + if LooseVersion(self.vars.version) < LooseVersion("1.7.0"): + self.do_raise("The pipx tool must be at least at version 1.7.0") + def __quit_module__(self): self.vars.application = self._retrieve_installed() @@ -291,12 +313,27 @@ class PipX(StateModuleHelper): self.vars.set('run_info', ctx.run_info, verbosity=4) def state_install(self): - if not self.vars.application or self.vars.force: - self.changed = True - args_order = 'state global index_url install_deps force python system_site_packages editable pip_args suffix name_source' - with self.runner(args_order, check_mode_skip=True) as ctx: - ctx.run(name_source=[self.vars.name, self.vars.source]) - self._capture_results(ctx) + # If we have a version spec and no source, use the version spec as source + if self.parsed_req and not self.vars.source: + self.vars.source = self.vars.name + + if self.vars.application.get(self.app_name): + is_installed = True + version_match = self.vars.application[self.app_name]['version'] in self.parsed_req.specifier if self.parsed_req else True + force = self.vars.force or (not version_match) + else: + is_installed = False + version_match = False + force = self.vars.force + + if is_installed and 
version_match and not force: + return + + self.changed = True + args_order = 'state global index_url install_deps force python system_site_packages editable pip_args suffix name_source' + with self.runner(args_order, check_mode_skip=True) as ctx: + ctx.run(name_source=[self.parsed_name, self.vars.source], force=force) + self._capture_results(ctx) state_present = state_install @@ -378,12 +415,12 @@ class PipX(StateModuleHelper): def state_latest(self): if not self.vars.application or self.vars.force: self.changed = True - args_order = 'state index_url install_deps force python system_site_packages editable pip_args suffix name_source' + args_order = 'state global index_url install_deps force python system_site_packages editable pip_args suffix name_source' with self.runner(args_order, check_mode_skip=True) as ctx: ctx.run(state='install', name_source=[self.vars.name, self.vars.source]) self._capture_results(ctx) - with self.runner('state include_injected index_url force editable pip_args name', check_mode_skip=True) as ctx: + with self.runner('state global include_injected index_url force editable pip_args name', check_mode_skip=True) as ctx: ctx.run(state='upgrade') self._capture_results(ctx) diff --git a/plugins/modules/pipx_info.py b/plugins/modules/pipx_info.py index 91d2fdb21c..fc74df496a 100644 --- a/plugins/modules/pipx_info.py +++ b/plugins/modules/pipx_info.py @@ -115,7 +115,15 @@ cmd: returned: success type: list elements: str - sample: ["/usr/bin/python3.10", "-m", "pipx", "list", "--include-injected", "--json"] + sample: + [ + "/usr/bin/python3.10", + "-m", + "pipx", + "list", + "--include-injected", + "--json" + ] version: description: Version of pipx. @@ -126,7 +134,8 @@ version: """ from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper -from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner, pipx_common_argspec, make_process_list +from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner, pipx_common_argspec, make_process_dict +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion from ansible.module_utils.facts.compat import ansible_facts @@ -144,7 +153,6 @@ class PipXInfo(ModuleHelper): argument_spec=argument_spec, supports_check_mode=True, ) - use_old_vardict = False def __init_module__(self): if self.vars.executable: @@ -157,10 +165,24 @@ class PipXInfo(ModuleHelper): rc, out, err = ctx.run() self.vars.version = out.strip() + if LooseVersion(self.vars.version) < LooseVersion("1.7.0"): + self.do_raise("The pipx tool must be at least at version 1.7.0") + def __run__(self): - output_process = make_process_list(self, **self.vars.as_dict()) + output_process = make_process_dict(self.vars.include_injected, self.vars.include_deps) with self.runner('_list global', output_process=output_process) as ctx: - self.vars.application = ctx.run() + applications, raw_data = ctx.run() + if self.vars.include_raw: + self.vars.raw_output = raw_data + + if self.vars.name: + self.vars.application = [ + v + for k, v in applications.items() + if k == self.vars.name + ] + else: + self.vars.application = list(applications.values()) self._capture_results(ctx) def _capture_results(self, ctx): diff --git a/plugins/modules/pkg5_publisher.py b/plugins/modules/pkg5_publisher.py index 01c9d48cce..26abded4e2 100644 --- a/plugins/modules/pkg5_publisher.py +++ b/plugins/modules/pkg5_publisher.py @@ -16,7 +16,7 @@ author: "Peter Oliver (@mavit)" short_description: 
Manages Solaris 11 Image Packaging System publishers
 description:
   - IPS packages are the native packages in Solaris 11 and higher.
-  - This modules will configure which publishers a client will download IPS packages from.
+  - This module configures which publishers a client downloads IPS packages from.
 extends_documentation_fragment:
   - community.general.attributes
 attributes:
diff --git a/plugins/modules/pkgin.py b/plugins/modules/pkgin.py
index 51071aefc7..8695f1b5af 100644
--- a/plugins/modules/pkgin.py
+++ b/plugins/modules/pkgin.py
@@ -26,7 +26,8 @@ author:
   - "Shaun Zinck (@szinck)"
   - "Jasper Lievisse Adriaanse (@jasperla)"
 notes:
-  - 'Known bug with pkgin < 0.8.0: if a package is removed and another package depends on it, the other package will be silently removed as well.'
+  - 'Known bug with pkgin < 0.8.0: if a package is removed and another package depends on it, the other package is silently
+    removed as well.'
 extends_documentation_fragment:
   - community.general.attributes
 attributes:
diff --git a/plugins/modules/pkgng.py b/plugins/modules/pkgng.py
index ae333b492b..58eafb9e0c 100644
--- a/plugins/modules/pkgng.py
+++ b/plugins/modules/pkgng.py
@@ -30,7 +30,7 @@ options:
   name:
     description:
       - Name or list of names of packages to install/remove.
-      - With O(name=*), O(state=latest) will operate, but O(state=present) and O(state=absent) will be noops.
+      - With O(name=*), O(state=latest) operates, but O(state=present) and O(state=absent) are noops.
     required: true
     aliases: [pkg]
     type: list
@@ -50,33 +50,34 @@ options:
     default: false
   annotation:
     description:
-      - A list of keyvalue-pairs of the form C(<+/-/:><key>[=<value>]). A V(+) denotes adding an annotation, a V(-) denotes removing an annotation,
-        and V(:) denotes modifying an annotation. If setting or modifying annotations, a value must be provided.
+      - A list of key-value pairs of the form C(<+/-/:><key>[=<value>]). A V(+) denotes adding an annotation, a V(-) denotes
+        removing an annotation, and V(:) denotes modifying an annotation. If setting or modifying annotations, a value must
+        be provided.
     required: false
     type: list
     elements: str
   pkgsite:
     description:
-      - For C(pkgng) versions before 1.1.4, specify C(packagesite) to use for downloading packages.
-        If not specified, use settings from C(/usr/local/etc/pkg.conf).
+      - For C(pkgng) versions before 1.1.4, specify C(packagesite) to use for downloading packages. If not specified, use
+        settings from C(/usr/local/etc/pkg.conf).
       - For newer C(pkgng) versions, specify the name of a repository configured in C(/usr/local/etc/pkg/repos).
     required: false
     type: str
   rootdir:
     description:
-      - For C(pkgng) versions 1.5 and later, pkg will install all packages within the specified root directory.
+      - For C(pkgng) versions 1.5 and later, pkg installs all packages within the specified root directory.
       - Can not be used together with O(chroot) or O(jail) options.
     required: false
     type: path
   chroot:
     description:
-      - Pkg will chroot in the specified environment.
+      - Pkg chroots in the specified environment.
       - Can not be used together with O(rootdir) or O(jail) options.
     required: false
     type: path
   jail:
     description:
-      - Pkg will execute in the given jail name or id.
+      - Pkg executes in the given jail name or ID.
       - Can not be used together with O(chroot) or O(rootdir) options.
     type: str
   autoremove:
@@ -102,9 +103,9 @@ options:
     version_added: 9.3.0
 author: "bleader (@bleader)"
 notes:
-  - When using pkgsite, be careful that already in cache packages will not be downloaded again.
-  - When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly to the O(name)
-    option.
+  - When using pkgsite, be careful that packages already in the cache are not downloaded again.
+  - When used with a C(loop:) each package is processed individually; it is much more efficient to pass the list directly
+    to the O(name) option.
 """

 EXAMPLES = r"""
@@ -421,17 +422,17 @@ def autoremove_packages(module, run_pkgng):
 def main():
     module = AnsibleModule(
         argument_spec=dict(
-            state=dict(default="present", choices=["present", "latest", "absent"], required=False),
+            state=dict(default="present", choices=["present", "latest", "absent"]),
             name=dict(aliases=["pkg"], required=True, type='list', elements='str'),
             cached=dict(default=False, type='bool'),
-            ignore_osver=dict(default=False, required=False, type='bool'),
-            annotation=dict(required=False, type='list', elements='str'),
-            pkgsite=dict(required=False),
-            rootdir=dict(required=False, type='path'),
-            chroot=dict(required=False, type='path'),
-            jail=dict(required=False, type='str'),
+            ignore_osver=dict(default=False, type='bool'),
+            annotation=dict(type='list', elements='str'),
+            pkgsite=dict(),
+            rootdir=dict(type='path'),
+            chroot=dict(type='path'),
+            jail=dict(type='str'),
             autoremove=dict(default=False, type='bool'),
-            use_globs=dict(default=True, required=False, type='bool'),
+            use_globs=dict(default=True, type='bool'),
         ),
         supports_check_mode=True,
         mutually_exclusive=[["rootdir", "chroot", "jail"]])
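The pkgng note about C(loop:) above is the difference between one pkg transaction and one module run per package. A hedged sketch (the package names are arbitrary):

# Preferred: one module invocation, one pkg transaction
- name: Install several packages at once
  community.general.pkgng:
    name:
      - nginx
      - git
    state: present

# Works, but runs the module once per package and is much slower
- name: Install the same packages with a loop
  community.general.pkgng:
    name: '{{ item }}'
    state: present
  loop:
    - nginx
    - git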
diff --git a/plugins/modules/pkgutil.py b/plugins/modules/pkgutil.py
index 8efc294d7a..a40bff06ec 100644
--- a/plugins/modules/pkgutil.py
+++ b/plugins/modules/pkgutil.py
@@ -17,7 +17,7 @@ module: pkgutil
 short_description: OpenCSW package management on Solaris
 description:
   - This module installs, updates and removes packages from the OpenCSW project for Solaris.
-  - Unlike the M(community.general.svr4pkg) module, it will resolve and download dependencies.
+  - Unlike the M(community.general.svr4pkg) module, it resolves and downloads dependencies.
   - See U(https://www.opencsw.org/) for more information about the project.
 author:
   - Alexander Winkler (@dermute)
@@ -28,7 +28,8 @@ attributes:
   check_mode:
     support: full
     details:
-      - In order to check the availability of packages, the catalog cache under C(/var/opt/csw/pkgutil) may be refreshed even in check mode.
+      - In order to check the availability of packages, the catalog cache under C(/var/opt/csw/pkgutil) may be refreshed even
+        in check mode.
   diff_mode:
     support: none
 options:
@@ -49,7 +50,7 @@ options:
   state:
     description:
       - Whether to install (V(present)/V(installed)), or remove (V(absent)/V(removed)) packages.
-      - The upgrade (V(latest)) operation will update/install the packages to the latest version available.
+      - The upgrade (V(latest)) operation updates/installs the packages to the latest version available.
     type: str
     required: true
     choices: [absent, installed, latest, present, removed]
diff --git a/plugins/modules/pmem.py b/plugins/modules/pmem.py
index fe414122ef..1555ec842e 100644
--- a/plugins/modules/pmem.py
+++ b/plugins/modules/pmem.py
@@ -14,7 +14,8 @@ module: pmem
 short_description: Configure Intel Optane Persistent Memory modules
 version_added: 4.5.0
 description:
-  - This module allows Configuring Intel Optane Persistent Memory modules (PMem) using C(ipmctl) and C(ndctl) command line tools.
+  - This module allows configuring Intel Optane Persistent Memory modules (PMem) using C(ipmctl) and C(ndctl) command line
+    tools.
requirements: - C(ipmctl) and C(ndctl) command line tools - xmltodict @@ -29,7 +30,8 @@ options: appdirect: description: - Percentage of the total capacity to use in AppDirect Mode (V(0)-V(100)). - - Create AppDirect capacity utilizing hardware interleaving across the requested PMem modules if applicable given the specified target. + - Create AppDirect capacity utilizing hardware interleaving across the requested PMem modules if applicable given the + specified target. - Total of O(appdirect), O(memorymode) and O(reserved) must be V(100). type: int appdirect_interleaved: @@ -44,9 +46,9 @@ options: type: int reserved: description: - - Percentage of the capacity to reserve (V(0)-V(100)). O(reserved) will not be mapped into the system physical address space and will be - presented as reserved capacity with Show Device and Show Memory Resources Commands. - - O(reserved) will be set automatically if this is not configured. + - Percentage of the capacity to reserve (V(0)-V(100)). O(reserved) is not mapped into the system physical address space + and is presented as reserved capacity with Show Device and Show Memory Resources Commands. + - O(reserved) is set automatically if this is not configured. type: int required: false socket: @@ -100,8 +102,8 @@ options: choices: ['pmem', 'blk'] size: description: - - The size of namespace. This option supports the suffixes V(k) or V(K) or V(KB) for KiB, V(m) or V(M) or V(MB) for MiB, V(g) or V(G) - or V(GB) for GiB and V(t) or V(T) or V(TB) for TiB. + - The size of namespace. This option supports the suffixes V(k) or V(K) or V(KB) for KiB, V(m) or V(M) or V(MB) + for MiB, V(g) or V(G) or V(GB) for GiB and V(t) or V(T) or V(TB) for TiB. - This option is required if multiple namespaces are configured. - If this option is not set, all of the available space of a region is configured. type: str @@ -145,20 +147,21 @@ result: namespace: description: The list of the detail of namespace. type: list - sample: [ - { - "appdirect": 111669149696, - "memorymode": 970662608896, - "reserved": 3626500096, - "socket": 0 - }, - { - "appdirect": 111669149696, - "memorymode": 970662608896, - "reserved": 3626500096, - "socket": 1 - } - ] + sample: + [ + { + "appdirect": 111669149696, + "memorymode": 970662608896, + "reserved": 3626500096, + "socket": 0 + }, + { + "appdirect": 111669149696, + "memorymode": 970662608896, + "reserved": 3626500096, + "socket": 1 + } + ] """ EXAMPLES = r""" diff --git a/plugins/modules/pnpm.py b/plugins/modules/pnpm.py index c4dbf55dff..2dad63a608 100644 --- a/plugins/modules/pnpm.py +++ b/plugins/modules/pnpm.py @@ -77,14 +77,14 @@ options: production: description: - Install dependencies in production mode. - - Pnpm will ignore any dependencies under C(devDependencies) in package.json. + - Pnpm ignores any dependencies under C(devDependencies) in package.json. required: false type: bool default: false dev: description: - Install dependencies in development mode. - - Pnpm will ignore any regular dependencies in C(package.json). + - Pnpm ignores any regular dependencies in C(package.json). 
required: false default: false type: bool @@ -339,11 +339,11 @@ class Pnpm(object): def main(): arg_spec = dict( - name=dict(default=None), - alias=dict(default=None), - path=dict(default=None, type="path"), - version=dict(default=None), - executable=dict(default=None, type="path"), + name=dict(), + alias=dict(), + path=dict(type="path"), + version=dict(), + executable=dict(type="path"), ignore_scripts=dict(default=False, type="bool"), no_optional=dict(default=False, type="bool"), production=dict(default=False, type="bool"), diff --git a/plugins/modules/portage.py b/plugins/modules/portage.py index f0b7220836..8a00b934dd 100644 --- a/plugins/modules/portage.py +++ b/plugins/modules/portage.py @@ -173,7 +173,8 @@ options: loadavg: description: - - Specifies that no new builds should be started if there are other builds running and the load average is at least LOAD. + - Specifies that no new builds should be started if there are other builds running and the load average is at least + LOAD. - 'Since version 2.6: Value of 0 or False resets any previously added C(--load-average) setting values.' type: float @@ -192,7 +193,7 @@ options: quietfail: description: - Suppresses display of the build log on stdout (--quiet-fail). - - Only the die message and the path of the build log will be displayed on stdout. + - Only the die message and the path of the build log are displayed on stdout. type: bool default: false @@ -509,13 +510,13 @@ portage_absent_states = ['absent', 'unmerged', 'removed'] def main(): module = AnsibleModule( argument_spec=dict( - package=dict(type='list', elements='str', default=None, aliases=['name']), + package=dict(type='list', elements='str', aliases=['name']), state=dict( default=portage_present_states[0], choices=portage_present_states + portage_absent_states, ), update=dict(default=False, type='bool'), - backtrack=dict(default=None, type='int'), + backtrack=dict(type='int'), deep=dict(default=False, type='bool'), newuse=dict(default=False, type='bool'), changed_use=dict(default=False, type='bool'), @@ -524,18 +525,18 @@ def main(): nodeps=dict(default=False, type='bool'), onlydeps=dict(default=False, type='bool'), depclean=dict(default=False, type='bool'), - select=dict(default=None, type='bool'), + select=dict(type='bool'), quiet=dict(default=False, type='bool'), verbose=dict(default=False, type='bool'), - sync=dict(default=None, choices=['yes', 'web', 'no']), + sync=dict(choices=['yes', 'web', 'no']), getbinpkgonly=dict(default=False, type='bool'), getbinpkg=dict(default=False, type='bool'), usepkgonly=dict(default=False, type='bool'), usepkg=dict(default=False, type='bool'), keepgoing=dict(default=False, type='bool'), - jobs=dict(default=None, type='int'), - loadavg=dict(default=None, type='float'), - withbdeps=dict(default=None, type='bool'), + jobs=dict(type='int'), + loadavg=dict(type='float'), + withbdeps=dict(type='bool'), quietbuild=dict(default=False, type='bool'), quietfail=dict(default=False, type='bool'), ), diff --git a/plugins/modules/pritunl_org.py b/plugins/modules/pritunl_org.py index d97f9567cd..f87813031b 100644 --- a/plugins/modules/pritunl_org.py +++ b/plugins/modules/pritunl_org.py @@ -35,8 +35,9 @@ options: type: bool default: false description: - - If O(force) is V(true) and O(state) is V(absent), the module will delete the organization, no matter if it contains users or not. By default - O(force) is V(false), which will cause the module to fail the deletion of the organization when it contains users. 
+ - If O(force) is V(true) and O(state) is V(absent), the module deletes the organization, no matter if it contains users + or not. By default O(force) is V(false), which causes the module to fail the deletion of the organization when it + contains users. state: type: str default: 'present' @@ -44,8 +45,8 @@ options: - present - absent description: - - If V(present), the module adds organization O(name) to Pritunl. If V(absent), attempt to delete the organization from Pritunl (please - read about O(force) usage). + - If V(present), the module adds organization O(name) to Pritunl. If V(absent), attempt to delete the organization from + Pritunl (please read about O(force) usage). """ EXAMPLES = r""" @@ -62,18 +63,18 @@ EXAMPLES = r""" RETURN = r""" response: - description: JSON representation of a Pritunl Organization. - returned: success - type: dict - sample: - { - "auth_api": false, - "name": "Foo", - "auth_token": null, - "user_count": 0, - "auth_secret": null, - "id": "csftwlu6uhralzi2dpmhekz3", - } + description: JSON representation of a Pritunl Organization. + returned: success + type: dict + sample: + { + "auth_api": false, + "name": "Foo", + "auth_token": null, + "user_count": 0, + "auth_secret": null, + "id": "csftwlu6uhralzi2dpmhekz3" + } """ @@ -175,10 +176,8 @@ def main(): argument_spec.update( dict( name=dict(required=True, type="str", aliases=["org"]), - force=dict(required=False, type="bool", default=False), - state=dict( - required=False, choices=["present", "absent"], default="present" - ), + force=dict(type="bool", default=False), + state=dict(choices=["present", "absent"], default="present"), ) ) diff --git a/plugins/modules/pritunl_org_info.py b/plugins/modules/pritunl_org_info.py index dc198bc9cc..952acd8963 100644 --- a/plugins/modules/pritunl_org_info.py +++ b/plugins/modules/pritunl_org_info.py @@ -27,7 +27,7 @@ options: - org default: null description: - - Name of the Pritunl organization to search for. If none provided, the module will return all Pritunl organizations. + - Name of the Pritunl organization to search for. If none provided, the module returns all Pritunl organizations. """ EXAMPLES = r""" @@ -41,37 +41,37 @@ EXAMPLES = r""" RETURN = r""" organizations: - description: List of Pritunl organizations. - returned: success - type: list - elements: dict - sample: - [ - { - "auth_api": false, - "name": "FooOrg", - "auth_token": null, - "user_count": 0, - "auth_secret": null, - "id": "csftwlu6uhralzi2dpmhekz3", - }, - { - "auth_api": false, - "name": "MyOrg", - "auth_token": null, - "user_count": 3, - "auth_secret": null, - "id": "58070daee63f3b2e6e472c36", - }, - { - "auth_api": false, - "name": "BarOrg", - "auth_token": null, - "user_count": 0, - "auth_secret": null, - "id": "v1sncsxxybnsylc8gpqg85pg", - } - ] + description: List of Pritunl organizations. 
+ returned: success + type: list + elements: dict + sample: + [ + { + "auth_api": false, + "name": "FooOrg", + "auth_token": null, + "user_count": 0, + "auth_secret": null, + "id": "csftwlu6uhralzi2dpmhekz3" + }, + { + "auth_api": false, + "name": "MyOrg", + "auth_token": null, + "user_count": 3, + "auth_secret": null, + "id": "58070daee63f3b2e6e472c36" + }, + { + "auth_api": false, + "name": "BarOrg", + "auth_token": null, + "user_count": 0, + "auth_secret": null, + "id": "v1sncsxxybnsylc8gpqg85pg" + } + ] """ from ansible.module_utils.basic import AnsibleModule @@ -113,7 +113,7 @@ def main(): argument_spec.update( dict( - organization=dict(required=False, type="str", default=None, aliases=["org"]) + organization=dict(type="str", aliases=["org"]) ) ) diff --git a/plugins/modules/pritunl_user.py b/plugins/modules/pritunl_user.py index 932c4c1322..45de07eba6 100644 --- a/plugins/modules/pritunl_user.py +++ b/plugins/modules/pritunl_user.py @@ -38,8 +38,8 @@ options: - present - absent description: - - If V(present), the module adds user O(user_name) to the Pritunl O(organization). If V(absent), removes the user O(user_name) from the - Pritunl O(organization). + - If V(present), the module adds user O(user_name) to the Pritunl O(organization). If V(absent), removes the user O(user_name) + from the Pritunl O(organization). user_name: type: str required: true @@ -115,35 +115,36 @@ EXAMPLES = r""" RETURN = r""" response: - description: JSON representation of Pritunl Users. - returned: success - type: dict - sample: - { - "audit": false, - "auth_type": "google", - "bypass_secondary": false, - "client_to_client": false, - "disabled": false, - "dns_mapping": null, - "dns_servers": null, - "dns_suffix": null, - "email": "foo@bar.com", - "gravatar": true, - "groups": [ - "foo", "bar" - ], - "id": "5d070dafe63q3b2e6s472c3b", - "name": "foo@acme.com", - "network_links": [], - "organization": "58070daee6sf342e6e4s2c36", - "organization_name": "Acme", - "otp_auth": true, - "otp_secret": "35H5EJA3XB2$4CWG", - "pin": false, - "port_forwarding": [], - "servers": [], - } + description: JSON representation of Pritunl Users. 
+ returned: success + type: dict + sample: + { + "audit": false, + "auth_type": "google", + "bypass_secondary": false, + "client_to_client": false, + "disabled": false, + "dns_mapping": null, + "dns_servers": null, + "dns_suffix": null, + "email": "foo@bar.com", + "gravatar": true, + "groups": [ + "foo", + "bar" + ], + "id": "5d070dafe63q3b2e6s472c3b", + "name": "foo@acme.com", + "network_links": [], + "organization": "58070daee6sf342e6e4s2c36", + "organization_name": "Acme", + "otp_auth": true, + "otp_secret": "35H5EJA3XB2$4CWG", + "pin": false, + "port_forwarding": [], + "servers": [] + } """ @@ -319,18 +320,14 @@ def main(): argument_spec.update( dict( organization=dict(required=True, type="str", aliases=["org"]), - state=dict( - required=False, choices=["present", "absent"], default="present" - ), + state=dict(choices=["present", "absent"], default="present"), user_name=dict(required=True, type="str"), - user_type=dict( - required=False, choices=["client", "server"], default="client" - ), - user_email=dict(required=False, type="str", default=None), - user_groups=dict(required=False, type="list", elements="str", default=None), - user_disabled=dict(required=False, type="bool", default=None), - user_gravatar=dict(required=False, type="bool", default=None), - user_mac_addresses=dict(required=False, type="list", elements="str", default=None), + user_type=dict(choices=["client", "server"], default="client"), + user_email=dict(type="str"), + user_groups=dict(type="list", elements="str"), + user_disabled=dict(type="bool"), + user_gravatar=dict(type="bool"), + user_mac_addresses=dict(type="list", elements="str"), ) ) diff --git a/plugins/modules/pritunl_user_info.py b/plugins/modules/pritunl_user_info.py index 02d8512315..2e8180675a 100644 --- a/plugins/modules/pritunl_user_info.py +++ b/plugins/modules/pritunl_user_info.py @@ -58,38 +58,39 @@ EXAMPLES = r""" RETURN = r""" users: - description: List of Pritunl users. - returned: success - type: list - elements: dict - sample: - [ - { - "audit": false, - "auth_type": "google", - "bypass_secondary": false, - "client_to_client": false, - "disabled": false, - "dns_mapping": null, - "dns_servers": null, - "dns_suffix": null, - "email": "foo@bar.com", - "gravatar": true, - "groups": [ - "foo", "bar" - ], - "id": "5d070dafe63q3b2e6s472c3b", - "name": "foo@acme.com", - "network_links": [], - "organization": "58070daee6sf342e6e4s2c36", - "organization_name": "Acme", - "otp_auth": true, - "otp_secret": "35H5EJA3XB2$4CWG", - "pin": false, - "port_forwarding": [], - "servers": [], - } - ] + description: List of Pritunl users. 
+ returned: success + type: list + elements: dict + sample: + [ + { + "audit": false, + "auth_type": "google", + "bypass_secondary": false, + "client_to_client": false, + "disabled": false, + "dns_mapping": null, + "dns_servers": null, + "dns_suffix": null, + "email": "foo@bar.com", + "gravatar": true, + "groups": [ + "foo", + "bar" + ], + "id": "5d070dafe63q3b2e6s472c3b", + "name": "foo@acme.com", + "network_links": [], + "organization": "58070daee6sf342e6e4s2c36", + "organization_name": "Acme", + "otp_auth": true, + "otp_secret": "35H5EJA3XB2$4CWG", + "pin": false, + "port_forwarding": [], + "servers": [] + } + ] """ from ansible.module_utils.basic import AnsibleModule @@ -150,12 +151,8 @@ def main(): argument_spec.update( dict( organization=dict(required=True, type="str", aliases=["org"]), - user_name=dict(required=False, type="str", default=None), - user_type=dict( - required=False, - choices=["client", "server"], - default="client", - ), + user_name=dict(type="str"), + user_type=dict(choices=["client", "server"], default="client"), ) ) diff --git a/plugins/modules/profitbricks.py b/plugins/modules/profitbricks.py deleted file mode 100644 index e912db6985..0000000000 --- a/plugins/modules/profitbricks.py +++ /dev/null @@ -1,664 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r""" -module: profitbricks -short_description: Create, destroy, start, stop, and reboot a ProfitBricks virtual machine -description: - - Create, destroy, update, start, stop, and reboot a ProfitBricks virtual machine. When the virtual machine is created it can optionally wait - for it to be 'running' before returning. This module has a dependency on profitbricks >= 1.0.0. -extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - auto_increment: - description: - - Whether or not to increment a single number in the name for created virtual machines. - type: bool - default: true - name: - description: - - The name of the virtual machine. - type: str - image: - description: - - The system image ID for creating the virtual machine, for example V(a3eae284-a2fe-11e4-b187-5f1f641608c8). - type: str - image_password: - description: - - Password set for the administrative user. - type: str - ssh_keys: - description: - - Public SSH keys allowing access to the virtual machine. - type: list - elements: str - default: [] - datacenter: - description: - - The datacenter to provision this virtual machine. - type: str - cores: - description: - - The number of CPU cores to allocate to the virtual machine. - default: 2 - type: int - ram: - description: - - The amount of memory to allocate to the virtual machine. - default: 2048 - type: int - cpu_family: - description: - - The CPU family type to allocate to the virtual machine. - type: str - default: AMD_OPTERON - choices: ["AMD_OPTERON", "INTEL_XEON"] - volume_size: - description: - - The size in GB of the boot volume. - type: int - default: 10 - bus: - description: - - The bus type for the volume. - type: str - default: VIRTIO - choices: ["IDE", "VIRTIO"] - instance_ids: - description: - - List of instance ids, currently only used when state='absent' to remove instances. 
- type: list - elements: str - default: [] - count: - description: - - The number of virtual machines to create. - type: int - default: 1 - location: - description: - - The datacenter location. Use only if you want to create the Datacenter or else this value is ignored. - type: str - default: us/las - choices: ["us/las", "de/fra", "de/fkb"] - assign_public_ip: - description: - - This will assign the machine to the public LAN. If no LAN exists with public Internet access it is created. - type: bool - default: false - lan: - description: - - The ID of the LAN you wish to add the servers to. - type: int - default: 1 - subscription_user: - description: - - The ProfitBricks username. Overrides the E(PB_SUBSCRIPTION_ID) environment variable. - type: str - subscription_password: - description: - - THe ProfitBricks password. Overrides the E(PB_PASSWORD) environment variable. - type: str - wait: - description: - - Wait for the instance to be in state 'running' before returning. - type: bool - default: true - wait_timeout: - description: - - How long before wait gives up, in seconds. - type: int - default: 600 - remove_boot_volume: - description: - - Remove the bootVolume of the virtual machine you are destroying. - type: bool - default: true - state: - description: - - Create or terminate instances. - - 'The choices available are: V(running), V(stopped), V(absent), V(present).' - type: str - default: 'present' - disk_type: - description: - - The type of disk to be allocated. - type: str - choices: [SSD, HDD] - default: HDD - -requirements: - - "profitbricks" -author: Matt Baldwin (@baldwinSPC) -""" - -EXAMPLES = r""" -# Note: These examples do not set authentication details, see the AWS Guide for details. - -# Provisioning example -- name: Create three servers and enumerate their names - community.general.profitbricks: - datacenter: Tardis One - name: web%02d.stackpointcloud.com - cores: 4 - ram: 2048 - volume_size: 50 - cpu_family: INTEL_XEON - image: a3eae284-a2fe-11e4-b187-5f1f641608c8 - location: us/las - count: 3 - assign_public_ip: true - -- name: Remove virtual machines - community.general.profitbricks: - datacenter: Tardis One - instance_ids: - - 'web001.stackpointcloud.com' - - 'web002.stackpointcloud.com' - - 'web003.stackpointcloud.com' - wait_timeout: 500 - state: absent - -- name: Start virtual machines - community.general.profitbricks: - datacenter: Tardis One - instance_ids: - - 'web001.stackpointcloud.com' - - 'web002.stackpointcloud.com' - - 'web003.stackpointcloud.com' - wait_timeout: 500 - state: running - -- name: Stop virtual machines - community.general.profitbricks: - datacenter: Tardis One - instance_ids: - - 'web001.stackpointcloud.com' - - 'web002.stackpointcloud.com' - - 'web003.stackpointcloud.com' - wait_timeout: 500 - state: stopped -""" - -import re -import uuid -import time -import traceback - -HAS_PB_SDK = True - -try: - from profitbricks.client import ProfitBricksService, Volume, Server, Datacenter, NIC, LAN -except ImportError: - HAS_PB_SDK = False - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xrange -from ansible.module_utils.common.text.converters import to_native - - -LOCATIONS = ['us/las', - 'de/fra', - 'de/fkb'] - -uuid_match = re.compile( - r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I) - - -def _wait_for_completion(profitbricks, promise, wait_timeout, msg): - if not promise: - return - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time(): - time.sleep(5) - operation_result = 
profitbricks.get_request( - request_id=promise['requestId'], - status=True) - - if operation_result['metadata']['status'] == "DONE": - return - elif operation_result['metadata']['status'] == "FAILED": - raise Exception( - 'Request failed to complete ' + msg + ' "' + str( - promise['requestId']) + '" to complete.') - - raise Exception( - 'Timed out waiting for async operation ' + msg + ' "' + str( - promise['requestId'] - ) + '" to complete.') - - -def _create_machine(module, profitbricks, datacenter, name): - cores = module.params.get('cores') - ram = module.params.get('ram') - cpu_family = module.params.get('cpu_family') - volume_size = module.params.get('volume_size') - disk_type = module.params.get('disk_type') - image_password = module.params.get('image_password') - ssh_keys = module.params.get('ssh_keys') - bus = module.params.get('bus') - lan = module.params.get('lan') - assign_public_ip = module.params.get('assign_public_ip') - subscription_user = module.params.get('subscription_user') - subscription_password = module.params.get('subscription_password') - location = module.params.get('location') - image = module.params.get('image') - assign_public_ip = module.boolean(module.params.get('assign_public_ip')) - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - - if assign_public_ip: - public_found = False - - lans = profitbricks.list_lans(datacenter) - for lan in lans['items']: - if lan['properties']['public']: - public_found = True - lan = lan['id'] - - if not public_found: - i = LAN( - name='public', - public=True) - - lan_response = profitbricks.create_lan(datacenter, i) - _wait_for_completion(profitbricks, lan_response, - wait_timeout, "_create_machine") - lan = lan_response['id'] - - v = Volume( - name=str(uuid.uuid4()).replace('-', '')[:10], - size=volume_size, - image=image, - image_password=image_password, - ssh_keys=ssh_keys, - disk_type=disk_type, - bus=bus) - - n = NIC( - lan=int(lan) - ) - - s = Server( - name=name, - ram=ram, - cores=cores, - cpu_family=cpu_family, - create_volumes=[v], - nics=[n], - ) - - try: - create_server_response = profitbricks.create_server( - datacenter_id=datacenter, server=s) - - _wait_for_completion(profitbricks, create_server_response, - wait_timeout, "create_virtual_machine") - - server_response = profitbricks.get_server( - datacenter_id=datacenter, - server_id=create_server_response['id'], - depth=3 - ) - except Exception as e: - module.fail_json(msg="failed to create the new server: %s" % str(e)) - else: - return server_response - - -def _startstop_machine(module, profitbricks, datacenter_id, server_id): - state = module.params.get('state') - - try: - if state == 'running': - profitbricks.start_server(datacenter_id, server_id) - else: - profitbricks.stop_server(datacenter_id, server_id) - - return True - except Exception as e: - module.fail_json(msg="failed to start or stop the virtual machine %s at %s: %s" % (server_id, datacenter_id, str(e))) - - -def _create_datacenter(module, profitbricks): - datacenter = module.params.get('datacenter') - location = module.params.get('location') - wait_timeout = module.params.get('wait_timeout') - - i = Datacenter( - name=datacenter, - location=location - ) - - try: - datacenter_response = profitbricks.create_datacenter(datacenter=i) - - _wait_for_completion(profitbricks, datacenter_response, - wait_timeout, "_create_datacenter") - - return datacenter_response - except Exception as e: - module.fail_json(msg="failed to create the new server(s): %s" % str(e)) - - -def 
create_virtual_machine(module, profitbricks): - """ - Create new virtual machine - - module : AnsibleModule object - community.general.profitbricks: authenticated profitbricks object - - Returns: - True if a new virtual machine was created, false otherwise - """ - datacenter = module.params.get('datacenter') - name = module.params.get('name') - auto_increment = module.params.get('auto_increment') - count = module.params.get('count') - lan = module.params.get('lan') - wait_timeout = module.params.get('wait_timeout') - failed = True - datacenter_found = False - - virtual_machines = [] - virtual_machine_ids = [] - - # Locate UUID for datacenter if referenced by name. - datacenter_list = profitbricks.list_datacenters() - datacenter_id = _get_datacenter_id(datacenter_list, datacenter) - if datacenter_id: - datacenter_found = True - - if not datacenter_found: - datacenter_response = _create_datacenter(module, profitbricks) - datacenter_id = datacenter_response['id'] - - _wait_for_completion(profitbricks, datacenter_response, - wait_timeout, "create_virtual_machine") - - if auto_increment: - numbers = set() - count_offset = 1 - - try: - name % 0 - except TypeError as e: - if e.message.startswith('not all'): - name = '%s%%d' % name - else: - module.fail_json(msg=e.message, exception=traceback.format_exc()) - - number_range = xrange(count_offset, count_offset + count + len(numbers)) - available_numbers = list(set(number_range).difference(numbers)) - names = [] - numbers_to_use = available_numbers[:count] - for number in numbers_to_use: - names.append(name % number) - else: - names = [name] - - # Prefetch a list of servers for later comparison. - server_list = profitbricks.list_servers(datacenter_id) - for name in names: - # Skip server creation if the server already exists. - if _get_server_id(server_list, name): - continue - - create_response = _create_machine(module, profitbricks, str(datacenter_id), name) - nics = profitbricks.list_nics(datacenter_id, create_response['id']) - for n in nics['items']: - if lan == n['properties']['lan']: - create_response.update({'public_ip': n['properties']['ips'][0]}) - - virtual_machines.append(create_response) - - failed = False - - results = { - 'failed': failed, - 'machines': virtual_machines, - 'action': 'create', - 'instance_ids': { - 'instances': [i['id'] for i in virtual_machines], - } - } - - return results - - -def remove_virtual_machine(module, profitbricks): - """ - Removes a virtual machine. - - This will remove the virtual machine along with the bootVolume. - - module : AnsibleModule object - community.general.profitbricks: authenticated profitbricks object. - - Not yet supported: handle deletion of attached data disks. - - Returns: - True if a new virtual server was deleted, false otherwise - """ - datacenter = module.params.get('datacenter') - instance_ids = module.params.get('instance_ids') - remove_boot_volume = module.params.get('remove_boot_volume') - changed = False - - if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1: - module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting') - - # Locate UUID for datacenter if referenced by name. - datacenter_list = profitbricks.list_datacenters() - datacenter_id = _get_datacenter_id(datacenter_list, datacenter) - if not datacenter_id: - module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter)) - - # Prefetch server list for later comparison. 
- server_list = profitbricks.list_servers(datacenter_id) - for instance in instance_ids: - # Locate UUID for server if referenced by name. - server_id = _get_server_id(server_list, instance) - if server_id: - # Remove the server's boot volume - if remove_boot_volume: - _remove_boot_volume(module, profitbricks, datacenter_id, server_id) - - # Remove the server - try: - server_response = profitbricks.delete_server(datacenter_id, server_id) - except Exception as e: - module.fail_json(msg="failed to terminate the virtual server: %s" % to_native(e), exception=traceback.format_exc()) - else: - changed = True - - return changed - - -def _remove_boot_volume(module, profitbricks, datacenter_id, server_id): - """ - Remove the boot volume from the server - """ - try: - server = profitbricks.get_server(datacenter_id, server_id) - volume_id = server['properties']['bootVolume']['id'] - volume_response = profitbricks.delete_volume(datacenter_id, volume_id) - except Exception as e: - module.fail_json(msg="failed to remove the server's boot volume: %s" % to_native(e), exception=traceback.format_exc()) - - -def startstop_machine(module, profitbricks, state): - """ - Starts or Stops a virtual machine. - - module : AnsibleModule object - community.general.profitbricks: authenticated profitbricks object. - - Returns: - True when the servers process the action successfully, false otherwise. - """ - if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1: - module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting') - - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - changed = False - - datacenter = module.params.get('datacenter') - instance_ids = module.params.get('instance_ids') - - # Locate UUID for datacenter if referenced by name. - datacenter_list = profitbricks.list_datacenters() - datacenter_id = _get_datacenter_id(datacenter_list, datacenter) - if not datacenter_id: - module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter)) - - # Prefetch server list for later comparison. - server_list = profitbricks.list_servers(datacenter_id) - for instance in instance_ids: - # Locate UUID of server if referenced by name. - server_id = _get_server_id(server_list, instance) - if server_id: - _startstop_machine(module, profitbricks, datacenter_id, server_id) - changed = True - - if wait: - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time(): - matched_instances = [] - for res in profitbricks.list_servers(datacenter_id)['items']: - if state == 'running': - if res['properties']['vmState'].lower() == state: - matched_instances.append(res) - elif state == 'stopped': - if res['properties']['vmState'].lower() == 'shutoff': - matched_instances.append(res) - - if len(matched_instances) < len(instance_ids): - time.sleep(5) - else: - break - - if wait_timeout <= time.time(): - # waiting took too long - module.fail_json(msg="wait for virtual machine state timeout on %s" % time.asctime()) - - return (changed) - - -def _get_datacenter_id(datacenters, identity): - """ - Fetch and return datacenter UUID by datacenter name if found. - """ - for datacenter in datacenters['items']: - if identity in (datacenter['properties']['name'], datacenter['id']): - return datacenter['id'] - return None - - -def _get_server_id(servers, identity): - """ - Fetch and return server UUID by server name if found. 
- """ - for server in servers['items']: - if identity in (server['properties']['name'], server['id']): - return server['id'] - return None - - -def main(): - module = AnsibleModule( - argument_spec=dict( - datacenter=dict(), - name=dict(), - image=dict(), - cores=dict(type='int', default=2), - ram=dict(type='int', default=2048), - cpu_family=dict(choices=['AMD_OPTERON', 'INTEL_XEON'], - default='AMD_OPTERON'), - volume_size=dict(type='int', default=10), - disk_type=dict(choices=['HDD', 'SSD'], default='HDD'), - image_password=dict(no_log=True), - ssh_keys=dict(type='list', elements='str', default=[], no_log=False), - bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'), - lan=dict(type='int', default=1), - count=dict(type='int', default=1), - auto_increment=dict(type='bool', default=True), - instance_ids=dict(type='list', elements='str', default=[]), - subscription_user=dict(), - subscription_password=dict(no_log=True), - location=dict(choices=LOCATIONS, default='us/las'), - assign_public_ip=dict(type='bool', default=False), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - remove_boot_volume=dict(type='bool', default=True), - state=dict(default='present'), - ) - ) - - if not HAS_PB_SDK: - module.fail_json(msg='profitbricks required for this module') - - subscription_user = module.params.get('subscription_user') - subscription_password = module.params.get('subscription_password') - - profitbricks = ProfitBricksService( - username=subscription_user, - password=subscription_password) - - state = module.params.get('state') - - if state == 'absent': - if not module.params.get('datacenter'): - module.fail_json(msg='datacenter parameter is required ' + - 'for running or stopping machines.') - - try: - (changed) = remove_virtual_machine(module, profitbricks) - module.exit_json(changed=changed) - except Exception as e: - module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc()) - - elif state in ('running', 'stopped'): - if not module.params.get('datacenter'): - module.fail_json(msg='datacenter parameter is required for ' + - 'running or stopping machines.') - try: - (changed) = startstop_machine(module, profitbricks, state) - module.exit_json(changed=changed) - except Exception as e: - module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc()) - - elif state == 'present': - if not module.params.get('name'): - module.fail_json(msg='name parameter is required for new instance') - if not module.params.get('image'): - module.fail_json(msg='image parameter is required for new instance') - if not module.params.get('subscription_user'): - module.fail_json(msg='subscription_user parameter is ' + - 'required for new instance') - if not module.params.get('subscription_password'): - module.fail_json(msg='subscription_password parameter is ' + - 'required for new instance') - - try: - (machine_dict_array) = create_virtual_machine(module, profitbricks) - module.exit_json(**machine_dict_array) - except Exception as e: - module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/profitbricks_datacenter.py b/plugins/modules/profitbricks_datacenter.py deleted file mode 100644 index 93c27b5d8d..0000000000 --- a/plugins/modules/profitbricks_datacenter.py +++ /dev/null @@ -1,265 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright Ansible Project -# GNU 
General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r""" -module: profitbricks_datacenter -short_description: Create or destroy a ProfitBricks Virtual Datacenter -description: - - This is a simple module that supports creating or removing vDCs. A vDC is required before you can create servers. This module has a dependency - on profitbricks >= 1.0.0. -extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - name: - description: - - The name of the virtual datacenter. - type: str - description: - description: - - The description of the virtual datacenter. - type: str - required: false - location: - description: - - The datacenter location. - type: str - required: false - default: us/las - choices: ["us/las", "de/fra", "de/fkb"] - subscription_user: - description: - - The ProfitBricks username. Overrides the E(PB_SUBSCRIPTION_ID) environment variable. - type: str - required: false - subscription_password: - description: - - THe ProfitBricks password. Overrides the E(PB_PASSWORD) environment variable. - type: str - required: false - wait: - description: - - Wait for the datacenter to be created before returning. - required: false - default: true - type: bool - wait_timeout: - description: - - How long before wait gives up, in seconds. - type: int - default: 600 - state: - description: - - Create or terminate datacenters. - - 'The available choices are: V(present), V(absent).' - type: str - required: false - default: 'present' - -requirements: ["profitbricks"] -author: Matt Baldwin (@baldwinSPC) -""" - -EXAMPLES = r""" -- name: Create a datacenter - community.general.profitbricks_datacenter: - datacenter: Tardis One - wait_timeout: 500 - -- name: Destroy a datacenter (remove all servers, volumes, and other objects in the datacenter) - community.general.profitbricks_datacenter: - datacenter: Tardis One - wait_timeout: 500 - state: absent -""" - -import re -import time - -HAS_PB_SDK = True -try: - from profitbricks.client import ProfitBricksService, Datacenter -except ImportError: - HAS_PB_SDK = False - -from ansible.module_utils.basic import AnsibleModule - - -LOCATIONS = ['us/las', - 'de/fra', - 'de/fkb'] - -uuid_match = re.compile( - r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I) - - -def _wait_for_completion(profitbricks, promise, wait_timeout, msg): - if not promise: - return - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time(): - time.sleep(5) - operation_result = profitbricks.get_request( - request_id=promise['requestId'], - status=True) - - if operation_result['metadata']['status'] == "DONE": - return - elif operation_result['metadata']['status'] == "FAILED": - raise Exception( - 'Request failed to complete ' + msg + ' "' + str( - promise['requestId']) + '" to complete.') - - raise Exception( - 'Timed out waiting for async operation ' + msg + ' "' + str( - promise['requestId'] - ) + '" to complete.') - - -def _remove_datacenter(module, profitbricks, datacenter): - try: - profitbricks.delete_datacenter(datacenter) - except Exception as e: - module.fail_json(msg="failed to remove the datacenter: %s" % str(e)) - - -def create_datacenter(module, profitbricks): - """ - Creates a Datacenter - - This will create a new Datacenter in the specified location. 
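_wait_for_completion above blocks on an asynchronous SDK request by polling get_request() until the metadata status flips to DONE or FAILED, or the deadline passes. The same state machine, condensed into a sketch where get_request stands in for the authenticated client's method:

import time

def wait_for_request(get_request, request_id, timeout=600, poll=5):
    # Poll the request until it completes, fails, or the deadline passes.
    deadline = time.time() + timeout
    while time.time() < deadline:
        time.sleep(poll)
        status = get_request(request_id=request_id, status=True)['metadata']['status']
        if status == 'DONE':
            return
        if status == 'FAILED':
            raise Exception('request %s failed' % request_id)
    raise Exception('timed out after %ss waiting for request %s' % (timeout, request_id))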
- - module : AnsibleModule object - profitbricks: authenticated profitbricks object. - - Returns: - True if a new datacenter was created, false otherwise - """ - name = module.params.get('name') - location = module.params.get('location') - description = module.params.get('description') - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - - i = Datacenter( - name=name, - location=location, - description=description - ) - - try: - datacenter_response = profitbricks.create_datacenter(datacenter=i) - - if wait: - _wait_for_completion(profitbricks, datacenter_response, - wait_timeout, "_create_datacenter") - - results = { - 'datacenter_id': datacenter_response['id'] - } - - return results - - except Exception as e: - module.fail_json(msg="failed to create the new datacenter: %s" % str(e)) - - -def remove_datacenter(module, profitbricks): - """ - Removes a Datacenter. - - This will remove a datacenter. - - module : AnsibleModule object - profitbricks: authenticated profitbricks object. - - Returns: - True if the datacenter was deleted, false otherwise - """ - name = module.params.get('name') - changed = False - - if uuid_match.match(name): - _remove_datacenter(module, profitbricks, name) - changed = True - else: - datacenters = profitbricks.list_datacenters() - - for d in datacenters['items']: - vdc = profitbricks.get_datacenter(d['id']) - - if name == vdc['properties']['name']: - name = d['id'] - _remove_datacenter(module, profitbricks, name) - changed = True - - return changed - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(), - description=dict(), - location=dict(choices=LOCATIONS, default='us/las'), - subscription_user=dict(), - subscription_password=dict(no_log=True), - wait=dict(type='bool', default=True), - wait_timeout=dict(default=600, type='int'), - state=dict(default='present'), # @TODO add choices - ) - ) - if not HAS_PB_SDK: - module.fail_json(msg='profitbricks required for this module') - - if not module.params.get('subscription_user'): - module.fail_json(msg='subscription_user parameter is required') - if not module.params.get('subscription_password'): - module.fail_json(msg='subscription_password parameter is required') - - subscription_user = module.params.get('subscription_user') - subscription_password = module.params.get('subscription_password') - - profitbricks = ProfitBricksService( - username=subscription_user, - password=subscription_password) - - state = module.params.get('state') - - if state == 'absent': - if not module.params.get('name'): - module.fail_json(msg='name parameter is required deleting a virtual datacenter.') - - try: - (changed) = remove_datacenter(module, profitbricks) - module.exit_json( - changed=changed) - except Exception as e: - module.fail_json(msg='failed to set datacenter state: %s' % str(e)) - - elif state == 'present': - if not module.params.get('name'): - module.fail_json(msg='name parameter is required for a new datacenter') - if not module.params.get('location'): - module.fail_json(msg='location parameter is required for a new datacenter') - - try: - (datacenter_dict_array) = create_datacenter(module, profitbricks) - module.exit_json(**datacenter_dict_array) - except Exception as e: - module.fail_json(msg='failed to set datacenter state: %s' % str(e)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/profitbricks_nic.py b/plugins/modules/profitbricks_nic.py deleted file mode 100644 index 0b559a6fed..0000000000 --- a/plugins/modules/profitbricks_nic.py 
+++ /dev/null @@ -1,296 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r""" -module: profitbricks_nic -short_description: Create or Remove a NIC -description: - - This module allows you to create or restore a volume snapshot. This module has a dependency on profitbricks >= 1.0.0. -extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - datacenter: - description: - - The datacenter in which to operate. - type: str - required: true - server: - description: - - The server name or ID. - type: str - required: true - name: - description: - - The name or ID of the NIC. This is only required on deletes, but not on create. - - If not specified, it defaults to a value based on UUID4. - type: str - lan: - description: - - The LAN to place the NIC on. You can pass a LAN that does not exist and it will be created. Required on create. - type: str - subscription_user: - description: - - The ProfitBricks username. Overrides the E(PB_SUBSCRIPTION_ID) environment variable. - type: str - required: true - subscription_password: - description: - - THe ProfitBricks password. Overrides the E(PB_PASSWORD) environment variable. - type: str - required: true - wait: - description: - - Wait for the operation to complete before returning. - required: false - default: true - type: bool - wait_timeout: - description: - - How long before wait gives up, in seconds. - type: int - default: 600 - state: - description: - - Indicate desired state of the resource. - - 'The available choices are: V(present), V(absent).' - type: str - required: false - default: 'present' - -requirements: ["profitbricks"] -author: Matt Baldwin (@baldwinSPC) -""" - -EXAMPLES = r""" -- name: Create a NIC - community.general.profitbricks_nic: - datacenter: Tardis One - server: node002 - lan: 2 - wait_timeout: 500 - state: present - -- name: Remove a NIC - community.general.profitbricks_nic: - datacenter: Tardis One - server: node002 - name: 7341c2454f - wait_timeout: 500 - state: absent -""" - -import re -import uuid -import time - -HAS_PB_SDK = True -try: - from profitbricks.client import ProfitBricksService, NIC -except ImportError: - HAS_PB_SDK = False - -from ansible.module_utils.basic import AnsibleModule - - -uuid_match = re.compile( - r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I) - - -def _make_default_name(): - return str(uuid.uuid4()).replace('-', '')[:10] - - -def _wait_for_completion(profitbricks, promise, wait_timeout, msg): - if not promise: - return - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time(): - time.sleep(5) - operation_result = profitbricks.get_request( - request_id=promise['requestId'], - status=True) - - if operation_result['metadata']['status'] == "DONE": - return - elif operation_result['metadata']['status'] == "FAILED": - raise Exception( - 'Request failed to complete ' + msg + ' "' + str( - promise['requestId']) + '" to complete.') - - raise Exception( - 'Timed out waiting for async operation ' + msg + ' "' + str( - promise['requestId'] - ) + '" to complete.') - - -def create_nic(module, profitbricks): - """ - Creates a NIC. 
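When no NIC name is supplied, _make_default_name above truncates a UUID4 to ten characters. uuid.uuid4().hex spells the same thing without the replace() step; a behaviourally identical sketch:

import uuid

def make_default_name(length=10):
    # uuid4().hex is the 32-char hex string with hyphens already removed,
    # so slicing it matches str(uuid.uuid4()).replace('-', '')[:length] exactly.
    return uuid.uuid4().hex[:length]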
- - module : AnsibleModule object - profitbricks: authenticated profitbricks object. - - Returns: - True if the nic creates, false otherwise - """ - datacenter = module.params.get('datacenter') - server = module.params.get('server') - lan = module.params.get('lan') - name = module.params.get('name') - if name is None: - name = _make_default_name() - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - - # Locate UUID for Datacenter - if not (uuid_match.match(datacenter)): - datacenter_list = profitbricks.list_datacenters() - for d in datacenter_list['items']: - dc = profitbricks.get_datacenter(d['id']) - if datacenter == dc['properties']['name']: - datacenter = d['id'] - break - - # Locate UUID for Server - if not (uuid_match.match(server)): - server_list = profitbricks.list_servers(datacenter) - for s in server_list['items']: - if server == s['properties']['name']: - server = s['id'] - break - try: - n = NIC( - name=name, - lan=lan - ) - - nic_response = profitbricks.create_nic(datacenter, server, n) - - if wait: - _wait_for_completion(profitbricks, nic_response, - wait_timeout, "create_nic") - - return nic_response - - except Exception as e: - module.fail_json(msg="failed to create the NIC: %s" % str(e)) - - -def delete_nic(module, profitbricks): - """ - Removes a NIC - - module : AnsibleModule object - profitbricks: authenticated profitbricks object. - - Returns: - True if the NIC was removed, false otherwise - """ - datacenter = module.params.get('datacenter') - server = module.params.get('server') - name = module.params.get('name') - if name is None: - name = _make_default_name() - - # Locate UUID for Datacenter - if not (uuid_match.match(datacenter)): - datacenter_list = profitbricks.list_datacenters() - for d in datacenter_list['items']: - dc = profitbricks.get_datacenter(d['id']) - if datacenter == dc['properties']['name']: - datacenter = d['id'] - break - - # Locate UUID for Server - server_found = False - if not (uuid_match.match(server)): - server_list = profitbricks.list_servers(datacenter) - for s in server_list['items']: - if server == s['properties']['name']: - server_found = True - server = s['id'] - break - - if not server_found: - return False - - # Locate UUID for NIC - nic_found = False - if not (uuid_match.match(name)): - nic_list = profitbricks.list_nics(datacenter, server) - for n in nic_list['items']: - if name == n['properties']['name']: - nic_found = True - name = n['id'] - break - - if not nic_found: - return False - - try: - nic_response = profitbricks.delete_nic(datacenter, server, name) - return nic_response - except Exception as e: - module.fail_json(msg="failed to remove the NIC: %s" % str(e)) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - datacenter=dict(required=True), - server=dict(required=True), - name=dict(), - lan=dict(), - subscription_user=dict(required=True), - subscription_password=dict(required=True, no_log=True), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - state=dict(default='present'), - ), - required_if=( - ('state', 'absent', ['name']), - ('state', 'present', ['lan']), - ) - ) - - if not HAS_PB_SDK: - module.fail_json(msg='profitbricks required for this module') - - subscription_user = module.params.get('subscription_user') - subscription_password = module.params.get('subscription_password') - - profitbricks = ProfitBricksService( - username=subscription_user, - password=subscription_password) - - state = module.params.get('state') - - if state == 
'absent': - try: - (changed) = delete_nic(module, profitbricks) - module.exit_json(changed=changed) - except Exception as e: - module.fail_json(msg='failed to set nic state: %s' % str(e)) - - elif state == 'present': - try: - (nic_dict) = create_nic(module, profitbricks) - module.exit_json(nics=nic_dict) # @FIXME changed not calculated? - except Exception as e: - module.fail_json(msg='failed to set nic state: %s' % str(e)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/profitbricks_volume.py b/plugins/modules/profitbricks_volume.py deleted file mode 100644 index 48bacb2061..0000000000 --- a/plugins/modules/profitbricks_volume.py +++ /dev/null @@ -1,439 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r""" -module: profitbricks_volume -short_description: Create or destroy a volume -description: - - Allows you to create or remove a volume from a ProfitBricks datacenter. This module has a dependency on profitbricks >= 1.0.0. -extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - datacenter: - description: - - The datacenter in which to create the volumes. - type: str - name: - description: - - The name of the volumes. You can enumerate the names using auto_increment. - type: str - size: - description: - - The size of the volume. - type: int - required: false - default: 10 - bus: - description: - - The bus type. - type: str - required: false - default: VIRTIO - choices: ["IDE", "VIRTIO"] - image: - description: - - The system image ID for the volume, for example V(a3eae284-a2fe-11e4-b187-5f1f641608c8). This can also be a snapshot image ID. - type: str - image_password: - description: - - Password set for the administrative user. - type: str - required: false - ssh_keys: - description: - - Public SSH keys allowing access to the virtual machine. - type: list - elements: str - default: [] - disk_type: - description: - - The disk type of the volume. - type: str - required: false - default: HDD - choices: ["HDD", "SSD"] - licence_type: - description: - - The licence type for the volume. This is used when the image is non-standard. - - 'The available choices are: V(LINUX), V(WINDOWS), V(UNKNOWN), V(OTHER).' - type: str - required: false - default: UNKNOWN - count: - description: - - The number of volumes you wish to create. - type: int - required: false - default: 1 - auto_increment: - description: - - Whether or not to increment a single number in the name for created virtual machines. - default: true - type: bool - instance_ids: - description: - - List of instance ids, currently only used when state='absent' to remove instances. - type: list - elements: str - default: [] - subscription_user: - description: - - The ProfitBricks username. Overrides the E(PB_SUBSCRIPTION_ID) environment variable. - type: str - required: false - subscription_password: - description: - - THe ProfitBricks password. Overrides the E(PB_PASSWORD) environment variable. - type: str - required: false - wait: - description: - - Wait for the datacenter to be created before returning. - required: false - default: true - type: bool - wait_timeout: - description: - - How long before wait gives up, in seconds. 
- type: int - default: 600 - state: - description: - - Create or terminate datacenters. - - 'The available choices are: V(present), V(absent).' - type: str - required: false - default: 'present' - server: - description: - - Server name to attach the volume to. - type: str - -requirements: ["profitbricks"] -author: Matt Baldwin (@baldwinSPC) -""" - -EXAMPLES = r""" -- name: Create multiple volumes - community.general.profitbricks_volume: - datacenter: Tardis One - name: vol%02d - count: 5 - auto_increment: true - wait_timeout: 500 - state: present - -- name: Remove Volumes - community.general.profitbricks_volume: - datacenter: Tardis One - instance_ids: - - 'vol01' - - 'vol02' - wait_timeout: 500 - state: absent -""" - -import re -import time -import traceback - -HAS_PB_SDK = True -try: - from profitbricks.client import ProfitBricksService, Volume -except ImportError: - HAS_PB_SDK = False - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xrange -from ansible.module_utils.common.text.converters import to_native - - -uuid_match = re.compile( - r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I) - - -def _wait_for_completion(profitbricks, promise, wait_timeout, msg): - if not promise: - return - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time(): - time.sleep(5) - operation_result = profitbricks.get_request( - request_id=promise['requestId'], - status=True) - - if operation_result['metadata']['status'] == "DONE": - return - elif operation_result['metadata']['status'] == "FAILED": - raise Exception( - 'Request failed to complete ' + msg + ' "' + str( - promise['requestId']) + '" to complete.') - - raise Exception( - 'Timed out waiting for async operation ' + msg + ' "' + str( - promise['requestId'] - ) + '" to complete.') - - -def _create_volume(module, profitbricks, datacenter, name): - size = module.params.get('size') - bus = module.params.get('bus') - image = module.params.get('image') - image_password = module.params.get('image_password') - ssh_keys = module.params.get('ssh_keys') - disk_type = module.params.get('disk_type') - licence_type = module.params.get('licence_type') - wait_timeout = module.params.get('wait_timeout') - wait = module.params.get('wait') - - try: - v = Volume( - name=name, - size=size, - bus=bus, - image=image, - image_password=image_password, - ssh_keys=ssh_keys, - disk_type=disk_type, - licence_type=licence_type - ) - - volume_response = profitbricks.create_volume(datacenter, v) - - if wait: - _wait_for_completion(profitbricks, volume_response, - wait_timeout, "_create_volume") - - except Exception as e: - module.fail_json(msg="failed to create the volume: %s" % str(e)) - - return volume_response - - -def _delete_volume(module, profitbricks, datacenter, volume): - try: - profitbricks.delete_volume(datacenter, volume) - except Exception as e: - module.fail_json(msg="failed to remove the volume: %s" % str(e)) - - -def create_volume(module, profitbricks): - """ - Creates a volume. - - This will create a volume in a datacenter. - - module : AnsibleModule object - profitbricks: authenticated profitbricks object. 
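Nearly every SDK call in these modules is wrapped in the same try/except that funnels any exception into module.fail_json() with a formatted message and, in the newer call sites, a traceback. One hypothetical way to express that cross-cutting pattern once — fail_on_exception is not part of the module:

import traceback

from ansible.module_utils.common.text.converters import to_native

def fail_on_exception(module, action, func, *args, **kwargs):
    # Run an SDK call; on failure, report it the way these modules do.
    try:
        return func(*args, **kwargs)
    except Exception as e:
        module.fail_json(msg='failed to %s: %s' % (action, to_native(e)),
                         exception=traceback.format_exc())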
- - Returns: - True if the volume was created, false otherwise - """ - datacenter = module.params.get('datacenter') - name = module.params.get('name') - auto_increment = module.params.get('auto_increment') - count = module.params.get('count') - - datacenter_found = False - failed = True - volumes = [] - - # Locate UUID for Datacenter - if not (uuid_match.match(datacenter)): - datacenter_list = profitbricks.list_datacenters() - for d in datacenter_list['items']: - dc = profitbricks.get_datacenter(d['id']) - if datacenter == dc['properties']['name']: - datacenter = d['id'] - datacenter_found = True - break - - if not datacenter_found: - module.fail_json(msg='datacenter could not be found.') - - if auto_increment: - numbers = set() - count_offset = 1 - - try: - name % 0 - except TypeError as e: - if e.message.startswith('not all'): - name = '%s%%d' % name - else: - module.fail_json(msg=e.message, exception=traceback.format_exc()) - - number_range = xrange(count_offset, count_offset + count + len(numbers)) - available_numbers = list(set(number_range).difference(numbers)) - names = [] - numbers_to_use = available_numbers[:count] - for number in numbers_to_use: - names.append(name % number) - else: - names = [name] * count - - for name in names: - create_response = _create_volume(module, profitbricks, str(datacenter), name) - volumes.append(create_response) - _attach_volume(module, profitbricks, datacenter, create_response['id']) - failed = False - - results = { - 'failed': failed, - 'volumes': volumes, - 'action': 'create', - 'instance_ids': { - 'instances': [i['id'] for i in volumes], - } - } - - return results - - -def delete_volume(module, profitbricks): - """ - Removes a volume. - - This will create a volume in a datacenter. - - module : AnsibleModule object - profitbricks: authenticated profitbricks object. - - Returns: - True if the volume was removed, false otherwise - """ - if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1: - module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting') - - datacenter = module.params.get('datacenter') - changed = False - instance_ids = module.params.get('instance_ids') - - # Locate UUID for Datacenter - if not (uuid_match.match(datacenter)): - datacenter_list = profitbricks.list_datacenters() - for d in datacenter_list['items']: - dc = profitbricks.get_datacenter(d['id']) - if datacenter == dc['properties']['name']: - datacenter = d['id'] - break - - for n in instance_ids: - if uuid_match.match(n): - _delete_volume(module, profitbricks, datacenter, n) - changed = True - else: - volumes = profitbricks.list_volumes(datacenter) - for v in volumes['items']: - if n == v['properties']['name']: - volume_id = v['id'] - _delete_volume(module, profitbricks, datacenter, volume_id) - changed = True - - return changed - - -def _attach_volume(module, profitbricks, datacenter, volume): - """ - Attaches a volume. - - This will attach a volume to the server. - - module : AnsibleModule object - profitbricks: authenticated profitbricks object. 
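The auto_increment branch in create_volume above probes the pattern with name % 0 and appends a '%d' placeholder when none is present; note that the e.message attribute it inspects only exists on Python 2. A Python 3-safe sketch of the same expansion:

def enumerate_names(pattern, count, start=1):
    """Expand a printf-style pattern ('vol%02d') into count names."""
    try:
        pattern % 0
    except TypeError:
        # No placeholder consumed the argument ("not all arguments
        # converted"), so append one, as the module does.
        pattern = pattern + '%d'
    return [pattern % i for i in range(start, start + count)]

For example, enumerate_names('vol%02d', 3) yields ['vol01', 'vol02', 'vol03'].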
- - Returns: - True if the volume was attached, false otherwise - """ - server = module.params.get('server') - - # Locate UUID for Server - if server: - if not (uuid_match.match(server)): - server_list = profitbricks.list_servers(datacenter) - for s in server_list['items']: - if server == s['properties']['name']: - server = s['id'] - break - - try: - return profitbricks.attach_volume(datacenter, server, volume) - except Exception as e: - module.fail_json(msg='failed to attach volume: %s' % to_native(e), exception=traceback.format_exc()) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - datacenter=dict(), - server=dict(), - name=dict(), - size=dict(type='int', default=10), - bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'), - image=dict(), - image_password=dict(no_log=True), - ssh_keys=dict(type='list', elements='str', default=[], no_log=False), - disk_type=dict(choices=['HDD', 'SSD'], default='HDD'), - licence_type=dict(default='UNKNOWN'), - count=dict(type='int', default=1), - auto_increment=dict(type='bool', default=True), - instance_ids=dict(type='list', elements='str', default=[]), - subscription_user=dict(), - subscription_password=dict(no_log=True), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - state=dict(default='present'), - ) - ) - - if not module.params.get('subscription_user'): - module.fail_json(msg='subscription_user parameter is required') - if not module.params.get('subscription_password'): - module.fail_json(msg='subscription_password parameter is required') - - subscription_user = module.params.get('subscription_user') - subscription_password = module.params.get('subscription_password') - - profitbricks = ProfitBricksService( - username=subscription_user, - password=subscription_password) - - state = module.params.get('state') - - if state == 'absent': - if not module.params.get('datacenter'): - module.fail_json(msg='datacenter parameter is required for running or stopping machines.') - - try: - (changed) = delete_volume(module, profitbricks) - module.exit_json(changed=changed) - except Exception as e: - module.fail_json(msg='failed to set volume state: %s' % to_native(e), exception=traceback.format_exc()) - - elif state == 'present': - if not module.params.get('datacenter'): - module.fail_json(msg='datacenter parameter is required for new instance') - if not module.params.get('name'): - module.fail_json(msg='name parameter is required for new instance') - - try: - (volume_dict_array) = create_volume(module, profitbricks) - module.exit_json(**volume_dict_array) - except Exception as e: - module.fail_json(msg='failed to set volume state: %s' % to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/profitbricks_volume_attachments.py b/plugins/modules/profitbricks_volume_attachments.py deleted file mode 100644 index 63ca6775ab..0000000000 --- a/plugins/modules/profitbricks_volume_attachments.py +++ /dev/null @@ -1,266 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r""" -module: profitbricks_volume_attachments -short_description: Attach or detach a volume -description: - - Allows you to attach or detach a volume from a ProfitBricks server. 
This module has a dependency on profitbricks >= 1.0.0. -extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - datacenter: - description: - - The datacenter in which to operate. - type: str - server: - description: - - The name of the server you wish to detach or attach the volume. - type: str - volume: - description: - - The volume name or ID. - type: str - subscription_user: - description: - - The ProfitBricks username. Overrides the E(PB_SUBSCRIPTION_ID) environment variable. - type: str - required: false - subscription_password: - description: - - THe ProfitBricks password. Overrides the E(PB_PASSWORD) environment variable. - type: str - required: false - wait: - description: - - Wait for the operation to complete before returning. - required: false - default: true - type: bool - wait_timeout: - description: - - How long before wait gives up, in seconds. - type: int - default: 600 - state: - description: - - Indicate desired state of the resource. - - 'The available choices are: V(present), V(absent).' - type: str - required: false - default: 'present' - -requirements: ["profitbricks"] -author: Matt Baldwin (@baldwinSPC) -""" - -EXAMPLES = r""" -- name: Attach a volume - community.general.profitbricks_volume_attachments: - datacenter: Tardis One - server: node002 - volume: vol01 - wait_timeout: 500 - state: present - -- name: Detach a volume - community.general.profitbricks_volume_attachments: - datacenter: Tardis One - server: node002 - volume: vol01 - wait_timeout: 500 - state: absent -""" - -import re -import time - -HAS_PB_SDK = True -try: - from profitbricks.client import ProfitBricksService -except ImportError: - HAS_PB_SDK = False - -from ansible.module_utils.basic import AnsibleModule - - -uuid_match = re.compile( - r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I) - - -def _wait_for_completion(profitbricks, promise, wait_timeout, msg): - if not promise: - return - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time(): - time.sleep(5) - operation_result = profitbricks.get_request( - request_id=promise['requestId'], - status=True) - - if operation_result['metadata']['status'] == "DONE": - return - elif operation_result['metadata']['status'] == "FAILED": - raise Exception( - 'Request failed to complete ' + msg + ' "' + str( - promise['requestId']) + '" to complete.') - - raise Exception( - 'Timed out waiting for async operation ' + msg + ' "' + str( - promise['requestId'] - ) + '" to complete.') - - -def attach_volume(module, profitbricks): - """ - Attaches a volume. - - This will attach a volume to the server. - - module : AnsibleModule object - profitbricks: authenticated profitbricks object. 
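attach_volume below resolves up to three identifiers (datacenter, then server, then volume, each name-or-UUID) before the actual API call. With the resolve_id sketch from earlier, the resolution chain collapses to the following — illustrative only:

def attach(profitbricks, datacenter, server, volume):
    # Each argument may be a name or a UUID; the later lookups need the
    # resolved datacenter UUID to scope their listings.
    dc_id = resolve_id(datacenter, profitbricks.list_datacenters)
    srv_id = resolve_id(server, lambda: profitbricks.list_servers(dc_id))
    vol_id = resolve_id(volume, lambda: profitbricks.list_volumes(dc_id))
    return profitbricks.attach_volume(dc_id, srv_id, vol_id)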
- - Returns: - True if the volume was attached, false otherwise - """ - datacenter = module.params.get('datacenter') - server = module.params.get('server') - volume = module.params.get('volume') - - # Locate UUID for Datacenter - if not (uuid_match.match(datacenter)): - datacenter_list = profitbricks.list_datacenters() - for d in datacenter_list['items']: - dc = profitbricks.get_datacenter(d['id']) - if datacenter == dc['properties']['name']: - datacenter = d['id'] - break - - # Locate UUID for Server - if not (uuid_match.match(server)): - server_list = profitbricks.list_servers(datacenter) - for s in server_list['items']: - if server == s['properties']['name']: - server = s['id'] - break - - # Locate UUID for Volume - if not (uuid_match.match(volume)): - volume_list = profitbricks.list_volumes(datacenter) - for v in volume_list['items']: - if volume == v['properties']['name']: - volume = v['id'] - break - - return profitbricks.attach_volume(datacenter, server, volume) - - -def detach_volume(module, profitbricks): - """ - Detaches a volume. - - This will remove a volume from the server. - - module : AnsibleModule object - profitbricks: authenticated profitbricks object. - - Returns: - True if the volume was detached, false otherwise - """ - datacenter = module.params.get('datacenter') - server = module.params.get('server') - volume = module.params.get('volume') - - # Locate UUID for Datacenter - if not (uuid_match.match(datacenter)): - datacenter_list = profitbricks.list_datacenters() - for d in datacenter_list['items']: - dc = profitbricks.get_datacenter(d['id']) - if datacenter == dc['properties']['name']: - datacenter = d['id'] - break - - # Locate UUID for Server - if not (uuid_match.match(server)): - server_list = profitbricks.list_servers(datacenter) - for s in server_list['items']: - if server == s['properties']['name']: - server = s['id'] - break - - # Locate UUID for Volume - if not (uuid_match.match(volume)): - volume_list = profitbricks.list_volumes(datacenter) - for v in volume_list['items']: - if volume == v['properties']['name']: - volume = v['id'] - break - - return profitbricks.detach_volume(datacenter, server, volume) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - datacenter=dict(), - server=dict(), - volume=dict(), - subscription_user=dict(), - subscription_password=dict(no_log=True), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - state=dict(default='present'), - ) - ) - - if not HAS_PB_SDK: - module.fail_json(msg='profitbricks required for this module') - - if not module.params.get('subscription_user'): - module.fail_json(msg='subscription_user parameter is required') - if not module.params.get('subscription_password'): - module.fail_json(msg='subscription_password parameter is required') - if not module.params.get('datacenter'): - module.fail_json(msg='datacenter parameter is required') - if not module.params.get('server'): - module.fail_json(msg='server parameter is required') - if not module.params.get('volume'): - module.fail_json(msg='volume parameter is required') - - subscription_user = module.params.get('subscription_user') - subscription_password = module.params.get('subscription_password') - - profitbricks = ProfitBricksService( - username=subscription_user, - password=subscription_password) - - state = module.params.get('state') - - if state == 'absent': - try: - (changed) = detach_volume(module, profitbricks) - module.exit_json(changed=changed) - except Exception as e: - module.fail_json(msg='failed 
to set volume_attach state: %s' % str(e)) - elif state == 'present': - try: - attach_volume(module, profitbricks) - module.exit_json() - except Exception as e: - module.fail_json(msg='failed to set volume_attach state: %s' % str(e)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/proxmox.py b/plugins/modules/proxmox.py deleted file mode 100644 index 8ca3f60ba4..0000000000 --- a/plugins/modules/proxmox.py +++ /dev/null @@ -1,1321 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r""" -module: proxmox -short_description: Management of instances in Proxmox VE cluster -description: - - Allows you to create/delete/stop instances in Proxmox VE cluster. - - The module automatically detects containerization type (lxc for PVE 4, openvz for older). - - Since community.general 4.0.0 on, there are no more default values. -attributes: - check_mode: - support: none - diff_mode: - support: none - action_group: - version_added: 9.0.0 -options: - password: - description: - - The instance root password. - type: str - hostname: - description: - - The instance hostname. - - Required only for O(state=present). - - Must be unique if vmid is not passed. - type: str - ostemplate: - description: - - The template for VM creating. - - Required only for O(state=present). - type: str - disk: - description: - - This option was previously described as "hard disk size in GB for instance" however several formats describing a lxc mount are permitted. - - Older versions of Proxmox will accept a numeric value for size using the O(storage) parameter to automatically choose which storage to - allocate from, however new versions enforce the C(:) syntax. - - Additional options are available by using some combination of the following key-value pairs as a comma-delimited list C([volume=] - [,acl=<1|0>] [,mountoptions=] [,quota=<1|0>] [,replicate=<1|0>] [,ro=<1|0>] [,shared=<1|0>] [,size=]). - - See U(https://pve.proxmox.com/wiki/Linux_Container) for a full description. - - This option is mutually exclusive with O(storage) and O(disk_volume). - type: str - disk_volume: - description: - - Specify a hash/dictionary of the C(rootfs) disk. - - See U(https://pve.proxmox.com/wiki/Linux_Container#pct_mount_points) for a full description. - - This option is mutually exclusive with O(storage) and O(disk). - type: dict - version_added: 9.2.0 - suboptions: - storage: - description: - - O(disk_volume.storage) is the storage identifier of the storage to use for the C(rootfs). - - Mutually exclusive with O(disk_volume.host_path). - type: str - volume: - description: - - O(disk_volume.volume) is the name of an existing volume. - - If not defined, the module will check if one exists. If not, a new volume will be created. - - If defined, the volume must exist under that name. - - Required only if O(disk_volume.storage) is defined and mutually exclusive with O(disk_volume.host_path). - type: str - size: - description: - - O(disk_volume.size) is the size of the storage to use. - - The size is given in GB. - - Required only if O(disk_volume.storage) is defined and mutually exclusive with O(disk_volume.host_path). 
- type: int - host_path: - description: - - O(disk_volume.host_path) defines a bind or device path on the PVE host to use for the C(rootfs). - - Mutually exclusive with O(disk_volume.storage), O(disk_volume.volume), and O(disk_volume.size). - type: path - options: - description: - - O(disk_volume.options) is a dict of extra options. - - The value of any given option must be a string, for example V("1"). - type: dict - cores: - description: - - Specify number of cores per socket. - type: int - cpus: - description: - - Number of allocated cpus for instance. - type: int - memory: - description: - - Memory size in MB for instance. - type: int - swap: - description: - - Swap memory size in MB for instance. - type: int - netif: - description: - - Specifies network interfaces for the container. As a hash/dictionary defining interfaces. - type: dict - features: - description: - - Specifies a list of features to be enabled. For valid options, see U(https://pve.proxmox.com/wiki/Linux_Container#pct_options). - - Some features require the use of a privileged container. - type: list - elements: str - version_added: 2.0.0 - startup: - description: - - Specifies the startup order of the container. - - Use C(order=#) where C(#) is a non-negative number to define the general startup order. Shutdown in done with reverse ordering. - - Use C(up=#) where C(#) is in seconds, to specify a delay to wait before the next VM is started. - - Use C(down=#) where C(#) is in seconds, to specify a delay to wait before the next VM is stopped. - type: list - elements: str - version_added: 8.5.0 - mounts: - description: - - Specifies additional mounts (separate disks) for the container. As a hash/dictionary defining mount points as strings. - - This Option is mutually exclusive with O(mount_volumes). - type: dict - mount_volumes: - description: - - Specify additional mounts (separate disks) for the container. As a hash/dictionary defining mount points. - - See U(https://pve.proxmox.com/wiki/Linux_Container#pct_mount_points) for a full description. - - This Option is mutually exclusive with O(mounts). - type: list - elements: dict - version_added: 9.2.0 - suboptions: - id: - description: - - O(mount_volumes[].id) is the identifier of the mount point written as C(mp[n]). - type: str - required: true - storage: - description: - - O(mount_volumes[].storage) is the storage identifier of the storage to use. - - Mutually exclusive with O(mount_volumes[].host_path). - type: str - volume: - description: - - O(mount_volumes[].volume) is the name of an existing volume. - - If not defined, the module will check if one exists. If not, a new volume will be created. - - If defined, the volume must exist under that name. - - Required only if O(mount_volumes[].storage) is defined and mutually exclusive with O(mount_volumes[].host_path). - type: str - size: - description: - - O(mount_volumes[].size) is the size of the storage to use. - - The size is given in GB. - - Required only if O(mount_volumes[].storage) is defined and mutually exclusive with O(mount_volumes[].host_path). - type: int - host_path: - description: - - O(mount_volumes[].host_path) defines a bind or device path on the PVE host to use for the C(rootfs). - - Mutually exclusive with O(mount_volumes[].storage), O(mount_volumes[].volume), and O(mount_volumes[].size). - type: path - mountpoint: - description: - - O(mount_volumes[].mountpoint) is the mount point of the volume. 
- type: path - required: true - options: - description: - - O(mount_volumes[].options) is a dict of extra options. - - The value of any given option must be a string, for example V("1"). - type: dict - ip_address: - description: - - Specifies the address the container will be assigned. - type: str - onboot: - description: - - Specifies whether a VM will be started during system bootup. - type: bool - storage: - description: - - Target storage. - - This Option is mutually exclusive with O(disk) and O(disk_volume). - type: str - default: 'local' - ostype: - description: - - Specifies the C(ostype) of the LXC container. - - If set to V(auto), no C(ostype) will be provided on instance creation. - choices: ['auto', 'debian', 'devuan', 'ubuntu', 'centos', 'fedora', 'opensuse', 'archlinux', 'alpine', 'gentoo', 'nixos', 'unmanaged'] - type: str - default: 'auto' - version_added: 8.1.0 - cpuunits: - description: - - CPU weight for a VM. - type: int - nameserver: - description: - - Sets DNS server IP address for a container. - type: str - searchdomain: - description: - - Sets DNS search domain for a container. - type: str - tags: - description: - - List of tags to apply to the container. - - Tags must start with V([a-z0-9_]) followed by zero or more of the following characters V([a-z0-9_-+.]). - - Tags are only available in Proxmox 7+. - type: list - elements: str - version_added: 6.2.0 - timeout: - description: - - Timeout for operations. - type: int - default: 30 - update: - description: - - If V(true), the container will be updated with new values. - type: bool - default: false - version_added: 8.1.0 - force: - description: - - Forcing operations. - - Can be used only with states V(present), V(stopped), V(restarted). - - With O(state=present) force option allow to overwrite existing container. - - With states V(stopped), V(restarted) allow to force stop instance. - type: bool - default: false - purge: - description: - - Remove container from all related configurations. - - For example backup jobs, replication jobs, or HA. - - Related ACLs and Firewall entries will always be removed. - - Used with O(state=absent). - type: bool - default: false - version_added: 2.3.0 - state: - description: - - Indicate desired state of the instance. - - V(template) was added in community.general 8.1.0. - type: str - choices: ['present', 'started', 'absent', 'stopped', 'restarted', 'template'] - default: present - pubkey: - description: - - Public key to add to /root/.ssh/authorized_keys. This was added on Proxmox 4.2, it is ignored for earlier versions. - type: str - unprivileged: - description: - - Indicate if the container should be unprivileged. - - The default change to V(true) in community.general 7.0.0. It used to be V(false) before. - type: bool - default: true - description: - description: - - Specify the description for the container. Only used on the configuration web interface. - - This is saved as a comment inside the configuration file. - type: str - version_added: '0.2.0' - hookscript: - description: - - Script that will be executed during various steps in the containers lifetime. - type: str - version_added: '0.2.0' - timezone: - description: - - Timezone used by the container, accepts values like V(Europe/Paris). - - The special value V(host) configures the same timezone used by Proxmox host. - type: str - version_added: '7.1.0' - clone: - description: - - ID of the container to be cloned. - - O(description), O(hostname), and O(pool) will be copied from the cloned container if not specified. 
- - The type of clone created is defined by the O(clone_type) parameter. - - This operator is only supported for Proxmox clusters that use LXC containerization (PVE version >= 4). - type: int - version_added: 4.3.0 - clone_type: - description: - - Type of the clone created. - - V(full) creates a full clone, and O(storage) must be specified. - - V(linked) creates a linked clone, and the cloned container must be a template container. - - V(opportunistic) creates a linked clone if the cloned container is a template container, and a full clone if not. O(storage) may be specified, - if not it will fall back to the default. - type: str - choices: ['full', 'linked', 'opportunistic'] - default: opportunistic - version_added: 4.3.0 -author: Sergei Antipov (@UnderGreen) -seealso: - - module: community.general.proxmox_vm_info -extends_documentation_fragment: - - community.general.proxmox.actiongroup_proxmox - - community.general.proxmox.documentation - - community.general.proxmox.selection - - community.general.attributes -""" - -EXAMPLES = r""" -- name: Create new container with minimal options - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - -- name: Create new container with minimal options specifying disk storage location and size - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - disk: 'local-lvm:20' - -- name: Create new container with minimal options specifying disk storage location and size via disk_volume - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - disk_volume: - storage: local - size: 20 - -- name: Create new container with hookscript and description - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - hookscript: 'local:snippets/vm_hook.sh' - description: created with ansible - -- name: Create new container automatically selecting the next available vmid. 
- community.general.proxmox: - node: 'uk-mc02' - api_user: 'root@pam' - api_password: '1q2w3e' - api_host: 'node1' - password: '123456' - hostname: 'example.org' - ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - -- name: Create new container with minimal options with force(it will rewrite existing container) - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - force: true - -- name: Create new container with minimal options use environment PROXMOX_PASSWORD variable(you should export it before) - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - -- name: Create new container with minimal options defining network interface with dhcp - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - netif: - net0: "name=eth0,ip=dhcp,ip6=dhcp,bridge=vmbr0" - -- name: Create new container with minimal options defining network interface with static ip - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - netif: - net0: "name=eth0,gw=192.168.0.1,ip=192.168.0.2/24,bridge=vmbr0" - -- name: Create new container with more options defining network interface with static ip4 and ip6 with vlan-tag and mtu - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - netif: - net0: "name=eth0,gw=192.168.0.1,ip=192.168.0.2/24,ip6=fe80::1227/64,gw6=fe80::1,bridge=vmbr0,firewall=1,tag=934,mtu=1500" - -- name: Create new container with minimal options defining a mount with 8GB - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - mounts: - mp0: "local:8,mp=/mnt/test/" - -- name: Create new container with minimal options defining a mount with 8GB using mount_volumes - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - mount_volumes: - - id: mp0 - storage: local - size: 8 - mountpoint: /mnt/test - -- name: Create new container with minimal options defining a cpu core limit - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - cores: 2 - -- name: Create new container with minimal options and same timezone as proxmox host - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - timezone: host - -- name: Create a new container with nesting enabled and allows the use of CIFS/NFS 
inside the container. - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - features: - - nesting=1 - - mount=cifs,nfs - -- name: > - Create a linked clone of the template container with id 100. The newly created container with be a - linked clone, because no storage parameter is defined - community.general.proxmox: - vmid: 201 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - clone: 100 - hostname: clone.example.org - -- name: Create a full clone of the container with id 100 - community.general.proxmox: - vmid: 201 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - clone: 100 - hostname: clone.example.org - storage: local - -- name: Update container configuration - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - netif: - net0: "name=eth0,gw=192.168.0.1,ip=192.168.0.3/24,bridge=vmbr0" - update: true - -- name: Start container - community.general.proxmox: - vmid: 100 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - state: started - -- name: > - Start container with mount. You should enter a 90-second timeout because servers - with additional disks take longer to boot - community.general.proxmox: - vmid: 100 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - state: started - timeout: 90 - -- name: Stop container - community.general.proxmox: - vmid: 100 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - state: stopped - -- name: Stop container with force - community.general.proxmox: - vmid: 100 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - force: true - state: stopped - -- name: Restart container(stopped or mounted container you can't restart) - community.general.proxmox: - vmid: 100 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - state: restarted - -- name: Convert container to template - community.general.proxmox: - vmid: 100 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - state: template - -- name: Convert container to template (stop container if running) - community.general.proxmox: - vmid: 100 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - state: template - force: true - -- name: Remove container - community.general.proxmox: - vmid: 100 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - state: absent -""" - -import re -import time - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native - - -from ansible_collections.community.general.plugins.module_utils.proxmox import ( - ansible_to_proxmox_bool, proxmox_auth_argument_spec, ProxmoxAnsible) - -VZ_TYPE = None - - -class ProxmoxLxcAnsible(ProxmoxAnsible): - def content_check(self, node, ostemplate, template_store): - return [True for cnt in self.proxmox_api.nodes(node).storage(template_store).content.get() if cnt['volid'] == ostemplate] - - def is_template_container(self, node, vmid): - """Check if the specified container is a template.""" - proxmox_node = self.proxmox_api.nodes(node) - config = getattr(proxmox_node, VZ_TYPE)(vmid).config.get() - return config.get('template', False) - - def update_config(self, vmid, node, disk, cpus, memory, swap, **kwargs): - if VZ_TYPE 
!= "lxc": - self.module.fail_json( - changed=False, - msg="Updating configuration is only supported for LXC enabled proxmox clusters.", - ) - - def parse_disk_string(disk_string): - # Example strings: - # "acl=0,thin1:base-100-disk-1,size=8G" - # "thin1:10,backup=0" - # "local:20" - # "volume=local-lvm:base-100-disk-1,size=20G" - # "/mnt/bindmounts/shared,mp=/shared" - # "volume=/dev/USB01,mp=/mnt/usb01" - args = disk_string.split(",") - # If the volume is not explicitly defined but implicit by only passing a key, - # add the "volume=" key prefix for ease of parsing. - args = ["volume=" + arg if "=" not in arg else arg for arg in args] - # Then create a dictionary from the arguments - disk_kwargs = dict(map(lambda item: item.split("="), args)) - - VOLUME_PATTERN = r"""(?x) - (?:(?P[\w\-.]+): - (?:(?P\d+)| - (?P[^,\s]+)) - )| - (?P[^,\s]+) - """ - # DISCLAIMER: - # There are two things called a "volume": - # 1. The "volume" key which describes the storage volume, device or directory to mount into the container. - # 2. The storage volume of a storage-backed mount point in the PVE storage sub system. - # In this section, we parse the "volume" key and check which type of mount point we are dealing with. - pattern = re.compile(VOLUME_PATTERN) - match_dict = pattern.match(disk_kwargs.pop("volume")).groupdict() - match_dict = {k: v for k, v in match_dict.items() if v is not None} - - if "storage" in match_dict and "volume" in match_dict: - disk_kwargs["storage"] = match_dict["storage"] - disk_kwargs["volume"] = match_dict["volume"] - elif "storage" in match_dict and "size" in match_dict: - disk_kwargs["storage"] = match_dict["storage"] - disk_kwargs["size"] = match_dict["size"] - elif "host_path" in match_dict: - disk_kwargs["host_path"] = match_dict["host_path"] - - # Pattern matching only available in Python 3.10+ - # match match_dict: - # case {"storage": storage, "volume": volume}: - # disk_kwargs["storage"] = storage - # disk_kwargs["volume"] = volume - - # case {"storage": storage, "size": size}: - # disk_kwargs["storage"] = storage - # disk_kwargs["size"] = size - - # case {"host_path": host_path}: - # disk_kwargs["host_path"] = host_path - - return disk_kwargs - - def convert_mounts(mount_dict): - return_list = [] - for mount_key, mount_value in mount_dict.items(): - mount_config = parse_disk_string(mount_value) - return_list.append(dict(id=mount_key, **mount_config)) - - return return_list - - def build_volume( - key, - storage=None, - volume=None, - host_path=None, - size=None, - mountpoint=None, - options=None, - **kwargs - ): - if size is not None and isinstance(size, str): - size = size.strip("G") - # 1. 
Handle volume checks/creation - # 1.1 Check if defined volume exists - if volume is not None: - storage_content = self.get_storage_content(node, storage, vmid=vmid) - vol_ids = [vol["volid"] for vol in storage_content] - volid = "{storage}:{volume}".format(storage=storage, volume=volume) - if volid not in vol_ids: - self.module.fail_json( - changed=False, - msg="Storage {storage} does not contain volume {volume}".format( - storage=storage, - volume=volume, - ), - ) - vol_string = "{storage}:{volume},size={size}G".format( - storage=storage, volume=volume, size=size - ) - # 1.2 If volume not defined (but storage is), check if it exists - elif storage is not None: - api_node = self.proxmox_api.nodes( - node - ) # The node must exist, but not the LXC - try: - vol = api_node.lxc(vmid).get("config").get(key) - volume = parse_disk_string(vol).get("volume") - vol_string = "{storage}:{volume},size={size}G".format( - storage=storage, volume=volume, size=size - ) - - # If not, we have proxmox create one using the special syntax - except Exception: - vol_string = "{storage}:{size}".format(storage=storage, size=size) - else: - raise AssertionError('Internal error') - - # 1.3 If we have a host_path, we don't have storage, a volume, or a size - vol_string = ",".join( - [vol_string] + - ([] if host_path is None else [host_path]) + - ([] if mountpoint is None else ["mp={0}".format(mountpoint)]) + - ([] if options is None else ["{0}={1}".format(k, v) for k, v in options.items()]) + - ([] if not kwargs else ["{0}={1}".format(k, v) for k, v in kwargs.items()]) - ) - - return {key: vol_string} - - # Version limited features - minimum_version = {"tags": "6.1", "timezone": "6.3"} - proxmox_node = self.proxmox_api.nodes(node) - - pve_version = self.version() - - # Fail on unsupported features - for option, version in minimum_version.items(): - if pve_version < LooseVersion(version) and option in kwargs: - self.module.fail_json( - changed=False, - msg="Feature {option} is only supported in PVE {version}+, and you're using PVE {pve_version}".format( - option=option, version=version, pve_version=pve_version - ), - ) - - # Remove all empty kwarg entries - kwargs = {key: val for key, val in kwargs.items() if val is not None} - - if cpus is not None: - kwargs["cpulimit"] = cpus - if disk is not None: - kwargs["disk_volume"] = parse_disk_string(disk) - if "disk_volume" in kwargs: - disk_dict = build_volume(key="rootfs", **kwargs.pop("disk_volume")) - kwargs.update(disk_dict) - if memory is not None: - kwargs["memory"] = memory - if swap is not None: - kwargs["swap"] = swap - if "netif" in kwargs: - kwargs.update(kwargs.pop("netif")) - if "mounts" in kwargs: - kwargs["mount_volumes"] = convert_mounts(kwargs.pop("mounts")) - if "mount_volumes" in kwargs: - mounts_list = kwargs.pop("mount_volumes") - for mount_config in mounts_list: - key = mount_config.pop("id") - mount_dict = build_volume(key=key, **mount_config) - kwargs.update(mount_dict) - # LXC tags are expected to be valid and presented as a comma/semi-colon delimited string - if "tags" in kwargs: - re_tag = re.compile(r"^[a-z0-9_][a-z0-9_\-\+\.]*$") - for tag in kwargs["tags"]: - if not re_tag.match(tag): - self.module.fail_json(msg="%s is not a valid tag" % tag) - kwargs["tags"] = ",".join(kwargs["tags"]) - - # fetch the current config - current_config = getattr(proxmox_node, VZ_TYPE)(vmid).config.get() - - # compare the requested config against the current - update_config = False - for (arg, value) in kwargs.items(): - # if the arg isn't in the current config, it 
needs to be updated - if arg not in current_config: - update_config = True - break - # some values are lists, the order isn't always the same, so split them and compare by key - if isinstance(value, str): - current_values = current_config[arg].split(",") - requested_values = value.split(",") - for new_value in requested_values: - if new_value not in current_values: - update_config = True - break - # if it is not a list (or string) just compare the current value - else: - # some types don't match with the API, so forcing to string for comparison - if str(value) != str(current_config[arg]): - update_config = True - break - - if update_config: - getattr(proxmox_node, VZ_TYPE)(vmid).config.put(vmid=vmid, node=node, **kwargs) - else: - self.module.exit_json(changed=False, msg="Container config is already up to date") - - def create_instance(self, vmid, node, disk, storage, cpus, memory, swap, timeout, clone, **kwargs): - - # Version limited features - minimum_version = { - 'tags': '6.1', - 'timezone': '6.3' - } - proxmox_node = self.proxmox_api.nodes(node) - - # Remove all empty kwarg entries - kwargs = {k: v for k, v in kwargs.items() if v is not None} - - pve_version = self.version() - - # Fail on unsupported features - for option, version in minimum_version.items(): - if pve_version < LooseVersion(version) and option in kwargs: - self.module.fail_json(changed=False, msg="Feature {option} is only supported in PVE {version}+, and you're using PVE {pve_version}". - format(option=option, version=version, pve_version=pve_version)) - - if VZ_TYPE == 'lxc': - kwargs['cpulimit'] = cpus - kwargs['rootfs'] = disk - if 'netif' in kwargs: - kwargs.update(kwargs['netif']) - del kwargs['netif'] - if 'mounts' in kwargs: - kwargs.update(kwargs['mounts']) - del kwargs['mounts'] - if 'pubkey' in kwargs: - if self.version() >= LooseVersion('4.2'): - kwargs['ssh-public-keys'] = kwargs['pubkey'] - del kwargs['pubkey'] - else: - kwargs['cpus'] = cpus - kwargs['disk'] = disk - - # LXC tags are expected to be valid and presented as a comma/semi-colon delimited string - if 'tags' in kwargs: - re_tag = re.compile(r'^[a-z0-9_][a-z0-9_\-\+\.]*$') - for tag in kwargs['tags']: - if not re_tag.match(tag): - self.module.fail_json(msg='%s is not a valid tag' % tag) - kwargs['tags'] = ",".join(kwargs['tags']) - - if kwargs.get('ostype') == 'auto': - kwargs.pop('ostype') - - if clone is not None: - if VZ_TYPE != 'lxc': - self.module.fail_json(changed=False, msg="Clone operator is only supported for LXC enabled proxmox clusters.") - - clone_is_template = self.is_template_container(node, clone) - - # By default, create a full copy only when the cloned container is not a template. - create_full_copy = not clone_is_template - - # Only accept parameters that are compatible with the clone endpoint. - valid_clone_parameters = ['hostname', 'pool', 'description'] - if self.module.params['storage'] is not None and clone_is_template: - # Cloning a template, so create a full copy instead of a linked copy - create_full_copy = True - elif self.module.params['storage'] is None and not clone_is_template: - # Not cloning a template, but also no defined storage. This isn't possible. 
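The drift check in update_config() above comes down to one predicate: any key missing from the current config, any comma-separated option whose members differ as a set, or any scalar whose string form differs forces a config PUT. A condensed sketch with an illustrative name (not part of the module):

# Order-insensitive comparison, as performed by update_config() above.
def needs_update(requested, current):
    for arg, value in requested.items():
        if arg not in current:
            return True
        if isinstance(value, str):
            # comma-separated options (tags, features, ...) compare as sets
            if any(v not in current[arg].split(",") for v in value.split(",")):
                return True
        else:
            # the API returns strings, so coerce both sides before comparing
            if str(value) != str(current[arg]):
                return True
    return False

assert not needs_update({"memory": 512}, {"memory": "512"})
assert not needs_update({"tags": "b,a"}, {"tags": "a,b"})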
- self.module.fail_json(changed=False, msg="Cloned container is not a template, storage needs to be specified.") - - if self.module.params['clone_type'] == 'linked': - if not clone_is_template: - self.module.fail_json(changed=False, msg="'linked' clone type is specified, but cloned container is not a template container.") - # Don't need to do more, by default create_full_copy is set to false already - elif self.module.params['clone_type'] == 'opportunistic': - if not clone_is_template: - # Cloned container is not a template, so we need our 'storage' parameter - valid_clone_parameters.append('storage') - elif self.module.params['clone_type'] == 'full': - create_full_copy = True - valid_clone_parameters.append('storage') - - clone_parameters = {} - - if create_full_copy: - clone_parameters['full'] = '1' - else: - clone_parameters['full'] = '0' - for param in valid_clone_parameters: - if self.module.params[param] is not None: - clone_parameters[param] = self.module.params[param] - - taskid = getattr(proxmox_node, VZ_TYPE)(clone).clone.post(newid=vmid, **clone_parameters) - else: - taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs) - - while timeout: - if self.api_task_ok(node, taskid): - return True - timeout -= 1 - if timeout == 0: - self.module.fail_json(vmid=vmid, node=node, msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s' % - proxmox_node.tasks(taskid).log.get()[:1]) - - time.sleep(1) - return False - - def start_instance(self, vm, vmid, timeout): - taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.start.post() - while timeout: - if self.api_task_ok(vm['node'], taskid): - return True - timeout -= 1 - if timeout == 0: - self.module.fail_json(vmid=vmid, taskid=taskid, msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s' % - self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) - - time.sleep(1) - return False - - def stop_instance(self, vm, vmid, timeout, force): - if force: - taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.shutdown.post(forceStop=1) - else: - taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.shutdown.post() - while timeout: - if self.api_task_ok(vm['node'], taskid): - return True - timeout -= 1 - if timeout == 0: - self.module.fail_json(vmid=vmid, taskid=taskid, msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s' % - self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) - - time.sleep(1) - return False - - def convert_to_template(self, vm, vmid, timeout, force): - if getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running' and force: - self.stop_instance(vm, vmid, timeout, force) - # not sure why, but templating a container doesn't return a taskid - getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).template.post() - return True - - def umount_instance(self, vm, vmid, timeout): - taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.umount.post() - while timeout: - if self.api_task_ok(vm['node'], taskid): - return True - timeout -= 1 - if timeout == 0: - self.module.fail_json(vmid=vmid, taskid=taskid, msg='Reached timeout while waiting for unmounting VM. 
Last line in task before timeout: %s' % - self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) - - time.sleep(1) - return False - - -def main(): - module_args = proxmox_auth_argument_spec() - proxmox_args = dict( - vmid=dict(type='int', required=False), - node=dict(), - pool=dict(), - password=dict(no_log=True), - hostname=dict(), - ostemplate=dict(), - disk=dict(type='str'), - disk_volume=dict( - type="dict", - options=dict( - storage=dict(type="str"), - volume=dict(type="str"), - size=dict(type="int"), - host_path=dict(type="path"), - options=dict(type="dict"), - ), - required_together=[("storage", "size")], - required_by={ - "volume": ("storage", "size"), - }, - mutually_exclusive=[ - ("host_path", "storage"), - ("host_path", "volume"), - ("host_path", "size"), - ], - ), - cores=dict(type='int'), - cpus=dict(type='int'), - memory=dict(type='int'), - swap=dict(type='int'), - netif=dict(type='dict'), - mounts=dict(type='dict'), - mount_volumes=dict( - type="list", - elements="dict", - options=dict( - id=(dict(type="str", required=True)), - storage=dict(type="str"), - volume=dict(type="str"), - size=dict(type="int"), - host_path=dict(type="path"), - mountpoint=dict(type="path", required=True), - options=dict(type="dict"), - ), - required_together=[("storage", "size")], - required_by={ - "volume": ("storage", "size"), - }, - mutually_exclusive=[ - ("host_path", "storage"), - ("host_path", "volume"), - ("host_path", "size"), - ], - ), - ip_address=dict(), - ostype=dict(default='auto', choices=[ - 'auto', 'debian', 'devuan', 'ubuntu', 'centos', 'fedora', 'opensuse', 'archlinux', 'alpine', 'gentoo', 'nixos', 'unmanaged' - ]), - onboot=dict(type='bool'), - features=dict(type='list', elements='str'), - startup=dict(type='list', elements='str'), - storage=dict(default='local'), - cpuunits=dict(type='int'), - nameserver=dict(), - searchdomain=dict(), - timeout=dict(type='int', default=30), - update=dict(type='bool', default=False), - force=dict(type='bool', default=False), - purge=dict(type='bool', default=False), - state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted', 'template']), - pubkey=dict(type='str'), - unprivileged=dict(type='bool', default=True), - description=dict(type='str'), - hookscript=dict(type='str'), - timezone=dict(type='str'), - clone=dict(type='int'), - clone_type=dict(default='opportunistic', choices=['full', 'linked', 'opportunistic']), - tags=dict(type='list', elements='str') - ) - module_args.update(proxmox_args) - - module = AnsibleModule( - argument_spec=module_args, - required_if=[ - ('state', 'present', ['node', 'hostname']), - # Require one of clone, ostemplate, or update. Together with mutually_exclusive this ensures that we - # either clone a container or create a new one from a template file. - ('state', 'present', ('clone', 'ostemplate', 'update'), True), - ], - required_together=[("api_token_id", "api_token_secret")], - required_one_of=[("api_password", "api_token_id")], - mutually_exclusive=[ - ( - "clone", - "ostemplate", - "update", - ), # Creating a new container is done either by cloning an existing one, or based on a template. 
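The disk_volume suboption spec above encodes three rules: O(storage) and O(size) travel together, O(volume) additionally requires both, and O(host_path) excludes all three. A standalone sketch of the same rules (check_disk_volume is an illustrative name, not part of the module):

def check_disk_volume(dv):
    # host_path (bind mount) excludes Proxmox-managed storage entirely
    if dv.get("host_path") is not None and any(dv.get(k) is not None for k in ("storage", "volume", "size")):
        raise ValueError("host_path is mutually exclusive with storage, volume and size")
    # an existing volume can only be addressed together with its storage and size
    if dv.get("volume") is not None and (dv.get("storage") is None or dv.get("size") is None):
        raise ValueError("volume requires storage and size")
    # storage and size are required together
    if (dv.get("storage") is None) != (dv.get("size") is None):
        raise ValueError("storage and size are required together")

check_disk_volume({"storage": "local-zfs", "size": 8})  # ok: Proxmox allocates a volume
check_disk_volume({"host_path": "/srv/data"})           # ok: plain bind mount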
- ("disk", "disk_volume", "storage"), - ("mounts", "mount_volumes"), - ], - ) - - proxmox = ProxmoxLxcAnsible(module) - - global VZ_TYPE - VZ_TYPE = 'openvz' if proxmox.version() < LooseVersion('4.0') else 'lxc' - - state = module.params['state'] - vmid = module.params['vmid'] - node = module.params['node'] - disk = module.params['disk'] - cpus = module.params['cpus'] - memory = module.params['memory'] - swap = module.params['swap'] - storage = module.params['storage'] - hostname = module.params['hostname'] - if module.params['ostemplate'] is not None: - template_store = module.params['ostemplate'].split(":")[0] - timeout = module.params['timeout'] - clone = module.params['clone'] - - # If vmid not set get the Next VM id from ProxmoxAPI - # If hostname is set get the VM id from ProxmoxAPI - if not vmid and state == 'present': - vmid = proxmox.get_nextvmid() - elif not vmid and hostname: - vmid = proxmox.get_vmid(hostname) - elif not vmid: - module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state) - - # Create a new container - if state == 'present' and clone is None: - try: - if proxmox.get_vm(vmid, ignore_missing=True): - if module.params["update"]: - try: - proxmox.update_config(vmid, node, disk, cpus, memory, swap, - cores=module.params["cores"], - hostname=module.params["hostname"], - netif=module.params["netif"], - disk_volume=module.params["disk_volume"], - mounts=module.params["mounts"], - mount_volumes=module.params["mount_volumes"], - ip_address=module.params["ip_address"], - onboot=ansible_to_proxmox_bool(module.params["onboot"]), - cpuunits=module.params["cpuunits"], - nameserver=module.params["nameserver"], - searchdomain=module.params["searchdomain"], - features=",".join(module.params["features"]) - if module.params["features"] is not None - else None, - startup=",".join(module.params["startup"]) - if module.params["startup"] is not None - else None, - description=module.params["description"], - hookscript=module.params["hookscript"], - timezone=module.params["timezone"], - tags=module.params["tags"]) - module.exit_json( - changed=True, - vmid=vmid, - msg="Configured VM %s" % (vmid), - ) - except Exception as e: - module.fail_json( - vmid=vmid, - msg="Configuration of %s VM %s failed with exception: %s" - % (VZ_TYPE, vmid, e), - ) - if not module.params["force"]: - module.exit_json( - changed=False, - vmid=vmid, - msg="VM with vmid = %s is already exists" % vmid, - ) - # If no vmid was passed, there cannot be another VM named 'hostname' - if (not module.params['vmid'] and - proxmox.get_vmid(hostname, ignore_missing=True) and - not module.params['force']): - vmid = proxmox.get_vmid(hostname) - module.exit_json(changed=False, vmid=vmid, msg="VM with hostname %s already exists and has ID number %s" % (hostname, vmid)) - elif not proxmox.get_node(node): - module.fail_json(vmid=vmid, msg="node '%s' not exists in cluster" % node) - elif not proxmox.content_check(node, module.params['ostemplate'], template_store): - module.fail_json(vmid=vmid, msg="ostemplate '%s' not exists on node %s and storage %s" - % (module.params['ostemplate'], node, template_store)) - except Exception as e: - module.fail_json(vmid=vmid, msg="Pre-creation checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e)) - - try: - proxmox.create_instance(vmid, node, disk, storage, cpus, memory, swap, timeout, clone, - cores=module.params['cores'], - pool=module.params['pool'], - password=module.params['password'], - 
hostname=module.params['hostname'], - ostemplate=module.params['ostemplate'], - netif=module.params['netif'], - disk_volume=module.params["disk_volume"], - mounts=module.params['mounts'], - mount_volumes=module.params["mount_volumes"], - ostype=module.params['ostype'], - ip_address=module.params['ip_address'], - onboot=ansible_to_proxmox_bool(module.params['onboot']), - cpuunits=module.params['cpuunits'], - nameserver=module.params['nameserver'], - searchdomain=module.params['searchdomain'], - force=ansible_to_proxmox_bool(module.params['force']), - pubkey=module.params['pubkey'], - features=",".join(module.params['features']) if module.params['features'] is not None else None, - startup=",".join(module.params['startup']) if module.params['startup'] is not None else None, - unprivileged=ansible_to_proxmox_bool(module.params['unprivileged']), - description=module.params['description'], - hookscript=module.params['hookscript'], - timezone=module.params['timezone'], - tags=module.params['tags']) - - module.exit_json(changed=True, vmid=vmid, msg="Deployed VM %s from template %s" % (vmid, module.params['ostemplate'])) - except Exception as e: - module.fail_json(vmid=vmid, msg="Creation of %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e)) - - # Clone a container - elif state == 'present' and clone is not None: - try: - if proxmox.get_vm(vmid, ignore_missing=True) and not module.params['force']: - module.exit_json(changed=False, vmid=vmid, msg="VM with vmid = %s is already exists" % vmid) - # If no vmid was passed, there cannot be another VM named 'hostname' - if (not module.params['vmid'] and - proxmox.get_vmid(hostname, ignore_missing=True) and - not module.params['force']): - vmid = proxmox.get_vmid(hostname) - module.exit_json(changed=False, vmid=vmid, msg="VM with hostname %s already exists and has ID number %s" % (hostname, vmid)) - if not proxmox.get_vm(clone, ignore_missing=True): - module.exit_json(changed=False, vmid=vmid, msg="Container to be cloned does not exist") - except Exception as e: - module.fail_json(vmid=vmid, msg="Pre-clone checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e)) - - try: - proxmox.create_instance(vmid, node, disk, storage, cpus, memory, swap, timeout, clone) - - module.exit_json(changed=True, vmid=vmid, msg="Cloned VM %s from %s" % (vmid, clone)) - except Exception as e: - module.fail_json(vmid=vmid, msg="Cloning %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e)) - - elif state == 'started': - try: - vm = proxmox.get_vm(vmid) - if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running': - module.exit_json(changed=False, vmid=vmid, msg="VM %s is already running" % vmid) - - if proxmox.start_instance(vm, vmid, timeout): - module.exit_json(changed=True, vmid=vmid, msg="VM %s started" % vmid) - except Exception as e: - module.fail_json(vmid=vmid, msg="starting of VM %s failed with exception: %s" % (vmid, e)) - - elif state == 'stopped': - try: - vm = proxmox.get_vm(vmid) - - if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted': - if module.params['force']: - if proxmox.umount_instance(vm, vmid, timeout): - module.exit_json(changed=True, vmid=vmid, msg="VM %s is shutting down" % vmid) - else: - module.exit_json(changed=False, vmid=vmid, - msg=("VM %s is already shutdown, but mounted. 
You can use force option to umount it.") % vmid) - - if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped': - module.exit_json(changed=False, vmid=vmid, msg="VM %s is already shutdown" % vmid) - - if proxmox.stop_instance(vm, vmid, timeout, force=module.params['force']): - module.exit_json(changed=True, vmid=vmid, msg="VM %s is shutting down" % vmid) - except Exception as e: - module.fail_json(vmid=vmid, msg="stopping of VM %s failed with exception: %s" % (vmid, e)) - - elif state == 'template': - try: - vm = proxmox.get_vm(vmid) - - proxmox.convert_to_template(vm, vmid, timeout, force=module.params['force']) - module.exit_json(changed=True, msg="VM %s is converted to template" % vmid) - except Exception as e: - module.fail_json(vmid=vmid, msg="conversion of VM %s to template failed with exception: %s" % (vmid, e)) - - elif state == 'restarted': - try: - vm = proxmox.get_vm(vmid) - - vm_status = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] - if vm_status in ['stopped', 'mounted']: - module.exit_json(changed=False, vmid=vmid, msg="VM %s is not running" % vmid) - - if (proxmox.stop_instance(vm, vmid, timeout, force=module.params['force']) and - proxmox.start_instance(vm, vmid, timeout)): - module.exit_json(changed=True, vmid=vmid, msg="VM %s is restarted" % vmid) - except Exception as e: - module.fail_json(vmid=vmid, msg="restarting of VM %s failed with exception: %s" % (vmid, e)) - - elif state == 'absent': - if not vmid: - module.exit_json(changed=False, vmid=vmid, msg='VM with hostname = %s is already absent' % hostname) - try: - vm = proxmox.get_vm(vmid, ignore_missing=True) - if not vm: - module.exit_json(changed=False, vmid=vmid, msg="VM %s does not exist" % vmid) - - vm_status = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] - if vm_status == 'running': - module.exit_json(changed=False, vmid=vmid, msg="VM %s is running. Stop it before deletion." % vmid) - - if vm_status == 'mounted': - module.exit_json(changed=False, vmid=vmid, msg="VM %s is mounted. Stop it with force option before deletion." % vmid) - - delete_params = {} - - if module.params['purge']: - delete_params['purge'] = 1 - - taskid = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE).delete(vmid, **delete_params) - - while timeout: - if proxmox.api_task_ok(vm['node'], taskid): - module.exit_json(changed=True, vmid=vmid, taskid=taskid, msg="VM %s removed" % vmid) - timeout -= 1 - if timeout == 0: - module.fail_json(vmid=vmid, taskid=taskid, msg='Reached timeout while waiting for removing VM. 
Last line in task before timeout: %s'
- % proxmox.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
-
- time.sleep(1)
- except Exception as e:
- module.fail_json(vmid=vmid, msg="deletion of VM %s failed with exception: %s" % (vmid, to_native(e)))
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/proxmox_backup.py b/plugins/modules/proxmox_backup.py
deleted file mode 100644
index b14dd529e8..0000000000
--- a/plugins/modules/proxmox_backup.py
+++ /dev/null
@@ -1,567 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2024, IamLunchbox
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-
-DOCUMENTATION = r"""
-module: proxmox_backup
-author: "Raphael Grieger (@IamLunchbox) "
-short_description: Start a VM backup in Proxmox VE cluster
-version_added: 10.1.0
-description:
- - Allows you to create backups of KVM and LXC guests in a Proxmox VE cluster.
- - Offers the GUI functionality of creating a single backup as well as using the run-now functionality from the cluster backup schedule.
- - The minimum required privileges to use this module are C(VM.Backup) and C(Datastore.AllocateSpace) for the respective VMs and storage.
- - Most options are optional and, if unspecified, are chosen by the cluster and its default values.
- - Note that this module B(is not idempotent). It always starts a new backup (when not in check mode).
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
-options:
- backup_mode:
- description:
- - The mode in which Proxmox performs backups. The default is to create a runtime snapshot including memory.
- - Check U(https://pve.proxmox.com/pve-docs/chapter-vzdump.html#_backup_modes) for an explanation of the differences.
- type: str
- choices: ["snapshot", "suspend", "stop"]
- default: snapshot
- bandwidth:
- description:
- - Limit the I/O bandwidth (in KiB/s) to write backup. V(0) is unlimited.
- type: int
- change_detection_mode:
- description:
- - Set the change detection mode (available from Proxmox VE 8.3).
- - It is only used when backing up containers; Proxmox silently ignores this option when applied to KVM guests.
- type: str
- choices: ["legacy", "data", "metadata"]
- compress:
- description:
- - Enable additional compression of the backup archive.
- - V(0) will use the Proxmox recommended value, depending on your storage target.
- type: str
- choices: ["0", "1", "gzip", "lzo", "zstd"]
- compression_threads:
- description:
- - The number of threads zstd will use to compress the backup.
- - V(0) uses 50% of the available cores, anything larger than V(0) will use exactly as many threads.
- - Ignored if you specify O(compress=gzip) or O(compress=lzo).
- type: int
- description:
- description:
- - Specify the description of the backup.
- - Needs to be a single line; newline and backslash need to be escaped as V(\\n) and V(\\\\) respectively.
- - If you need variable interpolation, you can set the content as usual through Ansible Jinja templating and/or let Proxmox substitute templates.
- - Proxmox currently supports V({{cluster}}), V({{guestname}}), V({{node}}), and V({{vmid}}) as templating variables. Since this is also a Jinja delimiter, you need to set these values as raw Jinja.
- default: "{{guestname}}"
- type: str
- fleecing:
- description:
- - Enable backup fleecing.
Works only for virtual machines and their disks.
- - Must be entered as a string, containing key-value pairs in a list.
- type: str
- mode:
- description:
- - Specifies the mode to select backup targets.
- choices: ["include", "all", "pool"]
- required: true
- type: str
- node:
- description:
- - Only execute the backup job for the given node.
- - This option is usually used if O(mode=all).
- - If you specify a node ID and your vmids or pool do not reside there, they will not be backed up!
- type: str
- notification_mode:
- description:
- - Determine which notification system to use.
- type: str
- choices: ["auto", "legacy-sendmail", "notification-system"]
- default: auto
- performance_tweaks:
- description:
- - Enable other performance-related settings.
- - Must be entered as a string, containing comma-separated key-value pairs.
- - 'For example: V(max-workers=2,pbs-entries-max=2).'
- type: str
- pool:
- description:
- - Specify a pool name to limit backups to guests in the given pool.
- - Required when O(mode=pool).
- - Also required when your user only has VM.Backup permission for this single pool.
- type: str
- protected:
- description:
- - Marks backups as protected.
- - '"Might fail when the PBS backend has verify enabled due to this bug: U(https://bugzilla.proxmox.com/show_bug.cgi?id=4289)".'
- type: bool
- retention:
- description:
- - Use custom retention options instead of those from the default cluster configuration (which is usually V("keep-all=1")).
- - Always requires Datastore.Allocate permission at the storage endpoint.
- - Specifying a retention time other than V(keep-all=1) might trigger pruning on the datastore if an existing backup has to be deleted due to your specified timeframe.
- - Deleting requires C(Datastore.Modify) or C(Datastore.Prune) permissions on the backup storage.
- type: str
- storage:
- description:
- - Store the backup archive on this storage.
- type: str
- required: true
- vmids:
- description:
- - The instance IDs to be backed up.
- - Only valid if O(mode=include).
- type: list
- elements: int
- wait:
- description:
- - Wait for the backup to be finished.
- - Fails if the job does not succeed within the given timeout.
- type: bool
- default: false
- wait_timeout:
- description:
- - Seconds to wait for the backup to be finished.
- - Only evaluated if O(wait=true).
- type: int
- default: 10
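Every option above is eventually folded into a single vzdump POST per node (see request_backup() below). A minimal proxmoxer sketch of that call, assuming placeholder host and credentials:

from proxmoxer import ProxmoxAPI

api = ProxmoxAPI("node1", user="root@pam", password="secret", verify_ssl=False)
# Returns a task UPID to poll, or the literal string "OK" when the node
# created no task (for example, because it had no matching guests).
upid = api.nodes("node1").vzdump.post(storage="mypbs", all=1, mode="snapshot")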
-requirements: ["proxmoxer", "requests"]
-extends_documentation_fragment:
- - community.general.proxmox.actiongroup_proxmox
- - community.general.proxmox.documentation
- - community.general.attributes
-"""
-
-EXAMPLES = r"""
-- name: Backup all VMs in the Proxmox cluster to storage mypbs
- community.general.proxmox_backup:
- api_user: root@pam
- api_password: secret
- api_host: node1
- storage: mypbs
- mode: all
-
-- name: Backup VMID 100 by stopping it and setting an individual retention
- community.general.proxmox_backup:
- api_user: root@pam
- api_password: secret
- api_host: node1
- backup_mode: stop
- mode: include
- retention: keep-daily=5, keep-last=14, keep-monthly=4, keep-weekly=4, keep-yearly=0
- storage: mypbs
- vmids: [100]
-
-- name: Backup all VMs on node node2 to storage mypbs and wait for the task to finish
- community.general.proxmox_backup:
- api_user: test@pve
- api_password: 1q2w3e
- api_host: node2
- storage: mypbs
- mode: all
- node: node2
- wait: true
- wait_timeout: 30
-
-- name: Use all the options
- community.general.proxmox_backup:
- api_user: root@pam
- api_password: secret
- api_host: node1
- bandwidth: 1000
- backup_mode: suspend
- compress: zstd
- compression_threads: 0
- description: A single backup for {% raw %}{{ guestname }}{% endraw %}
- mode: include
- notification_mode: notification-system
- protected: true
- retention: keep-monthly=1, keep-weekly=1
- storage: mypbs
- vmids:
- - 100
- - 101
-"""
-
-RETURN = r"""
-backups:
- description: List of nodes and their task IDs.
- returned: on success
- type: list
- elements: dict
- contains:
- node:
- description: Node ID.
- returned: on success
- type: str
- status:
- description: Last known task status. Will be V(unknown) if O(wait=false).
- returned: on success
- type: str
- choices: ["unknown", "success", "failed"]
- upid:
- description: >-
- Proxmox cluster UPID, which is needed to look up task info. Returns OK when a cluster node did not create a task after being called, for
- example due to no matching targets.
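The RETURN block above implies a result shape along these lines (the values here are hypothetical), which keeps post-processing simple:

result = {
    "backups": [
        {"node": "node1", "status": "success", "upid": "UPID:node1:000AB3C2:..."},
        {"node": "node2", "status": "unknown", "upid": "OK"},  # no task created
    ]
}
failed_nodes = [b["node"] for b in result["backups"] if b["status"] == "failed"]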
- returned: on success - type: str -""" - -import time - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native -from ansible_collections.community.general.plugins.module_utils.proxmox import ProxmoxAnsible, proxmox_auth_argument_spec - - -def has_permission(permission_tree, permission, search_scopes, default=0, expected=1): - return any(permission_tree.get(scope, {}).get(permission, default) == expected for scope in search_scopes) - - -class ProxmoxBackupAnsible(ProxmoxAnsible): - - def _get_permissions(self): - return self.proxmox_api.access.permissions.get() - - def _get_resources(self, resource_type=None): - return self.proxmox_api.cluster.resources.get(type=resource_type) - - def _get_tasklog(self, node, upid): - return self.proxmox_api.nodes(node).tasks(upid).log.get() - - def _get_taskok(self, node, upid): - return self.proxmox_api.nodes(node).tasks(upid).status.get() - - def _post_vzdump(self, node, request_body): - return self.proxmox_api.nodes(node).vzdump.post(**request_body) - - def request_backup( - self, - request_body, - node_endpoints): - task_ids = [] - - for node in node_endpoints: - upid = self._post_vzdump(node, request_body) - if upid != "OK": - tasklog = ", ".join(logentry["t"] for logentry in self._get_tasklog(node, upid)) - else: - tasklog = "" - task_ids.extend([{"node": node, "upid": upid, "status": "unknown", "log": "%s" % tasklog}]) - return task_ids - - def check_relevant_nodes(self, node): - nodes = [ - item["node"] - for item in self._get_resources("node") - if item["status"] == "online" - ] - if node and node not in nodes: - self.module.fail_json(msg="Node %s was specified, but does not exist on the cluster" % node) - elif node: - return [node] - return nodes - - def check_storage_permissions( - self, - permissions, - storage, - bandwidth, - performance, - retention): - # Check for Datastore.AllocateSpace in the permission tree - if not has_permission(permissions, "Datastore.AllocateSpace", search_scopes=["/", "/storage/", "/storage/" + storage]): - self.module.fail_json(changed=False, msg="Insufficient permission: Datastore.AllocateSpace is missing") - - if (bandwidth or performance) and has_permission(permissions, "Sys.Modify", search_scopes=["/"], expected=0): - self.module.fail_json(changed=False, msg="Insufficient permission: Performance_tweaks and bandwidth require 'Sys.Modify' permission for '/'") - - if retention: - if not has_permission(permissions, "Datastore.Allocate", search_scopes=["/", "/storage", "/storage/" + storage]): - self.module.fail_json(changed=False, msg="Insufficient permissions: Custom retention was requested, but Datastore.Allocate is missing") - - def check_vmid_backup_permission(self, permissions, vmids, pool): - sufficient_permissions = has_permission(permissions, "VM.Backup", search_scopes=["/", "/vms"]) - if pool and not sufficient_permissions: - sufficient_permissions = has_permission(permissions, "VM.Backup", search_scopes=["/pool/" + pool, "/pool/" + pool + "/vms"]) - - if not sufficient_permissions: - # Since VM.Backup can be given for each vmid at a time, iterate through all of them - # and check, if the permission is set - failed_vmids = [] - for vm in vmids: - vm_path = "/vms/" + str(vm) - if has_permission(permissions, "VM.Backup", search_scopes=[vm_path], default=1, expected=0): - failed_vmids.append(str(vm)) - if failed_vmids: - self.module.fail_json( - changed=False, msg="Insufficient permissions: " - "You dont have the VM.Backup permission for VMID 
%s" % - ", ".join(failed_vmids)) - sufficient_permissions = True - # Finally, when no check succeeded, fail - if not sufficient_permissions: - self.module.fail_json(changed=False, msg="Insufficient permissions: You do not have the VM.Backup permission") - - def check_general_backup_permission(self, permissions, pool): - if not has_permission(permissions, "VM.Backup", search_scopes=["/", "/vms"] + (["/pool/" + pool] if pool else [])): - self.module.fail_json(changed=False, msg="Insufficient permissions: You dont have the VM.Backup permission") - - def check_if_storage_exists(self, storage, node): - storages = self.get_storages(type=None) - # Loop through all cluster storages and get all matching storages - validated_storagepath = [storageentry for storageentry in storages if storageentry["storage"] == storage] - if not validated_storagepath: - self.module.fail_json( - changed=False, - msg="Storage %s does not exist in the cluster" % - storage) - - def check_vmids(self, vmids): - cluster_vmids = [vm["vmid"] for vm in self._get_resources("vm")] - if not cluster_vmids: - self.module.warn( - "VM.Audit permission is missing or there are no VMs. This task might fail if one VMID does not exist") - return - vmids_not_found = [str(vm) for vm in vmids if vm not in cluster_vmids] - if vmids_not_found: - self.module.warn( - "VMIDs %s not found. This task will fail if one VMID does not exist" % - ", ".join(vmids_not_found)) - - def wait_for_timeout(self, timeout, raw_tasks): - - # filter all entries, which did not get a task id from the Cluster - tasks = [] - ok_tasks = [] - for node in raw_tasks: - if node["upid"] != "OK": - tasks.append(node) - else: - ok_tasks.append(node) - - start_time = time.time() - # iterate through the task ids and check their values - while True: - for node in tasks: - if node["status"] == "unknown": - try: - # proxmox.api_task_ok does not suffice, since it only - # is true at `stopped` and `ok` - status = self._get_taskok(node["node"], node["upid"]) - if status["status"] == "stopped" and status["exitstatus"] == "OK": - node["status"] = "success" - if status["status"] == "stopped" and status["exitstatus"] == "job errors": - node["status"] = "failed" - except Exception as e: - self.module.fail_json(msg="Unable to retrieve API task ID from node %s: %s" % (node["node"], e)) - if len([item for item in tasks if item["status"] != "unknown"]) == len(tasks): - break - if time.time() > start_time + timeout: - timeouted_nodes = [ - node["node"] - for node in tasks - if node["status"] == "unknown" - ] - failed_nodes = [node["node"] for node in tasks if node["status"] == "failed"] - if failed_nodes: - self.module.fail_json( - msg="Reached timeout while waiting for backup task. " - "Nodes, who reached the timeout: %s. " - "Nodes, which failed: %s" % - (", ".join(timeouted_nodes), ", ".join(failed_nodes))) - self.module.fail_json( - msg="Reached timeout while waiting for creating VM snapshot. " - "Nodes who reached the timeout: %s" % - ", ".join(timeouted_nodes)) - time.sleep(1) - - error_logs = [] - for node in tasks: - if node["status"] == "failed": - tasklog = ", ".join([logentry["t"] for logentry in self._get_tasklog(node["node"], node["upid"])]) - error_logs.append("%s: %s" % (node, tasklog)) - if error_logs: - self.module.fail_json( - msg="An error occured creating the backups. 
" - "These are the last log lines from the failed nodes: %s" % - ", ".join(error_logs)) - - for node in tasks: - tasklog = ", ".join([logentry["t"] for logentry in self._get_tasklog(node["node"], node["upid"])]) - node["log"] = tasklog - - # Finally, reattach ok tasks to show, that all nodes were contacted - tasks.extend(ok_tasks) - return tasks - - def permission_check( - self, - storage, - mode, - node, - bandwidth, - performance_tweaks, - retention, - pool, - vmids): - permissions = self._get_permissions() - self.check_if_storage_exists(storage, node) - self.check_storage_permissions( - permissions, storage, bandwidth, performance_tweaks, retention) - if mode == "include": - self.check_vmid_backup_permission(permissions, vmids, pool) - else: - self.check_general_backup_permission(permissions, pool) - - def prepare_request_parameters(self, module_arguments): - # ensure only valid post parameters are passed to proxmox - # list of dict items to replace with (new_val, old_val) - post_params = [("bwlimit", "bandwidth"), - ("compress", "compress"), - ("fleecing", "fleecing"), - ("mode", "backup_mode"), - ("notes-template", "description"), - ("notification-mode", "notification_mode"), - ("pbs-change-detection-mode", "change_detection_mode"), - ("performance", "performance_tweaks"), - ("pool", "pool"), - ("protected", "protected"), - ("prune-backups", "retention"), - ("storage", "storage"), - ("zstd", "compression_threads"), - ("vmid", "vmids")] - request_body = {} - for new, old in post_params: - if module_arguments.get(old): - request_body.update({new: module_arguments[old]}) - - # Set mode specific values - if module_arguments["mode"] == "include": - request_body.pop("pool", None) - request_body["all"] = 0 - elif module_arguments["mode"] == "all": - request_body.pop("vmid", None) - request_body.pop("pool", None) - request_body["all"] = 1 - elif module_arguments["mode"] == "pool": - request_body.pop("vmid", None) - request_body["all"] = 0 - - # Create comma separated list from vmids, the API expects so - if request_body.get("vmid"): - request_body.update({"vmid": ",".join(str(vmid) for vmid in request_body["vmid"])}) - - # remove whitespaces from option strings - for key in ("prune-backups", "performance"): - if request_body.get(key): - request_body[key] = request_body[key].replace(" ", "") - # convert booleans to 0/1 - for key in ("protected",): - if request_body.get(key): - request_body[key] = 1 - return request_body - - def backup_create( - self, - module_arguments, - check_mode, - node_endpoints): - request_body = self.prepare_request_parameters(module_arguments) - # stop here, before anything gets changed - if check_mode: - return [] - - task_ids = self.request_backup(request_body, node_endpoints) - updated_task_ids = [] - if module_arguments["wait"]: - updated_task_ids = self.wait_for_timeout( - module_arguments["wait_timeout"], task_ids) - return updated_task_ids if updated_task_ids else task_ids - - -def main(): - module_args = proxmox_auth_argument_spec() - backup_args = { - "backup_mode": {"type": "str", "default": "snapshot", "choices": ["snapshot", "suspend", "stop"]}, - "bandwidth": {"type": "int"}, - "change_detection_mode": {"type": "str", "choices": ["legacy", "data", "metadata"]}, - "compress": {"type": "str", "choices": ["0", "1", "gzip", "lzo", "zstd"]}, - "compression_threads": {"type": "int"}, - "description": {"type": "str", "default": "{{guestname}}"}, - "fleecing": {"type": "str"}, - "mode": {"type": "str", "required": True, "choices": ["include", "all", "pool"]}, - 
"node": {"type": "str"}, - "notification_mode": {"type": "str", "default": "auto", "choices": ["auto", "legacy-sendmail", "notification-system"]}, - "performance_tweaks": {"type": "str"}, - "pool": {"type": "str"}, - "protected": {"type": "bool"}, - "retention": {"type": "str"}, - "storage": {"type": "str", "required": True}, - "vmids": {"type": "list", "elements": "int"}, - "wait": {"type": "bool", "default": False}, - "wait_timeout": {"type": "int", "default": 10}} - module_args.update(backup_args) - - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=True, - required_if=[ - ("mode", "include", ("vmids",), True), - ("mode", "pool", ("pool",)) - ] - ) - proxmox = ProxmoxBackupAnsible(module) - bandwidth = module.params["bandwidth"] - mode = module.params["mode"] - node = module.params["node"] - performance_tweaks = module.params["performance_tweaks"] - pool = module.params["pool"] - retention = module.params["retention"] - storage = module.params["storage"] - vmids = module.params["vmids"] - - proxmox.permission_check( - storage, - mode, - node, - bandwidth, - performance_tweaks, - retention, - pool, - vmids) - if module.params["mode"] == "include": - proxmox.check_vmids(module.params["vmids"]) - node_endpoints = proxmox.check_relevant_nodes(module.params["node"]) - try: - result = proxmox.backup_create(module.params, module.check_mode, node_endpoints) - except Exception as e: - module.fail_json(msg="Creating backups failed with exception: %s" % to_native(e)) - - if module.check_mode: - module.exit_json(backups=result, changed=True, msg="Backups would be created") - - elif len([entry for entry in result if entry["upid"] == "OK"]) == len(result): - module.exit_json(backups=result, changed=False, msg="Backup request sent to proxmox, no tasks created") - - elif module.params["wait"]: - module.exit_json(backups=result, changed=True, msg="Backups succeeded") - - else: - module.exit_json(backups=result, changed=True, - msg="Backup tasks created") - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/proxmox_disk.py b/plugins/modules/proxmox_disk.py deleted file mode 100644 index a70d810983..0000000000 --- a/plugins/modules/proxmox_disk.py +++ /dev/null @@ -1,874 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2022, Castor Sky (@castorsky) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r""" -module: proxmox_disk -short_description: Management of a disk of a Qemu(KVM) VM in a Proxmox VE cluster -version_added: 5.7.0 -description: - - Allows you to perform some supported operations on a disk in Qemu(KVM) Virtual Machines in a Proxmox VE cluster. -author: "Castor Sky (@castorsky) " -attributes: - check_mode: - support: none - diff_mode: - support: none - action_group: - version_added: 9.0.0 -options: - name: - description: - - The unique name of the VM. - - You can specify either O(name) or O(vmid) or both of them. - type: str - vmid: - description: - - The unique ID of the VM. - - You can specify either O(vmid) or O(name) or both of them. - type: int - disk: - description: - - The disk key (V(unused[n]), V(ide[n]), V(sata[n]), V(scsi[n]) or V(virtio[n])) you want to operate on. - - Disk buses (IDE, SATA and so on) have fixed ranges of V(n) that accepted by Proxmox API. 
- - 'For IDE: 0-3; for SCSI: 0-30; for SATA: 0-5; for VirtIO: 0-15; for Unused: 0-255.'
- type: str
- required: true
- state:
- description:
- - Indicates the desired state of the disk.
- - O(state=present) can be used to create or replace a disk, or to update options in an existing disk. By default it creates a missing disk or updates options in an existing one. See the O(create) parameter description to control the behavior of this option.
- - Some option updates (like O(cache)) are not applied instantly and require a VM restart.
- - Use O(state=detached) to detach an existing disk from the VM without removing it entirely. When O(state=detached) and the disk is V(unused[n]), it is left in the same state (not removed).
- - O(state=moved) may be used to change the backing storage for the disk within the same VM or to send the disk to another VM (using the same backing storage).
- - O(state=resized) is intended to change the disk size. As of Proxmox 7.2 you can only increase the disk size, because shrinking disks is not supported by the PVE API and has to be done manually.
- - To entirely remove the disk from backing storage, use O(state=absent).
- type: str
- choices: ['present', 'resized', 'detached', 'moved', 'absent']
- default: present
- create:
- description:
- - With the O(create) flag you can control the behavior of O(state=present).
- - When O(create=disabled) it does not create a new disk (if missing) but updates options in an existing disk.
- - When O(create=regular) it either creates a new disk (if missing) or updates options in an existing disk.
- - When O(create=forced) it always creates a new disk (if the disk exists, it is detached and left unused).
- type: str
- choices: ['disabled', 'regular', 'forced']
- default: regular
- storage:
- description:
- - The drive's backing storage.
- - Used only when O(state) is V(present).
- type: str
- size:
- description:
- - Desired volume size in GB to allocate when O(state=present) (specify O(size) without suffix).
- - New (or additional) size of the volume when O(state=resized). With the V(+) sign the value is added to the actual size of the volume, and without it the value is taken as an absolute one.
- type: str
- bwlimit:
- description:
- - Override I/O bandwidth limit (in KB/s).
- - Used only when O(state=moved).
- type: int
- delete_moved:
- description:
- - Delete the original disk after a successful copy.
- - By default the original disk is kept as an unused disk.
- - Used only when O(state=moved).
- type: bool
- target_disk:
- description:
- - The config key the disk will be moved to on the target VM (for example, V(ide0) or V(scsi1)).
- - Default is the source disk key.
- - Used only when O(state=moved).
- type: str
- target_storage:
- description:
- - Move the disk to this storage when O(state=moved).
- - You can move between storages only within the scope of one VM.
- - Mutually exclusive with O(target_vmid).
- - Consider increasing O(timeout) in case of large disk images or a slow storage backend.
- type: str
- target_vmid:
- description:
- - The (unique) ID of the VM the disk will be placed in when O(state=moved).
- - You can move a disk between VMs only when the same storage is used.
- - Mutually exclusive with O(target_storage).
- type: int
- timeout:
- description:
- - Timeout in seconds to wait for slow operations such as importing a disk or moving a disk between storages.
- - Used only when O(state) is V(present) or V(moved).
- type: int
- default: 600
- aio:
- description:
- - AIO type to use.
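The two mutually exclusive move targets above map onto the move_disk endpoint roughly as follows; a sketch with hypothetical IDs (the module's move_disk handler appears further down):

# Same VM, different storage: 'storage' (and optionally 'format') is set.
move_within_vm = {"disk": "scsi7", "vmid": 101, "storage": "local", "format": "qcow2"}
# Same storage, different VM: 'target-disk' and 'target-vmid' are set instead.
move_to_other_vm = {"disk": "scsi7", "vmid": 101, "target-disk": "scsi7", "target-vmid": 201}
# api.nodes(node).qemu(101).move_disk.post(**move_within_vm)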
- type: str - choices: ['native', 'threads', 'io_uring'] - backup: - description: - - Whether the drive should be included when making backups. - type: bool - bps_max_length: - description: - - Maximum length of total r/w I/O bursts in seconds. - type: int - bps_rd_max_length: - description: - - Maximum length of read I/O bursts in seconds. - type: int - bps_wr_max_length: - description: - - Maximum length of write I/O bursts in seconds. - type: int - cache: - description: - - The drive's cache mode. - type: str - choices: ['none', 'writethrough', 'writeback', 'unsafe', 'directsync'] - cyls: - description: - - Force the drive's physical geometry to have a specific cylinder count. - type: int - detect_zeroes: - description: - - Control whether to detect and try to optimize writes of zeroes. - type: bool - discard: - description: - - Control whether to pass discard/trim requests to the underlying storage. - type: str - choices: ['ignore', 'on'] - format: - description: - - The drive's backing file's data format. - type: str - choices: ['raw', 'cow', 'qcow', 'qed', 'qcow2', 'vmdk', 'cloop'] - heads: - description: - - Force the drive's physical geometry to have a specific head count. - type: int - import_from: - description: - - Import volume from this existing one. - - Volume string format. - - V(:/) or V(/). - - Attention! Only root can use absolute paths. - - This parameter is mutually exclusive with O(size). - - Increase O(timeout) parameter when importing large disk images or using slow storage. - type: str - iops: - description: - - Maximum total r/w I/O in operations per second. - - You can specify either total limit or per operation (mutually exclusive with O(iops_rd) and O(iops_wr)). - type: int - iops_max: - description: - - Maximum unthrottled total r/w I/O pool in operations per second. - type: int - iops_max_length: - description: - - Maximum length of total r/w I/O bursts in seconds. - type: int - iops_rd: - description: - - Maximum read I/O in operations per second. - - You can specify either read or total limit (mutually exclusive with O(iops)). - type: int - iops_rd_max: - description: - - Maximum unthrottled read I/O pool in operations per second. - type: int - iops_rd_max_length: - description: - - Maximum length of read I/O bursts in seconds. - type: int - iops_wr: - description: - - Maximum write I/O in operations per second. - - You can specify either write or total limit (mutually exclusive with O(iops)). - type: int - iops_wr_max: - description: - - Maximum unthrottled write I/O pool in operations per second. - type: int - iops_wr_max_length: - description: - - Maximum length of write I/O bursts in seconds. - type: int - iothread: - description: - - Whether to use iothreads for this drive (only for SCSI and VirtIO). - type: bool - mbps: - description: - - Maximum total r/w speed in megabytes per second. - - Can be fractional but use with caution - fractionals less than 1 are not supported officially. - - You can specify either total limit or per operation (mutually exclusive with O(mbps_rd) and O(mbps_wr)). - type: float - mbps_max: - description: - - Maximum unthrottled total r/w pool in megabytes per second. - type: float - mbps_rd: - description: - - Maximum read speed in megabytes per second. - - You can specify either read or total limit (mutually exclusive with O(mbps)). - type: float - mbps_rd_max: - description: - - Maximum unthrottled read pool in megabytes per second. - type: float - mbps_wr: - description: - - Maximum write speed in megabytes per second. 
- - You can specify either write or total limit (mutually exclusive with O(mbps)).
- type: float
- mbps_wr_max:
- description:
- - Maximum unthrottled write pool in megabytes per second.
- type: float
- media:
- description:
- - The drive's media type.
- type: str
- choices: ['cdrom', 'disk']
- iso_image:
- description:
- - The ISO image to be mounted on the CD-ROM specified in O(disk).
- - O(media=cdrom) needs to be specified for this option to work.
- - Use V(<storage>:iso/<image>) to mount an ISO.
- - Use V(cdrom) to access the physical CD/DVD drive.
- - Use V(none) to unmount the image from an existing CD-ROM or to create an empty CD-ROM drive.
- type: str
- version_added: 8.1.0
- queues:
- description:
- - Number of queues (SCSI only).
- type: int
- replicate:
- description:
- - Whether the drive should be considered for replication jobs.
- type: bool
- rerror:
- description:
- - Read error action.
- type: str
- choices: ['ignore', 'report', 'stop']
- ro:
- description:
- - Whether the drive is read-only.
- type: bool
- scsiblock:
- description:
- - Whether to use scsi-block for full passthrough of the host block device.
- - Can lead to I/O errors in combination with low memory or high memory fragmentation on the host.
- type: bool
- secs:
- description:
- - Force the drive's physical geometry to have a specific sector count.
- type: int
- serial:
- description:
- - The drive's reported serial number, URL-encoded, up to 20 bytes long.
- type: str
- shared:
- description:
- - Mark this locally-managed volume as available on all nodes.
- - This option does not share the volume automatically, it assumes it is shared already!
- type: bool
- snapshot:
- description:
- - Control qemu's snapshot mode feature.
- - If activated, changes made to the disk are temporary and will be discarded when the VM is shut down.
- type: bool
- ssd:
- description:
- - Whether to expose this drive as an SSD, rather than a rotational hard disk.
- type: bool
- trans:
- description:
- - Force disk geometry BIOS translation mode.
- type: str
- choices: ['auto', 'lba', 'none']
- werror:
- description:
- - Write error action.
- type: str
- choices: ['enospc', 'ignore', 'report', 'stop']
- wwn:
- description:
- - The drive's worldwide name, encoded as a 16-byte hex string, prefixed by V(0x).
- type: str -extends_documentation_fragment: - - community.general.proxmox.actiongroup_proxmox - - community.general.proxmox.documentation - - community.general.attributes -""" - -EXAMPLES = r""" -- name: Create new disk in VM (do not rewrite in case it exists already) - community.general.proxmox_disk: - api_host: node1 - api_user: root@pam - api_token_id: token1 - api_token_secret: some-token-data - name: vm-name - disk: scsi3 - backup: true - cache: none - storage: local-zfs - size: 5 - state: present - -- name: Create new disk in VM (force rewrite in case it exists already) - community.general.proxmox_disk: - api_host: node1 - api_user: root@pam - api_token_id: token1 - api_token_secret: some-token-data - vmid: 101 - disk: scsi3 - format: qcow2 - storage: local - size: 16 - create: forced - state: present - -- name: Update existing disk - community.general.proxmox_disk: - api_host: node1 - api_user: root@pam - api_token_id: token1 - api_token_secret: some-token-data - vmid: 101 - disk: ide0 - backup: false - ro: true - aio: native - state: present - -- name: Grow existing disk - community.general.proxmox_disk: - api_host: node1 - api_user: root@pam - api_token_id: token1 - api_token_secret: some-token-data - vmid: 101 - disk: sata4 - size: +5G - state: resized - -- name: Detach disk (leave it unused) - community.general.proxmox_disk: - api_host: node1 - api_user: root@pam - api_token_id: token1 - api_token_secret: some-token-data - name: vm-name - disk: virtio0 - state: detached - -- name: Move disk to another storage - community.general.proxmox_disk: - api_host: node1 - api_user: root@pam - api_password: secret - vmid: 101 - disk: scsi7 - target_storage: local - format: qcow2 - state: moved - -- name: Move disk from one VM to another - community.general.proxmox_disk: - api_host: node1 - api_user: root@pam - api_token_id: token1 - api_token_secret: some-token-data - vmid: 101 - disk: scsi7 - target_vmid: 201 - state: moved - -- name: Remove disk permanently - community.general.proxmox_disk: - api_host: node1 - api_user: root@pam - api_password: secret - vmid: 101 - disk: scsi4 - state: absent - -- name: Mount ISO image on CD-ROM (create drive if missing) - community.general.proxmox_disk: - api_host: node1 - api_user: root@pam - api_token_id: token1 - api_token_secret: some-token-data - vmid: 101 - disk: ide2 - media: cdrom - iso_image: local:iso/favorite_distro_amd64.iso - state: present -""" - -RETURN = r""" -vmid: - description: The VM vmid. - returned: success - type: int - sample: 101 -msg: - description: A short message on what the module did. - returned: always - type: str - sample: "Disk scsi3 created in VM 101" -""" - -from ansible.module_utils.basic import AnsibleModule - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion -from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, - ProxmoxAnsible) -from re import compile, match, sub - - -def disk_conf_str_to_dict(config_string): - """ - Transform Proxmox configuration string for disk element into dictionary which has - volume option parsed in '{ storage }:{ volume }' format and other options parsed - in '{ option }={ value }' format. 
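A worked example of that parsing contract, restating the docstring's first sample string, "local-lvm:vm-100-disk-0,ssd=1,discard=on,size=25G":

parsed = {
    "volume": "local-lvm:vm-100-disk-0",   # "{ storage }:{ volume }"
    "storage_name": "local-lvm",
    "volume_name": "vm-100-disk-0",
    "ssd": "1",                            # remaining "option=value" pairs,
    "discard": "on",                       # kept as strings for comparison
    "size": "25G",
}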
This dictionary will be compared afterward with - attributes that user passed to this module in playbook.\n - config_string examples: - - local-lvm:vm-100-disk-0,ssd=1,discard=on,size=25G - - local:iso/new-vm-ignition.iso,media=cdrom,size=70k - - none,media=cdrom - :param config_string: Retrieved from Proxmox API configuration string - :return: Dictionary with volume option divided into parts ('volume_name', 'storage_name', 'volume') \n - and other options as key:value. - """ - config = config_string.split(',') - - # When empty CD-ROM drive present, the volume part of config string is "none". - storage_volume = config.pop(0) - if storage_volume in ["none", "cdrom"]: - config_current = dict( - volume=storage_volume, - storage_name=None, - volume_name=None, - size=None, - ) - else: - storage_volume = storage_volume.split(':') - storage_name = storage_volume[0] - volume_name = storage_volume[1] - config_current = dict( - volume='%s:%s' % (storage_name, volume_name), - storage_name=storage_name, - volume_name=volume_name, - ) - - config.sort() - for option in config: - k, v = option.split('=') - config_current[k] = v - - return config_current - - -class ProxmoxDiskAnsible(ProxmoxAnsible): - create_update_fields = [ - 'aio', 'backup', 'bps_max_length', 'bps_rd_max_length', 'bps_wr_max_length', - 'cache', 'cyls', 'detect_zeroes', 'discard', 'format', 'heads', 'import_from', 'iops', 'iops_max', - 'iops_max_length', 'iops_rd', 'iops_rd_max', 'iops_rd_max_length', 'iops_wr', 'iops_wr_max', - 'iops_wr_max_length', 'iothread', 'mbps', 'mbps_max', 'mbps_rd', 'mbps_rd_max', 'mbps_wr', 'mbps_wr_max', - 'media', 'queues', 'replicate', 'rerror', 'ro', 'scsiblock', 'secs', 'serial', 'shared', 'snapshot', - 'ssd', 'trans', 'werror', 'wwn' - ] - supported_bus_num_ranges = dict( - ide=range(0, 4), - scsi=range(0, 31), - sata=range(0, 6), - virtio=range(0, 16), - unused=range(0, 256) - ) - - def get_create_attributes(self): - # Sanitize parameters dictionary: - # - Remove not defined args - # - Ensure True and False converted to int. - # - Remove unnecessary parameters - params = { - k: int(v) if isinstance(v, bool) else v - for k, v in self.module.params.items() - if v is not None and k in self.create_update_fields - } - return params - - def create_disk(self, disk, vmid, vm, vm_config): - """Create a disk in the specified virtual machine. Check if creation is required, - and if so, compile the disk configuration and create it by updating the virtual - machine configuration. After calling the API function, wait for the result. - - :param disk: ID of the disk in format "". - :param vmid: ID of the virtual machine where the disk will be created. - :param vm: Name of the virtual machine where the disk will be created. - :param vm_config: Configuration of the virtual machine. - :return: (bool, string) Whether the task was successful or not - and the message to return to Ansible. - """ - create = self.module.params['create'] - if create == 'disabled' and disk not in vm_config: - # NOOP - return False, "Disk %s not found in VM %s and creation was disabled in parameters." % (disk, vmid) - - timeout_str = "Reached timeout. Last line in task before timeout: %s" - if (create == 'regular' and disk not in vm_config) or (create == 'forced'): - # CREATE - playbook_config = self.get_create_attributes() - import_string = playbook_config.pop('import_from', None) - iso_image = self.module.params.get('iso_image', None) - - if import_string: - # When 'import_from' option is present in task options. 
- config_str = "%s:%s,import-from=%s" % (self.module.params["storage"], "0", import_string) - timeout_str = "Reached timeout while importing VM disk. Last line in task before timeout: %s" - ok_str = "Disk %s imported into VM %s" - elif iso_image is not None: - # disk=, media=cdrom, iso_image= - config_str = iso_image - ok_str = "CD-ROM was created on %s bus in VM %s" - else: - config_str = self.module.params["storage"] - if self.module.params.get("media") != "cdrom": - config_str += ":%s" % (self.module.params["size"]) - ok_str = "Disk %s created in VM %s" - timeout_str = "Reached timeout while creating VM disk. Last line in task before timeout: %s" - - for k, v in playbook_config.items(): - config_str += ',%s=%s' % (k, v) - - disk_config_to_apply = {self.module.params["disk"]: config_str} - - if create in ['disabled', 'regular'] and disk in vm_config: - # UPDATE - ok_str = "Disk %s updated in VM %s" - iso_image = self.module.params.get('iso_image', None) - - proxmox_config = disk_conf_str_to_dict(vm_config[disk]) - # 'import_from' fails on disk updates - playbook_config = self.get_create_attributes() - playbook_config.pop('import_from', None) - - # Begin composing configuration string - if iso_image is not None: - config_str = iso_image - else: - config_str = proxmox_config["volume"] - # Append all mandatory fields from playbook_config - for k, v in playbook_config.items(): - config_str += ',%s=%s' % (k, v) - - # Append to playbook_config fields which are constants for disk images - for option in ['size', 'storage_name', 'volume', 'volume_name']: - playbook_config.update({option: proxmox_config[option]}) - # CD-ROM is special disk device and its disk image is subject to change - if iso_image is not None: - playbook_config['volume'] = iso_image - # Values in params are numbers, but strings are needed to compare with disk_config - playbook_config = {k: str(v) for k, v in playbook_config.items()} - - # Now compare old and new config to detect if changes are needed - if proxmox_config == playbook_config: - return False, "Disk %s is up to date in VM %s" % (disk, vmid) - - disk_config_to_apply = {self.module.params["disk"]: config_str} - - current_task_id = self.proxmox_api.nodes(vm['node']).qemu(vmid).config.post(**disk_config_to_apply) - task_success, fail_reason = self.api_task_complete(vm['node'], current_task_id, self.module.params['timeout']) - - if task_success: - return True, ok_str % (disk, vmid) - else: - if fail_reason == ProxmoxAnsible.TASK_TIMED_OUT: - self.module.fail_json( - msg=timeout_str % self.proxmox_api.nodes(vm['node']).tasks(current_task_id).log.get()[:1] - ) - else: - self.module.fail_json(msg="Error occurred on task execution: %s" % fail_reason) - - def move_disk(self, disk, vmid, vm, vm_config): - """Call the `move_disk` API function that moves the disk to another storage and wait for the result. - - :param disk: ID of disk in format "". - :param vmid: ID of virtual machine which disk will be moved. - :param vm: Name of virtual machine which disk will be moved. - :param vm_config: Virtual machine configuration. - :return: (bool, string) Whether the task was successful or not - and the message to return to Ansible. 
- """ - disk_config = disk_conf_str_to_dict(vm_config[disk]) - disk_storage = disk_config["storage_name"] - - params = dict() - params['disk'] = disk - params['vmid'] = vmid - params['bwlimit'] = self.module.params['bwlimit'] - params['storage'] = self.module.params['target_storage'] - params['target-disk'] = self.module.params['target_disk'] - params['target-vmid'] = self.module.params['target_vmid'] - params['format'] = self.module.params['format'] - params['delete'] = 1 if self.module.params.get('delete_moved', False) else 0 - # Remove not defined args - params = {k: v for k, v in params.items() if v is not None} - - if params.get('storage', False): - # Check if the disk is already in the target storage. - disk_config = disk_conf_str_to_dict(vm_config[disk]) - if params['storage'] == disk_config['storage_name']: - return False, "Disk %s already at %s storage" % (disk, disk_storage) - - current_task_id = self.proxmox_api.nodes(vm['node']).qemu(vmid).move_disk.post(**params) - task_success, fail_reason = self.api_task_complete(vm['node'], current_task_id, self.module.params['timeout']) - - if task_success: - return True, "Disk %s moved from VM %s storage %s" % (disk, vmid, disk_storage) - else: - if fail_reason == ProxmoxAnsible.TASK_TIMED_OUT: - self.module.fail_json( - msg='Reached timeout while waiting for moving VM disk. Last line in task before timeout: %s' % - self.proxmox_api.nodes(vm['node']).tasks(current_task_id).log.get()[:1] - ) - else: - self.module.fail_json(msg="Error occurred on task execution: %s" % fail_reason) - - def resize_disk(self, disk, vmid, vm, vm_config): - """Call the `resize` API function to change the disk size and wait for the result. - - :param disk: ID of disk in format "". - :param vmid: ID of virtual machine which disk will be resized. - :param vm: Name of virtual machine which disk will be resized. - :param vm_config: Virtual machine configuration. - :return: (Bool, string) Whether the task was successful or not - and the message to return to Ansible. - """ - size = self.module.params['size'] - if not match(r'^\+?\d+(\.\d+)?[KMGT]?$', size): - self.module.fail_json(msg="Unrecognized size pattern for disk %s: %s" % (disk, size)) - disk_config = disk_conf_str_to_dict(vm_config[disk]) - actual_size = disk_config['size'] - if size == actual_size: - return False, "Disk %s is already %s size" % (disk, size) - - # Resize disk API endpoint has changed at v8.0: PUT method become async. - version = self.version() - pve_major_version = 3 if version < LooseVersion('4.0') else version.version[0] - if pve_major_version >= 8: - current_task_id = self.proxmox_api.nodes(vm['node']).qemu(vmid).resize.set(disk=disk, size=size) - task_success, fail_reason = self.api_task_complete(vm['node'], current_task_id, self.module.params['timeout']) - if task_success: - return True, "Disk %s resized in VM %s" % (disk, vmid) - else: - if fail_reason == ProxmoxAnsible.TASK_TIMED_OUT: - self.module.fail_json( - msg="Reached timeout while resizing disk. 
-
-
-def main():
-    module_args = proxmox_auth_argument_spec()
-    disk_args = dict(
-        # Proxmox native parameters
-        aio=dict(type='str', choices=['native', 'threads', 'io_uring']),
-        backup=dict(type='bool'),
-        bps_max_length=dict(type='int'),
-        bps_rd_max_length=dict(type='int'),
-        bps_wr_max_length=dict(type='int'),
-        cache=dict(type='str', choices=['none', 'writethrough', 'writeback', 'unsafe', 'directsync']),
-        cyls=dict(type='int'),
-        detect_zeroes=dict(type='bool'),
-        discard=dict(type='str', choices=['ignore', 'on']),
-        format=dict(type='str', choices=['raw', 'cow', 'qcow', 'qed', 'qcow2', 'vmdk', 'cloop']),
-        heads=dict(type='int'),
-        import_from=dict(type='str'),
-        iops=dict(type='int'),
-        iops_max=dict(type='int'),
-        iops_max_length=dict(type='int'),
-        iops_rd=dict(type='int'),
-        iops_rd_max=dict(type='int'),
-        iops_rd_max_length=dict(type='int'),
-        iops_wr=dict(type='int'),
-        iops_wr_max=dict(type='int'),
-        iops_wr_max_length=dict(type='int'),
-        iothread=dict(type='bool'),
-        iso_image=dict(type='str'),
-        mbps=dict(type='float'),
-        mbps_max=dict(type='float'),
-        mbps_rd=dict(type='float'),
-        mbps_rd_max=dict(type='float'),
-        mbps_wr=dict(type='float'),
-        mbps_wr_max=dict(type='float'),
-        media=dict(type='str', choices=['cdrom', 'disk']),
-        queues=dict(type='int'),
-        replicate=dict(type='bool'),
-        rerror=dict(type='str', choices=['ignore', 'report', 'stop']),
-        ro=dict(type='bool'),
-        scsiblock=dict(type='bool'),
-        secs=dict(type='int'),
-        serial=dict(type='str'),
-        shared=dict(type='bool'),
-        snapshot=dict(type='bool'),
-        ssd=dict(type='bool'),
-        trans=dict(type='str', choices=['auto', 'lba', 'none']),
-        werror=dict(type='str', choices=['enospc', 'ignore', 'report', 'stop']),
-        wwn=dict(type='str'),
-
-        # Disk moving related parameters
-        bwlimit=dict(type='int'),
-        target_storage=dict(type='str'),
-        target_disk=dict(type='str'),
-        target_vmid=dict(type='int'),
-        delete_moved=dict(type='bool'),
-        timeout=dict(type='int', default=600),
-
-        # Module related parameters
-        name=dict(type='str'),
-        vmid=dict(type='int'),
-        disk=dict(type='str', required=True),
-        storage=dict(type='str'),
-        size=dict(type='str'),
-        state=dict(type='str', choices=['present', 'resized', 'detached', 'moved', 'absent'],
-                   default='present'),
-        create=dict(type='str', choices=['disabled', 'regular', 'forced'], default='regular'),
-    )
-
-    module_args.update(disk_args)
-
-    module = AnsibleModule(
-        argument_spec=module_args,
-        required_together=[('api_token_id', 'api_token_secret')],
-        required_one_of=[('name', 'vmid'), ('api_password', 'api_token_id')],
-        required_if=[
-            ('create', 'forced', ['storage']),
-            ('state', 'resized', ['size']),
-        ],
-        required_by={
-            'target_disk': 'target_vmid',
-            'mbps_max': 'mbps',
-            'mbps_rd_max': 'mbps_rd',
-            'mbps_wr_max': 'mbps_wr',
-            'bps_max_length': 'mbps_max',
-            'bps_rd_max_length': 'mbps_rd_max',
-            'bps_wr_max_length': 'mbps_wr_max',
-            'iops_max': 'iops',
-            'iops_rd_max': 'iops_rd',
-            'iops_wr_max': 'iops_wr',
-            'iops_max_length': 'iops_max',
-            'iops_rd_max_length': 'iops_rd_max',
-            'iops_wr_max_length': 'iops_wr_max',
-            'iso_image': 'media',
-        },
-        supports_check_mode=False,
-        mutually_exclusive=[
-            ('target_vmid',
'target_storage'), - ('mbps', 'mbps_rd'), - ('mbps', 'mbps_wr'), - ('iops', 'iops_rd'), - ('iops', 'iops_wr'), - ('import_from', 'size'), - ] - ) - - proxmox = ProxmoxDiskAnsible(module) - - disk = module.params['disk'] - # Verify disk name has appropriate name - disk_regex = compile(r'^([a-z]+)([0-9]+)$') - disk_bus = sub(disk_regex, r'\1', disk) - disk_number = int(sub(disk_regex, r'\2', disk)) - if disk_bus not in proxmox.supported_bus_num_ranges: - proxmox.module.fail_json(msg='Unsupported disk bus: %s' % disk_bus) - elif disk_number not in proxmox.supported_bus_num_ranges[disk_bus]: - bus_range = proxmox.supported_bus_num_ranges[disk_bus] - proxmox.module.fail_json(msg='Disk %s number not in range %s..%s ' % (disk, bus_range[0], bus_range[-1])) - - name = module.params['name'] - state = module.params['state'] - vmid = module.params['vmid'] or proxmox.get_vmid(name) - - # Ensure VM id exists and retrieve its config - vm = None - vm_config = None - try: - vm = proxmox.get_vm(vmid) - vm_config = proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).config.get() - except Exception as e: - proxmox.module.fail_json(msg='Getting information for VM %s failed with exception: %s' % (vmid, str(e))) - - # Do not try to perform actions on missing disk - if disk not in vm_config and state in ['resized', 'moved']: - module.fail_json(vmid=vmid, msg='Unable to process missing disk %s in VM %s' % (disk, vmid)) - - if state == 'present': - try: - changed, message = proxmox.create_disk(disk, vmid, vm, vm_config) - module.exit_json(changed=changed, vmid=vmid, msg=message) - except Exception as e: - module.fail_json(vmid=vmid, msg='Unable to create/update disk %s in VM %s: %s' % (disk, vmid, str(e))) - - elif state == 'detached': - try: - if disk_bus == 'unused': - module.exit_json(changed=False, vmid=vmid, msg='Disk %s already detached in VM %s' % (disk, vmid)) - if disk not in vm_config: - module.exit_json(changed=False, vmid=vmid, msg="Disk %s not present in VM %s config" % (disk, vmid)) - proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).unlink.put(idlist=disk, force=0) - module.exit_json(changed=True, vmid=vmid, msg="Disk %s detached from VM %s" % (disk, vmid)) - except Exception as e: - module.fail_json(msg="Failed to detach disk %s from VM %s with exception: %s" % (disk, vmid, str(e))) - - elif state == 'moved': - try: - changed, message = proxmox.move_disk(disk, vmid, vm, vm_config) - module.exit_json(changed=changed, vmid=vmid, msg=message) - except Exception as e: - module.fail_json(msg="Failed to move disk %s in VM %s with exception: %s" % (disk, vmid, str(e))) - - elif state == 'resized': - try: - changed, message = proxmox.resize_disk(disk, vmid, vm, vm_config) - module.exit_json(changed=changed, vmid=vmid, msg=message) - except Exception as e: - module.fail_json(msg="Failed to resize disk %s in VM %s with exception: %s" % (disk, vmid, str(e))) - - elif state == 'absent': - try: - if disk not in vm_config: - module.exit_json(changed=False, vmid=vmid, msg="Disk %s is already absent in VM %s" % (disk, vmid)) - proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).unlink.put(idlist=disk, force=1) - module.exit_json(changed=True, vmid=vmid, msg="Disk %s removed from VM %s" % (disk, vmid)) - except Exception as e: - module.fail_json(vmid=vmid, msg='Unable to remove disk %s from VM %s: %s' % (disk, vmid, str(e))) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/proxmox_domain_info.py b/plugins/modules/proxmox_domain_info.py deleted file mode 100644 index d9836da277..0000000000 --- 
a/plugins/modules/proxmox_domain_info.py +++ /dev/null @@ -1,137 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright Tristan Le Guern (@tleguern) -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r""" -module: proxmox_domain_info -short_description: Retrieve information about one or more Proxmox VE domains -version_added: 1.3.0 -description: - - Retrieve information about one or more Proxmox VE domains. -attributes: - action_group: - version_added: 9.0.0 -options: - domain: - description: - - Restrict results to a specific authentication realm. - aliases: ['realm', 'name'] - type: str -author: Tristan Le Guern (@tleguern) -extends_documentation_fragment: - - community.general.proxmox.actiongroup_proxmox - - community.general.proxmox.documentation - - community.general.attributes - - community.general.attributes.info_module -""" - - -EXAMPLES = r""" -- name: List existing domains - community.general.proxmox_domain_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - register: proxmox_domains - -- name: Retrieve information about the pve domain - community.general.proxmox_domain_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - domain: pve - register: proxmox_domain_pve -""" - - -RETURN = r""" -proxmox_domains: - description: List of authentication domains. - returned: always, but can be empty - type: list - elements: dict - contains: - comment: - description: Short description of the realm. - returned: on success - type: str - realm: - description: Realm name. - returned: on success - type: str - type: - description: Realm type. - returned: on success - type: str - digest: - description: Realm hash. 
- returned: on success, can be absent - type: str -""" - - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.proxmox import ( - proxmox_auth_argument_spec, ProxmoxAnsible) - - -class ProxmoxDomainInfoAnsible(ProxmoxAnsible): - def get_domain(self, realm): - try: - domain = self.proxmox_api.access.domains.get(realm) - except Exception: - self.module.fail_json(msg="Domain '%s' does not exist" % realm) - domain['realm'] = realm - return domain - - def get_domains(self): - domains = self.proxmox_api.access.domains.get() - return domains - - -def proxmox_domain_info_argument_spec(): - return dict( - domain=dict(type='str', aliases=['realm', 'name']), - ) - - -def main(): - module_args = proxmox_auth_argument_spec() - domain_info_args = proxmox_domain_info_argument_spec() - module_args.update(domain_info_args) - - module = AnsibleModule( - argument_spec=module_args, - required_one_of=[('api_password', 'api_token_id')], - required_together=[('api_token_id', 'api_token_secret')], - supports_check_mode=True - ) - result = dict( - changed=False - ) - - proxmox = ProxmoxDomainInfoAnsible(module) - domain = module.params['domain'] - - if domain: - domains = [proxmox.get_domain(realm=domain)] - else: - domains = proxmox.get_domains() - result['proxmox_domains'] = domains - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/proxmox_group_info.py b/plugins/modules/proxmox_group_info.py deleted file mode 100644 index f62d467af8..0000000000 --- a/plugins/modules/proxmox_group_info.py +++ /dev/null @@ -1,147 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright Tristan Le Guern -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r""" -module: proxmox_group_info -short_description: Retrieve information about one or more Proxmox VE groups -version_added: 1.3.0 -description: - - Retrieve information about one or more Proxmox VE groups. -attributes: - action_group: - version_added: 9.0.0 -options: - group: - description: - - Restrict results to a specific group. - aliases: ['groupid', 'name'] - type: str -author: Tristan Le Guern (@tleguern) -extends_documentation_fragment: - - community.general.proxmox.actiongroup_proxmox - - community.general.proxmox.documentation - - community.general.attributes - - community.general.attributes.info_module -""" - - -EXAMPLES = r""" -- name: List existing groups - community.general.proxmox_group_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - register: proxmox_groups - -- name: Retrieve information about the admin group - community.general.proxmox_group_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - group: admin - register: proxmox_group_admin -""" - - -RETURN = r""" -proxmox_groups: - description: List of groups. - returned: always, but can be empty - type: list - elements: dict - contains: - comment: - description: Short description of the group. 
- returned: on success, can be absent - type: str - groupid: - description: Group name. - returned: on success - type: str - users: - description: List of users in the group. - returned: on success - type: list - elements: str -""" - - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.proxmox import ( - proxmox_auth_argument_spec, ProxmoxAnsible) - - -class ProxmoxGroupInfoAnsible(ProxmoxAnsible): - def get_group(self, groupid): - try: - group = self.proxmox_api.access.groups.get(groupid) - except Exception: - self.module.fail_json(msg="Group '%s' does not exist" % groupid) - group['groupid'] = groupid - return ProxmoxGroup(group) - - def get_groups(self): - groups = self.proxmox_api.access.groups.get() - return [ProxmoxGroup(group) for group in groups] - - -class ProxmoxGroup: - def __init__(self, group): - self.group = dict() - # Data representation is not the same depending on API calls - for k, v in group.items(): - if k == 'users' and isinstance(v, str): - self.group['users'] = v.split(',') - elif k == 'members': - self.group['users'] = group['members'] - else: - self.group[k] = v - - -def proxmox_group_info_argument_spec(): - return dict( - group=dict(type='str', aliases=['groupid', 'name']), - ) - - -def main(): - module_args = proxmox_auth_argument_spec() - group_info_args = proxmox_group_info_argument_spec() - module_args.update(group_info_args) - - module = AnsibleModule( - argument_spec=module_args, - required_one_of=[('api_password', 'api_token_id')], - required_together=[('api_token_id', 'api_token_secret')], - supports_check_mode=True - ) - result = dict( - changed=False - ) - - proxmox = ProxmoxGroupInfoAnsible(module) - group = module.params['group'] - - if group: - groups = [proxmox.get_group(groupid=group)] - else: - groups = proxmox.get_groups() - result['proxmox_groups'] = [group.group for group in groups] - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/proxmox_kvm.py b/plugins/modules/proxmox_kvm.py deleted file mode 100644 index 8258d4dd55..0000000000 --- a/plugins/modules/proxmox_kvm.py +++ /dev/null @@ -1,1607 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2016, Abdoul Bah (@helldorado) -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r""" -module: proxmox_kvm -short_description: Management of Qemu(KVM) Virtual Machines in Proxmox VE cluster -description: - - Allows you to create/delete/stop Qemu(KVM) Virtual Machines in Proxmox VE cluster. - - Since community.general 4.0.0 on, there are no more default values. -author: "Abdoul Bah (@helldorado) " -attributes: - check_mode: - support: none - diff_mode: - support: none - action_group: - version_added: 9.0.0 -options: - archive: - description: - - Specify a path to an archive to restore (instead of creating or cloning a VM). - type: str - version_added: 6.5.0 - acpi: - description: - - Specify if ACPI should be enabled/disabled. - type: bool - agent: - description: - - Specify if the QEMU Guest Agent should be enabled/disabled. - - Since community.general 5.5.0, this can also be a string instead of a boolean. - This allows to specify values such as V(enabled=1,fstrim_cloned_disks=1). - type: str - args: - description: - - Pass arbitrary arguments to kvm. 
- - This option is for experts only! - type: str - autostart: - description: - - Specify if the VM should be automatically restarted after crash (currently ignored in PVE API). - type: bool - balloon: - description: - - Specify the amount of RAM for the VM in MB. - - Using zero disables the balloon driver. - type: int - bios: - description: - - Specify the BIOS implementation. - type: str - choices: ['seabios', 'ovmf'] - boot: - description: - - Specify the boot order -> boot on floppy V(a), hard disk V(c), CD-ROM V(d), or network V(n). - - For newer versions of Proxmox VE, use a boot order like V(order=scsi0;net0;hostpci0). - - You can combine to set order. - type: str - bootdisk: - description: - - Enable booting from specified disk. Format V((ide|sata|scsi|virtio\)\\d+). - type: str - cicustom: - description: - - 'Cloud-init: Specify custom files to replace the automatically generated ones at start.' - type: str - version_added: 1.3.0 - cipassword: - description: - - 'Cloud-init: password of default user to create.' - type: str - version_added: 1.3.0 - citype: - description: - - 'Cloud-init: Specifies the cloud-init configuration format.' - - The default depends on the configured operating system type (V(ostype)). - - We use the V(nocloud) format for Linux, and V(configdrive2) for Windows. - type: str - choices: ['nocloud', 'configdrive2'] - version_added: 1.3.0 - ciupgrade: - description: - - 'Cloud-init: do an automatic package upgrade after the first boot.' - type: bool - version_added: 10.0.0 - ciuser: - description: - - 'Cloud-init: username of default user to create.' - type: str - version_added: 1.3.0 - clone: - description: - - Name of VM to be cloned. If O(vmid) is set, O(clone) can take an arbitrary value but is required for initiating the clone. - type: str - cores: - description: - - Specify number of cores per socket. - type: int - cpu: - description: - - Specify emulated CPU type. - type: str - cpulimit: - description: - - Specify if CPU usage will be limited. Value V(0) indicates no CPU limit. - - If the computer has 2 CPUs, it has total of '2' CPU time. - type: int - cpuunits: - description: - - Specify CPU weight for a VM. - - You can disable fair-scheduler configuration by setting this to V(0). - type: int - delete: - description: - - Specify a list of settings you want to delete. - type: str - description: - description: - - Specify the description for the VM. Only used on the configuration web interface. - - This is saved as comment inside the configuration file. - type: str - digest: - description: - - Specify if to prevent changes if current configuration file has different SHA1 digest. - - This can be used to prevent concurrent modifications. - type: str - efidisk0: - description: - - Specify a hash/dictionary of EFI disk options. - - Requires O(bios=ovmf) to be set to be able to use it. - type: dict - suboptions: - storage: - description: - - V(storage) is the storage identifier where to create the disk. - type: str - format: - description: - - V(format) is the drive's backing file's data format. Please refer to the Proxmox VE Administrator Guide, section Proxmox VE Storage - (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for the latest version, tables 3 to 14) to find out format supported by - the provided storage backend. - type: str - efitype: - description: - - V(efitype) indicates the size of the EFI disk. - - V(2m) will allow for a 2MB EFI disk, which will be enough to persist boot order and new boot entries. 
- - V(4m) will allow for a 4MB EFI disk, which will additionally allow to store EFI keys in order to enable Secure Boot. - type: str - choices: - - 2m - - 4m - pre_enrolled_keys: - description: - - V(pre_enrolled_keys) indicates whether EFI keys for Secure Boot should be enrolled V(1) in the VM firmware upon creation or not (0). - - If set to V(1), Secure Boot will also be enabled by default when the VM is created. - type: bool - version_added: 4.5.0 - force: - description: - - Allow to force stop VM. - - Can be used with states V(stopped), V(restarted), and V(absent). - - Requires parameter O(archive). - type: bool - format: - description: - - Target drive's backing file's data format. - - Used only with clone. - - Use O(format=unspecified) and O(full=false) for a linked clone. - - Please refer to the Proxmox VE Administrator Guide, section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) - for the latest version, tables 3 to 14) to find out format supported by the provided storage backend. - - Not specifying this option is equivalent to setting it to V(unspecified). - type: str - choices: ["cloop", "cow", "qcow", "qcow2", "qed", "raw", "vmdk", "unspecified"] - freeze: - description: - - Specify if PVE should freeze CPU at startup (use 'c' monitor command to start execution). - type: bool - full: - description: - - Create a full copy of all disk. This is always done when you clone a normal VM. - - For VM templates, we try to create a linked clone by default. - - Used only with clone. - type: bool - default: true - hookscript: - description: - - Script that will be executed during various steps in the containers lifetime. - type: str - version_added: 8.1.0 - hostpci: - description: - - Specify a hash/dictionary of map host pci devices into guest. O(hostpci='{"key":"value", "key":"value"}'). - - Keys allowed are - C(hostpci[n]) where 0 ≤ n ≤ N. - - Values allowed are - V("host="HOSTPCIID[;HOSTPCIID2...]",pcie="1|0",rombar="1|0",x-vga="1|0""). - - The C(host) parameter is Host PCI device pass through. HOSTPCIID syntax is V(bus:dev.func) (hexadecimal numbers). - - V(pcie=boolean) V(default=0) Choose the PCI-express bus (needs the q35 machine model). - - V(rombar=boolean) V(default=1) Specify whether or not the device's ROM will be visible in the guest's memory map. - - V(x-vga=boolean) V(default=0) Enable vfio-vga device support. - - /!\ This option allows direct access to host hardware. So it is no longer possible to migrate such machines - use with special care. - type: dict - hotplug: - description: - - Selectively enable hotplug features. - - This is a comma separated list of hotplug features V(network), V(disk), V(cpu), V(memory), and V(usb). - - Value 0 disables hotplug completely and value 1 is an alias for the default V(network,disk,usb). - type: str - hugepages: - description: - - Enable/disable hugepages memory. - type: str - choices: ['any', '2', '1024'] - ide: - description: - - A hash/dictionary of volume used as IDE hard disk or CD-ROM. O(ide='{"key":"value", "key":"value"}'). - - Keys allowed are - V(ide[n]) where 0 ≤ n ≤ 3. - - Values allowed are - V("storage:size,format=value"). - - V(storage) is the storage identifier where to create the disk. - - V(size) is the size of the disk in GB. - - V(format) is the drive's backing file's data format. V(qcow2|raw|subvol). 
Please refer to the Proxmox VE Administrator Guide, section - Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for the latest version, tables 3 to 14) to find out format - supported by the provided storage backend. - type: dict - ipconfig: - description: - - 'Cloud-init: Set the IP configuration.' - - A hash/dictionary of network IP configurations. O(ipconfig='{"key":"value", "key":"value"}'). - - Keys allowed are - V(ipconfig[n]) where 0 ≤ n ≤ network interfaces. - - Values allowed are - V("[gw=] [,gw6=] [,ip=] [,ip6=]"). - - 'Cloud-init: Specify IP addresses and gateways for the corresponding interface.' - - IP addresses use CIDR notation, gateways are optional but they should be in the same subnet of specified IP address. - - The special string V(dhcp) can be used for IP addresses to use DHCP, in which case no explicit gateway should be provided. - - For IPv6 the special string V(auto) can be used to use stateless autoconfiguration. - - If cloud-init is enabled and neither an IPv4 nor an IPv6 address is specified, it defaults to using dhcp on IPv4. - type: dict - version_added: 1.3.0 - keyboard: - description: - - Sets the keyboard layout for VNC server. - type: str - kvm: - description: - - Enable/disable KVM hardware virtualization. - type: bool - localtime: - description: - - Sets the real time clock to local time. - - This is enabled by default if ostype indicates a Microsoft OS. - type: bool - lock: - description: - - Lock/unlock the VM. - type: str - choices: ['migrate', 'backup', 'snapshot', 'rollback'] - machine: - description: - - Specifies the Qemu machine type. - - Type => V((pc|pc(-i440fx\)?-\\d+\\.\\d+(\\.pxe\)?|q35|pc-q35-\\d+\\.\\d+(\\.pxe\)?\)). - type: str - memory: - description: - - Memory size in MB for instance. - type: int - migrate: - description: - - Migrate the VM to O(node) if it is on another node. - type: bool - default: false - version_added: 7.0.0 - migrate_downtime: - description: - - Sets maximum tolerated downtime (in seconds) for migrations. - type: int - migrate_speed: - description: - - Sets maximum speed (in MB/s) for migrations. - - A value of 0 is no limit. - type: int - name: - description: - - Specifies the VM name. Name could be non-unique across the cluster. - - Required only for O(state=present). - - With O(state=present) if O(vmid) not provided and VM with name exists in the cluster then no changes will be made. - type: str - nameservers: - description: - - 'Cloud-init: DNS server IP address(es).' - - If unset, PVE host settings are used. - type: list - elements: str - version_added: 1.3.0 - net: - description: - - A hash/dictionary of network interfaces for the VM. O(net='{"key":"value", "key":"value"}'). - - Keys allowed are - C(net[n]) where 0 ≤ n ≤ N. - - Values allowed are - C("model="XX:XX:XX:XX:XX:XX",bridge="value",rate="value",tag="value",firewall="1|0",trunks="vlanid""). - - Model is one of C(e1000 e1000-82540em e1000-82544gc e1000-82545em i82551 i82557b i82559er ne2k_isa ne2k_pci pcnet rtl8139 virtio vmxnet3). - - C(XX:XX:XX:XX:XX:XX) should be an unique MAC address. This is automatically generated if not specified. - - The C(bridge) parameter can be used to automatically add the interface to a bridge device. The Proxmox VE standard bridge is called 'vmbr0'. - - Option C(rate) is used to limit traffic bandwidth from and to this interface. It is specified as floating point number, unit is 'Megabytes - per second'. 
- - If you specify no bridge, we create a kvm 'user' (NATed) network device, which provides DHCP and DNS services. - type: dict - newid: - description: - - VMID for the clone. Used only with clone. - - If newid is not set, the next available VM ID will be fetched from ProxmoxAPI. - type: int - numa: - description: - - A hash/dictionaries of NUMA topology. O(numa='{"key":"value", "key":"value"}'). - - Keys allowed are - V(numa[n]) where 0 ≤ n ≤ N. - - Values allowed are - V("cpu="",hostnodes="",memory="number",policy="(bind|interleave|preferred)""). - - V(cpus) CPUs accessing this NUMA node. - - V(hostnodes) Host NUMA nodes to use. - - V(memory) Amount of memory this NUMA node provides. - - V(policy) NUMA allocation policy. - type: dict - numa_enabled: - description: - - Enables NUMA. - type: bool - onboot: - description: - - Specifies whether a VM will be started during system bootup. - type: bool - ostype: - description: - - Specifies guest operating system. This is used to enable special optimization/features for specific operating systems. - - The l26 is Linux 2.6/3.X Kernel. - type: str - choices: ['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'win10', 'win11', 'l24', 'l26', 'solaris'] - parallel: - description: - - A hash/dictionary of map host parallel devices. O(parallel='{"key":"value", "key":"value"}'). - - Keys allowed are - (parallel[n]) where 0 ≤ n ≤ 2. - - Values allowed are - C("/dev/parport\d+|/dev/usb/lp\d+"). - type: dict - protection: - description: - - Enable/disable the protection flag of the VM. This will enable/disable the remove VM and remove disk operations. - type: bool - reboot: - description: - - Allow reboot. If set to V(true), the VM exit on reboot. - type: bool - revert: - description: - - Revert a pending change. - type: str - sata: - description: - - A hash/dictionary of volume used as sata hard disk or CD-ROM. O(sata='{"key":"value", "key":"value"}'). - - Keys allowed are - C(sata[n]) where 0 ≤ n ≤ 5. - - Values allowed are - C("storage:size,format=value"). - - C(storage) is the storage identifier where to create the disk. - - C(size) is the size of the disk in GB. - - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). Please refer to the Proxmox VE Administrator Guide, section - Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for the latest version, tables 3 to 14) to find out format - supported by the provided storage backend. - type: dict - scsi: - description: - - A hash/dictionary of volume used as SCSI hard disk or CD-ROM. O(scsi='{"key":"value", "key":"value"}'). - - Keys allowed are - C(scsi[n]) where 0 ≤ n ≤ 13. - - Values allowed are - C("storage:size,format=value"). - - C(storage) is the storage identifier where to create the disk. - - C(size) is the size of the disk in GB. - - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). Please refer to the Proxmox VE Administrator Guide, section - Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for the latest version, tables 3 to 14) to find out format - supported by the provided storage backend. - type: dict - scsihw: - description: - - Specifies the SCSI controller model. - type: str - choices: ['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi'] - searchdomains: - description: - - 'Cloud-init: Sets DNS search domain(s).' - - If unset, PVE host settings are used. 
-    type: list
-    elements: str
-    version_added: 1.3.0
-  serial:
-    description:
-      - A hash/dictionary of serial devices to create inside the VM. V('{"key":"value", "key":"value"}').
-      - Keys allowed are - serial[n](str; required) where 0 ≤ n ≤ 3.
-      - Values allowed are - V((/dev/.+|socket\)).
-      - /!\ If you pass through a host serial device, it is no longer possible to migrate such machines - use with special care.
-    type: dict
-  shares:
-    description:
-      - Sets the amount of memory shares for auto-ballooning (0 - 50000).
-      - The larger the number is, the more memory this VM gets.
-      - The number is relative to the weights of all other running VMs.
-      - Using 0 disables auto-ballooning, which means no limit.
-    type: int
-  skiplock:
-    description:
-      - Ignore locks.
-      - Only root is allowed to use this option.
-    type: bool
-  smbios:
-    description:
-      - Specifies SMBIOS type 1 fields.
-      - Comma separated, Base64 encoded (optional) SMBIOS properties:
-      - V([base64=<1|0>] [,family=]).
-      - V([,manufacturer=]).
-      - V([,product=]).
-      - V([,serial=]).
-      - V([,sku=]).
-      - V([,uuid=]).
-      - V([,version=]).
-    type: str
-  snapname:
-    description:
-      - The name of the snapshot. Used only with clone.
-    type: str
-  sockets:
-    description:
-      - Sets the number of CPU sockets. (1 - N).
-    type: int
-  sshkeys:
-    description:
-      - 'Cloud-init: SSH key to assign to the default user. NOT TESTED with multiple keys but a multi-line value should work.'
-    type: str
-    version_added: 1.3.0
-  startdate:
-    description:
-      - Sets the initial date of the real time clock.
-      - Valid formats for the date are V('now') or V('2016-09-25T16:01:21') or V('2016-09-25').
-    type: str
-  startup:
-    description:
-      - Startup and shutdown behavior. V([[order=]\\d+] [,up=\\d+] [,down=\\d+]).
-      - Order is a non-negative number defining the general startup order.
-      - Shutdown is done in reverse order.
-    type: str
-  state:
-    description:
-      - Indicates the desired state of the instance.
-      - If V(current), the current state of the VM will be fetched. You can access it with C(results.status).
-      - V(template) was added in community.general 8.1.0.
-    type: str
-    choices: ['present', 'started', 'absent', 'stopped', 'restarted', 'current', 'template']
-    default: present
-  storage:
-    description:
-      - Target storage for full clone.
-    type: str
-  tablet:
-    description:
-      - Enables/disables the USB tablet device.
-    type: bool
-  tags:
-    description:
-      - List of tags to apply to the VM instance.
-      - Tags must start with V([a-z0-9_]) followed by zero or more of the following characters V([a-z0-9_-+.]).
-      - Tags are only available in Proxmox 6+.
-    type: list
-    elements: str
-    version_added: 2.3.0
-  target:
-    description:
-      - Target node. Only allowed if the original VM is on shared storage.
-      - Used only with clone.
-    type: str
-  tdf:
-    description:
-      - Enables/disables time drift fix.
-    type: bool
-  template:
-    description:
-      - Enables/disables the template.
-    type: bool
-  timeout:
-    description:
-      - Timeout for operations.
-      - When used with O(state=stopped) the option sets a graceful timeout for VM stop after which a VM will be forcefully stopped.
-    type: int
-    default: 30
-  tpmstate0:
-    description:
-      - A hash/dictionary of options for the Trusted Platform Module disk.
-      - A TPM state disk is required for Windows 11 installations.
-    suboptions:
-      storage:
-        description:
-          - O(tpmstate0.storage) is the storage identifier where to create the disk.
-        type: str
-        required: true
-      version:
-        description:
-          - The TPM version to use.
-        type: str
-        choices: ['1.2', '2.0']
-        default: '2.0'
-    type: dict
-    version_added: 7.1.0
-  usb:
-    description:
-      - A hash/dictionary of USB devices for the VM. O(usb='{"key":"value", "key":"value"}').
-      - Keys allowed are - C(usb[n]) where 0 ≤ n ≤ N.
-      - Values allowed are - C(host="value|spice",mapping="value",usb3="1|0").
-      - Host is either C(spice) or the USB id/port.
-      - Option C(mapping) is the mapped USB device name.
-      - Option C(usb3) enables USB 3 support.
-    type: dict
-    version_added: 9.0.0
-  update:
-    description:
-      - If V(true), the VM will be updated with the new values.
-      - Because of how the API operates and for security reasons, updating the following parameters is disabled - O(net), O(virtio), O(ide),
-        O(sata), O(scsi). For example, updating O(net) would update the MAC address, and updating O(virtio) would always create a new disk.
-        This security feature can be disabled by setting O(update_unsafe) to V(true).
-      - Update of O(pool) is disabled. It needs an additional API endpoint not covered by this module.
-    type: bool
-    default: false
-  update_unsafe:
-    description:
-      - If V(true), do not enforce limitations on parameters O(net), O(virtio), O(ide), O(sata), O(scsi), O(efidisk0), and O(tpmstate0). Use this
-        option with caution because an improper configuration might result in a permanent loss of data (for example disk recreated).
-    type: bool
-    default: false
-    version_added: 8.4.0
-  vcpus:
-    description:
-      - Sets the number of hotplugged vcpus.
-    type: int
-  vga:
-    description:
-      - Select the VGA type. If you want to use high resolution modes (>= 1280x1024x16) then you should use option V(std) or V(vmware).
-    type: str
-    choices: ['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']
-  virtio:
-    description:
-      - A hash/dictionary of volumes used as VIRTIO hard disks. O(virtio='{"key":"value", "key":"value"}').
-      - Keys allowed are - V(virtio[n]) where 0 ≤ n ≤ 15.
-      - Values allowed are - V(storage:size,format=value).
-      - V(storage) is the storage identifier where to create the disk.
-      - V(size) is the size of the disk in GB.
-      - V(format) is the drive's backing file's data format. V(qcow2|raw|subvol). Please refer to the Proxmox VE Administrator Guide, section
-        Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for the latest version, tables 3 to 14) to find out format
-        supported by the provided storage backend.
-    type: dict
-  watchdog:
-    description:
-      - Creates a virtual hardware watchdog device.
-    type: str
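# For illustration only: a minimal standalone sketch of how dict-valued options
# such as O(net), O(virtio), O(ide), and O(sata) above end up as flat API
# parameters - create_vm() later in this module merges each nested dict into the
# request keyword arguments. The sample option values here are hypothetical.
def _flatten_options_example():
    kwargs = {
        'cores': 4,
        'net': {'net0': 'virtio,bridge=vmbr1,rate=200'},
        'virtio': {'virtio0': 'VMs_LVM:10'},
    }
    for k in list(kwargs.keys()):
        if isinstance(kwargs[k], dict):
            kwargs.update(kwargs[k])
            del kwargs[k]
    # Result: {'cores': 4, 'net0': 'virtio,bridge=vmbr1,rate=200', 'virtio0': 'VMs_LVM:10'}
    return kwargs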
-seealso:
-  - module: community.general.proxmox_vm_info
-extends_documentation_fragment:
-  - community.general.proxmox.actiongroup_proxmox
-  - community.general.proxmox.documentation
-  - community.general.proxmox.selection
-  - community.general.attributes
-"""
-
-EXAMPLES = r"""
-- name: Create new VM with minimal options
-  community.general.proxmox_kvm:
-    api_user: root@pam
-    api_password: secret
-    api_host: helldorado
-    name: spynal
-    node: sabrewulf
-
-- name: Create a VM from archive (backup)
-  community.general.proxmox_kvm:
-    api_user: root@pam
-    api_password: secret
-    api_host: helldorado
-    archive: backup-storage:backup/vm/140/2023-03-08T06:41:23Z
-    name: spynal
-
-- name: Create new VM with minimal options and given vmid
-  community.general.proxmox_kvm:
-    api_user: root@pam
-    api_password: secret
-    api_host: helldorado
-    name: spynal
-    node: sabrewulf
-    vmid: 100
-
-- name: Create new VM with two network interface options
-  community.general.proxmox_kvm:
-    api_user: root@pam
-    api_password: secret
-    api_host: helldorado
-    name: spynal
-    node: sabrewulf
-    net:
-      net0: 'virtio,bridge=vmbr1,rate=200'
-      net1: 'e1000,bridge=vmbr2'
-
-- name: Create new VM with one network interface, three virtio hard disks, 4 cores, and 2 vcpus
-  community.general.proxmox_kvm:
-    api_user: root@pam
-    api_password: secret
-    api_host: helldorado
-    name: spynal
-    node: sabrewulf
-    net:
-      net0: 'virtio,bridge=vmbr1,rate=200'
-    virtio:
-      virtio0: 'VMs_LVM:10'
-      virtio1: 'VMs:2,format=qcow2'
-      virtio2: 'VMs:5,format=raw'
-    cores: 4
-    vcpus: 2
-
-- name: Create VM with 1 10GB SATA disk and an EFI disk, with Secure Boot disabled by default
-  community.general.proxmox_kvm:
-    api_user: root@pam
-    api_password: secret
-    api_host: helldorado
-    name: spynal
-    node: sabrewulf
-    sata:
-      sata0: 'VMs_LVM:10,format=raw'
-    bios: ovmf
-    efidisk0:
-      storage: VMs_LVM_thin
-      format: raw
-      efitype: 4m
-      pre_enrolled_keys: false
-
-- name: Create VM with 1 10GB SATA disk and an EFI disk, with Secure Boot enabled by default
-  community.general.proxmox_kvm:
-    api_user: root@pam
-    api_password: secret
-    api_host: helldorado
-    name: spynal
-    node: sabrewulf
-    sata:
-      sata0: 'VMs_LVM:10,format=raw'
-    bios: ovmf
-    efidisk0:
-      storage: VMs_LVM
-      format: raw
-      efitype: 4m
-      pre_enrolled_keys: 1
-
-- name: >
-    Clone VM with only source VM name.
-    The VM source is spynal.
- The target VM name is zavala - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - clone: spynal - name: zavala - node: sabrewulf - storage: VMs - full: false - format: unspecified - timeout: 500 - -- name: Clone VM with source vmid and target newid and raw format - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - clone: arbitrary_name - vmid: 108 - newid: 152 - name: zavala - node: sabrewulf - storage: LVM_STO - format: raw - timeout: 300 - -- name: Create new VM and lock it for snapshot - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - lock: snapshot - -- name: Create new VM and set protection to disable the remove VM and remove disk operations - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - protection: true - -- name: Create new VM using cloud-init with a username and password - community.general.proxmox_kvm: - node: sabrewulf - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - ide: - ide2: 'local:cloudinit,format=qcow2' - ciuser: mylinuxuser - cipassword: supersecret - searchdomains: 'mydomain.internal' - nameservers: 1.1.1.1 - net: - net0: 'virtio,bridge=vmbr1,tag=77' - ipconfig: - ipconfig0: 'ip=192.168.1.1/24,gw=192.168.1.1' - -- name: Create new VM using Cloud-Init with an ssh key - community.general.proxmox_kvm: - node: sabrewulf - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - ide: - ide2: 'local:cloudinit,format=qcow2' - sshkeys: 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILJkVm98B71lD5XHfihwcYHE9TVpsJmK1vR1JcaU82L+' - searchdomains: 'mydomain.internal' - nameservers: - - '1.1.1.1' - - '8.8.8.8' - net: - net0: 'virtio,bridge=vmbr1,tag=77' - ipconfig: - ipconfig0: 'ip=192.168.1.1/24' - -- name: Start VM - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - state: started - -- name: Stop VM - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - state: stopped - -- name: Stop VM with force - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - state: stopped - force: true - -- name: Restart VM - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - state: restarted - -- name: Convert VM to template - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - state: template - -- name: Convert VM to template (stop VM if running) - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - state: template - force: true - -- name: Remove VM - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - state: absent - -- name: Get VM current state - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - state: current - -- name: Update VM configuration - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado 
- name: spynal - node: sabrewulf - cores: 8 - memory: 16384 - update: true - -- name: Update VM configuration (incl. unsafe options) - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - cores: 8 - memory: 16384 - net: - net0: virtio,bridge=vmbr1 - update: true - update_unsafe: true - -- name: Delete QEMU parameters - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - delete: 'args,template,cpulimit' - -- name: Revert a pending change - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - revert: 'template,cpulimit' - -- name: Migrate VM on second node - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf-2 - migrate: true - -- name: Add hookscript to existing VM - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - vmid: 999 - node: sabrewulf - hookscript: local:snippets/hookscript.pl - update: true -""" - -RETURN = r""" -vmid: - description: The VM vmid. - returned: success - type: int - sample: 115 -status: - description: The current virtual machine status. - returned: success, not clone, not absent, not update - type: str - sample: running -msg: - description: A short message. - returned: always - type: str - sample: "VM kropta with vmid = 110 is running" -""" - -import re -import time -from ansible.module_utils.six.moves.urllib.parse import quote - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion -from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible) - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.parsing.convert_bool import boolean - - -def parse_mac(netstr): - return re.search('=(.*?),', netstr).group(1) - - -def parse_dev(devstr): - return re.search('(.*?)(,|$)', devstr).group(1) - - -class ProxmoxKvmAnsible(ProxmoxAnsible): - def get_vminfo(self, node, vmid, **kwargs): - global results - results = {} - mac = {} - devices = {} - try: - vm = self.proxmox_api.nodes(node).qemu(vmid).config.get() - except Exception as e: - self.module.fail_json(msg='Getting information for VM with vmid = %s failed with exception: %s' % (vmid, e)) - - # Sanitize kwargs. Remove not defined args and ensure True and False converted to int. - kwargs = {k: v for k, v in kwargs.items() if v is not None} - - # Convert all dict in kwargs to elements. - # For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n] - for k in list(kwargs.keys()): - if isinstance(kwargs[k], dict): - kwargs.update(kwargs[k]) - del kwargs[k] - - # Split information by type - re_net = re.compile(r'net[0-9]') - re_dev = re.compile(r'(virtio|ide|scsi|sata|efidisk)[0-9]') - for k in kwargs.keys(): - if re_net.match(k): - mac[k] = parse_mac(vm[k]) - elif re_dev.match(k): - devices[k] = parse_dev(vm[k]) - - results['mac'] = mac - results['devices'] = devices - results['vmid'] = int(vmid) - - def settings(self, vmid, node, **kwargs): - proxmox_node = self.proxmox_api.nodes(node) - - # Sanitize kwargs. Remove not defined args and ensure True and False converted to int. 
-        kwargs = {k: v for k, v in kwargs.items() if v is not None}
-
-        return proxmox_node.qemu(vmid).config.set(**kwargs) is None
-
-    def wait_for_task(self, node, taskid):
-        timeout = self.module.params['timeout']
-        if self.module.params['state'] == 'stopped':
-            # Increase the task timeout in case of the stopped state, to be sure it waits longer than the VM stop operation itself
-            timeout += 10
-
-        while timeout:
-            if self.api_task_ok(node, taskid):
-                # Wait an extra second as the API can be ahead of the hypervisor
-                time.sleep(1)
-                return True
-            timeout = timeout - 1
-            if timeout == 0:
-                break
-            time.sleep(1)
-        return False
-
-    def create_vm(self, vmid, newid, node, name, memory, cpu, cores, sockets, update, update_unsafe, **kwargs):
-        # Available only in PVE 4
-        only_v4 = ['force', 'protection', 'skiplock']
-        only_v6 = ['ciuser', 'cipassword', 'sshkeys', 'ipconfig', 'tags']
-        only_v8 = ['ciupgrade']
-
-        # valid clone parameters
-        valid_clone_params = ['format', 'full', 'pool', 'snapname', 'storage', 'target']
-        clone_params = {}
-        # Default args for the VM. Note: the -args option is for experts only. It allows you to pass arbitrary arguments to kvm.
-        vm_args = "-serial unix:/var/run/qemu-server/{0}.serial,server,nowait".format(vmid)
-
-        proxmox_node = self.proxmox_api.nodes(node)
-
-        # Sanitize kwargs. Remove args that are not defined and ensure True and False are converted to int.
-        kwargs = {k: v for k, v in kwargs.items() if v is not None}
-        kwargs.update({k: int(v) for k, v in kwargs.items() if isinstance(v, bool)})
-
-        version = self.version()
-        pve_major_version = 3 if version < LooseVersion('4.0') else version.version[0]
-
-        # These features work only on PVE 4+
-        if pve_major_version < 4:
-            for p in only_v4:
-                if p in kwargs:
-                    del kwargs[p]
-
-        # These features work only on PVE 6+
-        if pve_major_version < 6:
-            for p in only_v6:
-                if p in kwargs:
-                    del kwargs[p]
-
-        # These features work only on PVE 8+
-        if pve_major_version < 8:
-            for p in only_v8:
-                if p in kwargs:
-                    del kwargs[p]
-
-        # The 'sshkeys' param expects a urlencoded string
-        if 'sshkeys' in kwargs:
-            urlencoded_ssh_keys = quote(kwargs['sshkeys'], safe='')
-            kwargs['sshkeys'] = str(urlencoded_ssh_keys)
-
-        # If update, don't update disks (virtio, efidisk0, tpmstate0, ide, sata, scsi) and network interfaces, unless update_unsafe=True
-        # The pool parameter is not supported by the qemu/<vmid>/config endpoint on "update" (PVE 6.2) - only with "create"
-        if update:
-            if update_unsafe is False:
-                if 'virtio' in kwargs:
-                    del kwargs['virtio']
-                if 'sata' in kwargs:
-                    del kwargs['sata']
-                if 'scsi' in kwargs:
-                    del kwargs['scsi']
-                if 'ide' in kwargs:
-                    del kwargs['ide']
-                if 'efidisk0' in kwargs:
-                    del kwargs['efidisk0']
-                if 'tpmstate0' in kwargs:
-                    del kwargs['tpmstate0']
-                if 'net' in kwargs:
-                    del kwargs['net']
-            if 'force' in kwargs:
-                del kwargs['force']
-            if 'pool' in kwargs:
-                del kwargs['pool']
-
-        # Check that the bios option is set to ovmf if the efidisk0 option is present
-        if 'efidisk0' in kwargs:
-            if ('bios' not in kwargs) or ('ovmf' != kwargs['bios']):
-                self.module.fail_json(msg='efidisk0 cannot be used if bios is not set to ovmf.')
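# For illustration only: a minimal standalone sketch of the efidisk0 flattening
# step that follows - a dict like the hypothetical one below becomes the single
# config string the API expects, with underscores in key names turned into
# hyphens (for example pre_enrolled_keys -> pre-enrolled-keys).
def _efidisk0_flatten_example():
    import re
    efidisk0 = {'storage': 'VMs_LVM', 'format': 'raw', 'efitype': '4m', 'pre_enrolled_keys': 0}
    hyphen_re = re.compile(r'_')
    # The storage definition comes first, followed by key=value pairs
    parts = [efidisk0['storage'] + ':1']
    parts += [hyphen_re.sub('-', k) + '=' + str(v) for k, v in efidisk0.items() if k != 'storage']
    # 'VMs_LVM:1,format=raw,efitype=4m,pre-enrolled-keys=0'
    return ','.join(parts)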
-
-        # Flatten the efidisk0 option to a string so that it is a string which is what Proxmoxer and the API expect
-        if 'efidisk0' in kwargs:
-            efidisk0_str = ''
-            # Regexp to catch underscores in key names, to replace them with hyphens afterwards
-            hyphen_re = re.compile(r'_')
-            # If present, the storage definition should be the first argument
-            if 'storage' in kwargs['efidisk0']:
-                efidisk0_str += kwargs['efidisk0'].get('storage') + ':1,'
-                kwargs['efidisk0'].pop('storage')
-            # Join the other elements from the dict as key=value using commas as separator, replacing any underscore in a key
-            # by hyphens (needed for pre_enrolled_keys to pre-enrolled-keys)
-            efidisk0_str += ','.join([hyphen_re.sub('-', k) + "=" + str(v) for k, v in kwargs['efidisk0'].items()
-                                      if 'storage' != k])
-            kwargs['efidisk0'] = efidisk0_str
-
-        # Flatten the tpmstate0 option to a string so that it is a string which is what Proxmoxer and the API expect
-        if 'tpmstate0' in kwargs:
-            kwargs['tpmstate0'] = '{storage}:1,version=v{version}'.format(
-                storage=kwargs['tpmstate0'].get('storage'),
-                version=kwargs['tpmstate0'].get('version')
-            )
-
-        # Convert all dicts in kwargs to elements.
-        # For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n], ipconfig[n], usb[n]
-        for k in list(kwargs.keys()):
-            if isinstance(kwargs[k], dict):
-                kwargs.update(kwargs[k])
-                del kwargs[k]
-
-        if 'agent' in kwargs:
-            try:
-                # The API also allows booleans instead of e.g. `enabled=1` for backward-compatibility.
-                kwargs['agent'] = int(boolean(kwargs['agent'], strict=True))
-            except TypeError:
-                # Not something that Ansible would parse as a boolean.
-                pass
-
-        # Rename numa_enabled to numa, according to the API documentation
-        if 'numa_enabled' in kwargs:
-            kwargs['numa'] = kwargs['numa_enabled']
-            del kwargs['numa_enabled']
-
-        # The PVE API expects strings for the following params
-        if 'nameservers' in self.module.params:
-            nameservers = self.module.params.pop('nameservers')
-            if nameservers:
-                kwargs['nameserver'] = ' '.join(nameservers)
-        if 'searchdomains' in self.module.params:
-            searchdomains = self.module.params.pop('searchdomains')
-            if searchdomains:
-                kwargs['searchdomain'] = ' '.join(searchdomains)
-
-        # VM tags are expected to be valid and presented as a comma/semicolon delimited string
-        if 'tags' in kwargs:
-            re_tag = re.compile(r'^[a-z0-9_][a-z0-9_\-\+\.]*$')
-            for tag in kwargs['tags']:
-                if not re_tag.match(tag):
-                    self.module.fail_json(msg='%s is not a valid tag' % tag)
-            kwargs['tags'] = ",".join(kwargs['tags'])
-
-        # -args and skiplock require the root@pam user - they can not be used with API tokens
-        if self.module.params['api_user'] == "root@pam" and self.module.params['args'] is not None:
-            kwargs['args'] = self.module.params['args']
-        elif self.module.params['api_user'] != "root@pam" and self.module.params['args'] is not None:
-            self.module.fail_json(msg='args parameter requires root@pam user. ')
-
-        if self.module.params['api_user'] != "root@pam" and self.module.params['skiplock'] is not None:
-            self.module.fail_json(msg='skiplock parameter requires root@pam user.
') - - if update: - if proxmox_node.qemu(vmid).config.set(name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs) is None: - return True - else: - return False - elif self.module.params['clone'] is not None: - for param in valid_clone_params: - if self.module.params[param] is not None: - clone_params[param] = self.module.params[param] - clone_params.update({k: int(v) for k, v in clone_params.items() if isinstance(v, bool)}) - taskid = proxmox_node.qemu(vmid).clone.post(newid=newid, name=name, **clone_params) - else: - taskid = proxmox_node.qemu.create(vmid=vmid, name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs) - - if not self.wait_for_task(node, taskid): - self.module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s' % - proxmox_node.tasks(taskid).log.get()[:1]) - return False - return True - - def start_vm(self, vm): - vmid = vm['vmid'] - proxmox_node = self.proxmox_api.nodes(vm['node']) - taskid = proxmox_node.qemu(vmid).status.start.post() - if not self.wait_for_task(vm['node'], taskid): - self.module.fail_json(msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s' % - proxmox_node.tasks(taskid).log.get()[:1]) - return False - return True - - def stop_vm(self, vm, force, timeout): - vmid = vm['vmid'] - proxmox_node = self.proxmox_api.nodes(vm['node']) - taskid = proxmox_node.qemu(vmid).status.shutdown.post(forceStop=(1 if force else 0), timeout=timeout) - if not self.wait_for_task(vm['node'], taskid): - self.module.fail_json(msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s' % - proxmox_node.tasks(taskid).log.get()[:1]) - return False - return True - - def restart_vm(self, vm, force, **status): - vmid = vm['vmid'] - try: - proxmox_node = self.proxmox_api.nodes(vm['node']) - taskid = proxmox_node.qemu(vmid).status.reset.post() if force else proxmox_node.qemu(vmid).status.reboot.post() - if not self.wait_for_task(vm['node'], taskid): - self.module.fail_json(msg='Reached timeout while waiting for rebooting VM. Last line in task before timeout: %s' % - proxmox_node.tasks(taskid).log.get()[:1]) - return False - return True - except Exception as e: - self.module.fail_json(vmid=vmid, msg="restarting of VM %s failed with exception: %s" % (vmid, e)) - return False - - def convert_to_template(self, vm, timeout, force): - vmid = vm['vmid'] - try: - proxmox_node = self.proxmox_api.nodes(vm['node']) - if proxmox_node.qemu(vmid).status.current.get()['status'] == 'running' and force: - self.stop_instance(vm, vmid, timeout, force) - # not sure why, but templating a container doesn't return a taskid - proxmox_node.qemu(vmid).template.post() - return True - except Exception as e: - self.module.fail_json(vmid=vmid, msg="conversion of VM %s to template failed with exception: %s" % (vmid, e)) - return False - - def migrate_vm(self, vm, target_node): - vmid = vm['vmid'] - proxmox_node = self.proxmox_api.nodes(vm['node']) - taskid = proxmox_node.qemu(vmid).migrate.post(vmid=vmid, node=vm['node'], target=target_node, online=1) - if not self.wait_for_task(vm['node'], taskid): - self.module.fail_json(msg='Reached timeout while waiting for migrating VM. 
Last line in task before timeout: %s' % - proxmox_node.tasks(taskid).log.get()[:1]) - return False - return True - - -def main(): - module_args = proxmox_auth_argument_spec() - kvm_args = dict( - archive=dict(type='str'), - acpi=dict(type='bool'), - agent=dict(type='str'), - args=dict(type='str'), - autostart=dict(type='bool'), - balloon=dict(type='int'), - bios=dict(choices=['seabios', 'ovmf']), - boot=dict(type='str'), - bootdisk=dict(type='str'), - cicustom=dict(type='str'), - cipassword=dict(type='str', no_log=True), - citype=dict(type='str', choices=['nocloud', 'configdrive2']), - ciupgrade=dict(type='bool'), - ciuser=dict(type='str'), - clone=dict(type='str'), - cores=dict(type='int'), - cpu=dict(type='str'), - cpulimit=dict(type='int'), - cpuunits=dict(type='int'), - delete=dict(type='str'), - description=dict(type='str'), - digest=dict(type='str'), - efidisk0=dict(type='dict', - options=dict( - storage=dict(type='str'), - format=dict(type='str'), - efitype=dict(type='str', choices=['2m', '4m']), - pre_enrolled_keys=dict(type='bool'), - )), - force=dict(type='bool'), - format=dict(type='str', choices=['cloop', 'cow', 'qcow', 'qcow2', 'qed', 'raw', 'vmdk', 'unspecified']), - freeze=dict(type='bool'), - full=dict(type='bool', default=True), - hookscript=dict(type='str'), - hostpci=dict(type='dict'), - hotplug=dict(type='str'), - hugepages=dict(choices=['any', '2', '1024']), - ide=dict(type='dict'), - ipconfig=dict(type='dict'), - keyboard=dict(type='str'), - kvm=dict(type='bool'), - localtime=dict(type='bool'), - lock=dict(choices=['migrate', 'backup', 'snapshot', 'rollback']), - machine=dict(type='str'), - memory=dict(type='int'), - migrate=dict(type='bool', default=False), - migrate_downtime=dict(type='int'), - migrate_speed=dict(type='int'), - name=dict(type='str'), - nameservers=dict(type='list', elements='str'), - net=dict(type='dict'), - newid=dict(type='int'), - node=dict(), - numa=dict(type='dict'), - numa_enabled=dict(type='bool'), - onboot=dict(type='bool'), - ostype=dict(choices=['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'win10', 'win11', 'l24', 'l26', 'solaris']), - parallel=dict(type='dict'), - pool=dict(type='str'), - protection=dict(type='bool'), - reboot=dict(type='bool'), - revert=dict(type='str'), - sata=dict(type='dict'), - scsi=dict(type='dict'), - scsihw=dict(choices=['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi']), - serial=dict(type='dict'), - searchdomains=dict(type='list', elements='str'), - shares=dict(type='int'), - skiplock=dict(type='bool'), - smbios=dict(type='str'), - snapname=dict(type='str'), - sockets=dict(type='int'), - sshkeys=dict(type='str', no_log=False), - startdate=dict(type='str'), - startup=dict(), - state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted', 'current', 'template']), - storage=dict(type='str'), - tablet=dict(type='bool'), - tags=dict(type='list', elements='str'), - target=dict(type='str'), - tdf=dict(type='bool'), - template=dict(type='bool'), - timeout=dict(type='int', default=30), - tpmstate0=dict(type='dict', - options=dict( - storage=dict(type='str', required=True), - version=dict(type='str', choices=['2.0', '1.2'], default='2.0') - )), - usb=dict(type='dict'), - update=dict(type='bool', default=False), - update_unsafe=dict(type='bool', default=False), - vcpus=dict(type='int'), - vga=dict(choices=['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']), - virtio=dict(type='dict'), - 
vmid=dict(type='int'),
-        watchdog=dict(),
-    )
-    module_args.update(kvm_args)
-
-    module = AnsibleModule(
-        argument_spec=module_args,
-        mutually_exclusive=[('delete', 'revert'), ('delete', 'update'), ('revert', 'update'), ('clone', 'update'), ('clone', 'delete'), ('clone', 'revert')],
-        required_together=[('api_token_id', 'api_token_secret')],
-        required_one_of=[('name', 'vmid'), ('api_password', 'api_token_id')],
-        required_if=[('state', 'present', ['node'])],
-    )
-
-    clone = module.params['clone']
-    cpu = module.params['cpu']
-    cores = module.params['cores']
-    delete = module.params['delete']
-    migrate = module.params['migrate']
-    memory = module.params['memory']
-    name = module.params['name']
-    newid = module.params['newid']
-    node = module.params['node']
-    revert = module.params['revert']
-    sockets = module.params['sockets']
-    state = module.params['state']
-    update = bool(module.params['update'])
-    update_unsafe = bool(module.params['update_unsafe'])
-    vmid = module.params['vmid']
-    validate_certs = module.params['validate_certs']
-
-    if module.params['format'] == 'unspecified':
-        module.params['format'] = None
-
-    proxmox = ProxmoxKvmAnsible(module)
-
-    # If vmid is not defined then retrieve its value from the vm name,
-    # the cloned vm name or retrieve the next free VM id from ProxmoxAPI.
-    if not vmid:
-        if state == 'present' and not update and not clone and not delete and not revert and not migrate:
-            existing_vmid = proxmox.get_vmid(name, ignore_missing=True)
-            if existing_vmid:
-                vmid = existing_vmid
-            else:
-                try:
-                    vmid = proxmox.get_nextvmid()
-                except Exception:
-                    module.fail_json(msg="Can't get the next vmid for VM {0} automatically. Ensure your cluster state is good".format(name))
-        else:
-            clone_target = clone or name
-            vmid = proxmox.get_vmid(clone_target, ignore_missing=True)
-
-    if clone is not None:
-        # If newid is not defined then retrieve the next free id from ProxmoxAPI
-        if not newid:
-            try:
-                newid = proxmox.get_nextvmid()
-            except Exception:
-                module.fail_json(msg="Can't get the next vmid for VM {0} automatically. Ensure your cluster state is good".format(name))
-
-        # Ensure source VM name exists when cloning
-        if not vmid:
-            module.fail_json(msg='VM with name = %s does not exist in cluster' % clone)
-
-        # Ensure source VM id exists when cloning
-        proxmox.get_vm(vmid)
-
-        # Ensure the chosen VM name doesn't already exist when cloning
-        existing_vmid = proxmox.get_vmid(name, ignore_missing=True)
-        if existing_vmid:
-            module.exit_json(changed=False, vmid=existing_vmid, msg="VM with name <%s> already exists" % name)
-
-        # Ensure the chosen VM id doesn't already exist when cloning
-        if proxmox.get_vm(newid, ignore_missing=True):
-            module.exit_json(changed=False, vmid=vmid, msg="vmid %s with VM name %s already exists" % (newid, name))
-
-    if delete is not None:
-        try:
-            proxmox.settings(vmid, node, delete=delete)
-            module.exit_json(changed=True, vmid=vmid, msg="Settings have been deleted on VM {0} with vmid {1}".format(name, vmid))
-        except Exception as e:
-            module.fail_json(vmid=vmid, msg='Unable to delete settings on VM {0} with vmid {1}: '.format(name, vmid) + str(e))
-
-    if revert is not None:
-        try:
-            proxmox.settings(vmid, node, revert=revert)
-            module.exit_json(changed=True, vmid=vmid, msg="Settings have been reverted on VM {0} with vmid {1}".format(name, vmid))
-        except Exception as e:
-            module.fail_json(vmid=vmid, msg='Unable to revert settings on VM {0} with vmid {1}: Maybe it is not a pending task... 
'.format(name, vmid) + str(e)) - - if migrate: - try: - vm = proxmox.get_vm(vmid) - vm_node = vm['node'] - if node != vm_node: - proxmox.migrate_vm(vm, node) - module.exit_json(changed=True, vmid=vmid, msg="VM {0} has been migrated from {1} to {2}".format(vmid, vm_node, node)) - else: - module.exit_json(changed=False, vmid=vmid, msg="VM {0} is already on {1}".format(vmid, node)) - except Exception as e: - module.fail_json(vmid=vmid, msg='Unable to migrate VM {0} from {1} to {2}: {3}'.format(vmid, vm_node, node, e)) - - if state == 'present': - if not (update or clone) and proxmox.get_vm(vmid, ignore_missing=True): - module.exit_json(changed=False, vmid=vmid, msg="VM with vmid <%s> already exists" % vmid) - elif not (update or clone or vmid) and proxmox.get_vmid(name, ignore_missing=True): - module.exit_json(changed=False, vmid=proxmox.get_vmid(name), msg="VM with name <%s> already exists" % name) - elif not node: - module.fail_json(msg='node is mandatory for creating/updating VM') - elif update and not any([vmid, name]): - module.fail_json(msg='vmid or name is mandatory for updating VM') - elif not proxmox.get_node(node): - module.fail_json(msg="node '%s' does not exist in cluster" % node) - - try: - proxmox.create_vm(vmid, newid, node, name, memory, cpu, cores, sockets, update, update_unsafe, - archive=module.params['archive'], - acpi=module.params['acpi'], - agent=module.params['agent'], - autostart=module.params['autostart'], - balloon=module.params['balloon'], - bios=module.params['bios'], - boot=module.params['boot'], - bootdisk=module.params['bootdisk'], - cicustom=module.params['cicustom'], - cipassword=module.params['cipassword'], - citype=module.params['citype'], - ciupgrade=module.params['ciupgrade'], - ciuser=module.params['ciuser'], - cpulimit=module.params['cpulimit'], - cpuunits=module.params['cpuunits'], - description=module.params['description'], - digest=module.params['digest'], - efidisk0=module.params['efidisk0'], - force=module.params['force'], - freeze=module.params['freeze'], - hookscript=module.params['hookscript'], - hostpci=module.params['hostpci'], - hotplug=module.params['hotplug'], - hugepages=module.params['hugepages'], - ide=module.params['ide'], - ipconfig=module.params['ipconfig'], - keyboard=module.params['keyboard'], - kvm=module.params['kvm'], - localtime=module.params['localtime'], - lock=module.params['lock'], - machine=module.params['machine'], - migrate_downtime=module.params['migrate_downtime'], - migrate_speed=module.params['migrate_speed'], - net=module.params['net'], - numa=module.params['numa'], - numa_enabled=module.params['numa_enabled'], - onboot=module.params['onboot'], - ostype=module.params['ostype'], - parallel=module.params['parallel'], - pool=module.params['pool'], - protection=module.params['protection'], - reboot=module.params['reboot'], - sata=module.params['sata'], - scsi=module.params['scsi'], - scsihw=module.params['scsihw'], - serial=module.params['serial'], - shares=module.params['shares'], - skiplock=module.params['skiplock'], - smbios1=module.params['smbios'], - snapname=module.params['snapname'], - sshkeys=module.params['sshkeys'], - startdate=module.params['startdate'], - startup=module.params['startup'], - tablet=module.params['tablet'], - tags=module.params['tags'], - target=module.params['target'], - tdf=module.params['tdf'], - template=module.params['template'], - tpmstate0=module.params['tpmstate0'], - usb=module.params['usb'], - vcpus=module.params['vcpus'], - vga=module.params['vga'], - virtio=module.params['virtio'], 
- watchdog=module.params['watchdog']) - - if not clone: - proxmox.get_vminfo(node, vmid, - ide=module.params['ide'], - net=module.params['net'], - sata=module.params['sata'], - scsi=module.params['scsi'], - virtio=module.params['virtio']) - except Exception as e: - if update: - module.fail_json(vmid=vmid, msg="Unable to update vm {0} with vmid {1}=".format(name, vmid) + str(e)) - elif clone is not None: - module.fail_json(vmid=vmid, msg="Unable to clone vm {0} from vmid {1}=".format(name, vmid) + str(e)) - else: - module.fail_json(vmid=vmid, msg="creation of qemu VM %s with vmid %s failed with exception=%s" % (name, vmid, e)) - - if update: - module.exit_json(changed=True, vmid=vmid, msg="VM %s with vmid %s updated" % (name, vmid)) - elif clone is not None: - module.exit_json(changed=True, vmid=newid, msg="VM %s with newid %s cloned from vm with vmid %s" % (name, newid, vmid)) - else: - module.exit_json(changed=True, msg="VM %s with vmid %s deployed" % (name, vmid), **results) - - elif state == 'started': - if not vmid: - module.fail_json(msg='VM with name = %s does not exist in cluster' % name) - - status = {} - try: - vm = proxmox.get_vm(vmid) - current = proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).status.current.get()['status'] - status['status'] = current - if current == 'running': - module.exit_json(changed=False, vmid=vmid, msg="VM %s is already running" % vmid, **status) - - if proxmox.start_vm(vm): - module.exit_json(changed=True, vmid=vmid, msg="VM %s started" % vmid, **status) - except Exception as e: - module.fail_json(vmid=vmid, msg="starting of VM %s failed with exception: %s" % (vmid, e), **status) - - elif state == 'stopped': - if not vmid: - module.fail_json(msg='VM with name = %s does not exist in cluster' % name) - - status = {} - try: - vm = proxmox.get_vm(vmid) - current = proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).status.current.get()['status'] - status['status'] = current - if current == 'stopped': - module.exit_json(changed=False, vmid=vmid, msg="VM %s is already stopped" % vmid, **status) - - proxmox.stop_vm(vm, force=module.params['force'], timeout=module.params['timeout']) - module.exit_json(changed=True, vmid=vmid, msg="VM %s is shutting down" % vmid, **status) - except Exception as e: - module.fail_json(vmid=vmid, msg="stopping of VM %s failed with exception: %s" % (vmid, e), **status) - - elif state == 'template': - if not vmid: - module.fail_json(msg='VM with name = %s does not exist in cluster' % name) - - status = {} - try: - vm = proxmox.get_vm(vmid) - - if vm['template'] == 1: - module.exit_json(changed=False, vmid=vmid, msg="VM %s is already a template" % vmid, **status) - - if proxmox.convert_to_template(vm, force=module.params['force'], timeout=module.params['timeout']): - module.exit_json(changed=True, vmid=vmid, msg="VM %s is converting to template" % vmid, **status) - except Exception as e: - module.fail_json(vmid=vmid, msg="conversion of VM %s to template failed with exception: %s" % (vmid, e), **status) - - elif state == 'restarted': - if not vmid: - module.fail_json(msg='VM with name = %s does not exist in cluster' % name) - - status = {} - vm = proxmox.get_vm(vmid) - current = proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).status.current.get()['status'] - status['status'] = current - if current == 'stopped': - module.exit_json(changed=False, vmid=vmid, msg="VM %s is not running" % vmid, **status) - - if proxmox.restart_vm(vm, force=module.params['force']): - module.exit_json(changed=True, vmid=vmid, msg="VM %s is restarted" % vmid, 
**status) - - elif state == 'absent': - status = {} - if not vmid: - module.exit_json(changed=False, msg='VM with name = %s is already absent' % name) - - try: - vm = proxmox.get_vm(vmid, ignore_missing=True) - if not vm: - module.exit_json(changed=False, vmid=vmid) - - proxmox_node = proxmox.proxmox_api.nodes(vm['node']) - current = proxmox_node.qemu(vmid).status.current.get()['status'] - status['status'] = current - if current == 'running': - if module.params['force']: - proxmox.stop_vm(vm, True, timeout=module.params['timeout']) - else: - module.exit_json(changed=False, vmid=vmid, msg="VM %s is running. Stop it before deletion or use force=true." % vmid) - taskid = proxmox_node.qemu.delete(vmid) - if not proxmox.wait_for_task(vm['node'], taskid): - module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s' % - proxmox_node.tasks(taskid).log.get()[:1]) - else: - module.exit_json(changed=True, vmid=vmid, msg="VM %s removed" % vmid) - except Exception as e: - module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, e)) - - elif state == 'current': - status = {} - if not vmid: - module.fail_json(msg='VM with name = %s does not exist in cluster' % name) - vm = proxmox.get_vm(vmid) - if not name: - name = vm.get('name', '(unnamed)') - current = proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).status.current.get()['status'] - status['status'] = current - if status: - module.exit_json(changed=False, vmid=vmid, msg="VM %s with vmid = %s is %s" % (name, vmid, current), **status) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/proxmox_nic.py b/plugins/modules/proxmox_nic.py deleted file mode 100644 index bcf23bc5a1..0000000000 --- a/plugins/modules/proxmox_nic.py +++ /dev/null @@ -1,313 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2021, Lammert Hellinga (@Kogelvis) -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r""" -module: proxmox_nic -short_description: Management of a NIC of a Qemu(KVM) VM in a Proxmox VE cluster -version_added: 3.1.0 -description: - - Allows you to create/update/delete a NIC on Qemu(KVM) Virtual Machines in a Proxmox VE cluster. -author: "Lammert Hellinga (@Kogelvis) " -attributes: - check_mode: - support: full - diff_mode: - support: none - action_group: - version_added: 9.0.0 -options: - bridge: - description: - - Add this interface to the specified bridge device. The Proxmox VE default bridge is called V(vmbr0). - type: str - firewall: - description: - - Whether this interface should be protected by the firewall. - type: bool - default: false - interface: - description: - - Name of the interface, should be V(net[n]) where C(1 ≤ n ≤ 31). - type: str - required: true - link_down: - description: - - Whether this interface should be disconnected (like pulling the plug). - type: bool - default: false - mac: - description: - - V(XX:XX:XX:XX:XX:XX) should be a unique MAC address. This is automatically generated if not specified. - - When not specified this module will keep the MAC address the same when changing an existing interface. - type: str - model: - description: - - The NIC emulator model. 
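The V(net[n]) naming rule for O(interface) above can be checked mechanically. A minimal sketch, assuming the index range 0-31 that Proxmox VE itself accepts (and that the net0 examples further below rely on); the helper name is illustrative:

```python
import re

def valid_interface(name):
    # Accept net0 .. net31, the NIC index range Proxmox VE allows.
    return re.fullmatch(r"net([0-9]|[12][0-9]|3[01])", name) is not None

print(valid_interface("net0"), valid_interface("net31"), valid_interface("net32"))
# True True False
```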
- type: str - choices: ['e1000', 'e1000-82540em', 'e1000-82544gc', 'e1000-82545em', 'i82551', 'i82557b', 'i82559er', 'ne2k_isa', 'ne2k_pci', 'pcnet', 'rtl8139', - 'virtio', 'vmxnet3'] - default: virtio - mtu: - description: - - Force MTU, for C(virtio) model only, setting will be ignored otherwise. - - Set to V(1) to use the bridge MTU. - - Value should be C(1 ≤ n ≤ 65520). - type: int - name: - description: - - Specifies the VM name. Only used on the configuration web interface. - - Required only for O(state=present). - type: str - queues: - description: - - Number of packet queues to be used on the device. - - Value should be C(0 ≤ n ≤ 16). - type: int - rate: - description: - - Rate limit in MBps (MegaBytes per second) as floating point number. - type: float - state: - description: - - Indicates desired state of the NIC. - type: str - choices: ['present', 'absent'] - default: present - tag: - description: - - VLAN tag to apply to packets on this interface. - - Value should be C(1 ≤ n ≤ 4094). - type: int - trunks: - description: - - List of VLAN trunks to pass through this interface. - type: list - elements: int - vmid: - description: - - Specifies the instance ID. - type: int -extends_documentation_fragment: - - community.general.proxmox.actiongroup_proxmox - - community.general.proxmox.documentation - - community.general.attributes -""" - -EXAMPLES = r""" -- name: Create NIC net0 targeting the vm by name - community.general.proxmox_nic: - api_user: root@pam - api_password: secret - api_host: proxmoxhost - name: my_vm - interface: net0 - bridge: vmbr0 - tag: 3 - -- name: Create NIC net0 targeting the vm by id - community.general.proxmox_nic: - api_user: root@pam - api_password: secret - api_host: proxmoxhost - vmid: 103 - interface: net0 - bridge: vmbr0 - mac: "12:34:56:C0:FF:EE" - firewall: true - -- name: Delete NIC net0 targeting the vm by name - community.general.proxmox_nic: - api_user: root@pam - api_password: secret - api_host: proxmoxhost - name: my_vm - interface: net0 - state: absent -""" - -RETURN = r""" -vmid: - description: The VM vmid. - returned: success - type: int - sample: 115 -msg: - description: A short message. 
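The update_nic() body further below round-trips Proxmox's comma-separated key=value NIC string. A standalone sketch of that format, mirroring the split-on-comma/split-on-equals logic in the code that follows; the helper name is illustrative:

```python
def parse_nic_config(value):
    # "virtio=AA:BB:CC:DD:EE:FF,bridge=vmbr0,firewall=1" -> dict
    config = {}
    for item in value.split(","):
        key, _, val = item.partition("=")
        config[key] = val
    return config

print(parse_nic_config("virtio=AA:BB:CC:DD:EE:FF,bridge=vmbr0,firewall=1"))
# {'virtio': 'AA:BB:CC:DD:EE:FF', 'bridge': 'vmbr0', 'firewall': '1'}
```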
- returned: always - type: str - sample: "Nic net0 unchanged on VM with vmid 103" -""" - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible) - - -class ProxmoxNicAnsible(ProxmoxAnsible): - def update_nic(self, vmid, interface, model, **kwargs): - vm = self.get_vm(vmid) - - try: - vminfo = self.proxmox_api.nodes(vm['node']).qemu(vmid).config.get() - except Exception as e: - self.module.fail_json(msg='Getting information for VM with vmid = %s failed with exception: %s' % (vmid, e)) - - if interface in vminfo: - # Convert the current config to a dictionary - config = vminfo[interface].split(',') - config.sort() - - config_current = {} - - for i in config: - kv = i.split('=') - try: - config_current[kv[0]] = kv[1] - except IndexError: - config_current[kv[0]] = '' - - # determine the current model nic and mac-address - models = ['e1000', 'e1000-82540em', 'e1000-82544gc', 'e1000-82545em', 'i82551', 'i82557b', - 'i82559er', 'ne2k_isa', 'ne2k_pci', 'pcnet', 'rtl8139', 'virtio', 'vmxnet3'] - current_model = set(models) & set(config_current.keys()) - current_model = current_model.pop() - current_mac = config_current[current_model] - - # build nic config string - config_provided = "{0}={1}".format(model, current_mac) - else: - config_provided = model - - if kwargs['mac']: - config_provided = "{0}={1}".format(model, kwargs['mac']) - - if kwargs['bridge']: - config_provided += ",bridge={0}".format(kwargs['bridge']) - - if kwargs['firewall']: - config_provided += ",firewall=1" - - if kwargs['link_down']: - config_provided += ',link_down=1' - - if kwargs['mtu']: - config_provided += ",mtu={0}".format(kwargs['mtu']) - if model != 'virtio': - self.module.warn( - 'Ignoring MTU for nic {0} on VM with vmid {1}, ' - 'model should be set to \'virtio\': '.format(interface, vmid)) - - if kwargs['queues']: - config_provided += ",queues={0}".format(kwargs['queues']) - - if kwargs['rate']: - config_provided += ",rate={0}".format(kwargs['rate']) - - if kwargs['tag']: - config_provided += ",tag={0}".format(kwargs['tag']) - - if kwargs['trunks']: - config_provided += ",trunks={0}".format(';'.join(str(x) for x in kwargs['trunks'])) - - net = {interface: config_provided} - vm = self.get_vm(vmid) - - if ((interface not in vminfo) or (vminfo[interface] != config_provided)): - if not self.module.check_mode: - self.proxmox_api.nodes(vm['node']).qemu(vmid).config.set(**net) - return True - - return False - - def delete_nic(self, vmid, interface): - vm = self.get_vm(vmid) - vminfo = self.proxmox_api.nodes(vm['node']).qemu(vmid).config.get() - - if interface in vminfo: - if not self.module.check_mode: - self.proxmox_api.nodes(vm['node']).qemu(vmid).config.set(delete=interface) - return True - - return False - - -def main(): - module_args = proxmox_auth_argument_spec() - nic_args = dict( - bridge=dict(type='str'), - firewall=dict(type='bool', default=False), - interface=dict(type='str', required=True), - link_down=dict(type='bool', default=False), - mac=dict(type='str'), - model=dict(choices=['e1000', 'e1000-82540em', 'e1000-82544gc', 'e1000-82545em', - 'i82551', 'i82557b', 'i82559er', 'ne2k_isa', 'ne2k_pci', 'pcnet', - 'rtl8139', 'virtio', 'vmxnet3'], default='virtio'), - mtu=dict(type='int'), - name=dict(type='str'), - queues=dict(type='int'), - rate=dict(type='float'), - state=dict(default='present', choices=['present', 'absent']), - tag=dict(type='int'), - trunks=dict(type='list', elements='int'), - 
vmid=dict(type='int'), - ) - module_args.update(nic_args) - - module = AnsibleModule( - argument_spec=module_args, - required_together=[('api_token_id', 'api_token_secret')], - required_one_of=[('name', 'vmid'), ('api_password', 'api_token_id')], - supports_check_mode=True, - ) - - proxmox = ProxmoxNicAnsible(module) - - interface = module.params['interface'] - model = module.params['model'] - name = module.params['name'] - state = module.params['state'] - vmid = module.params['vmid'] - - # If vmid is not defined then retrieve its value from the vm name, - if not vmid: - vmid = proxmox.get_vmid(name) - - # Ensure VM id exists - proxmox.get_vm(vmid) - - if state == 'present': - try: - if proxmox.update_nic(vmid, interface, model, - bridge=module.params['bridge'], - firewall=module.params['firewall'], - link_down=module.params['link_down'], - mac=module.params['mac'], - mtu=module.params['mtu'], - queues=module.params['queues'], - rate=module.params['rate'], - tag=module.params['tag'], - trunks=module.params['trunks']): - module.exit_json(changed=True, vmid=vmid, msg="Nic {0} updated on VM with vmid {1}".format(interface, vmid)) - else: - module.exit_json(vmid=vmid, msg="Nic {0} unchanged on VM with vmid {1}".format(interface, vmid)) - except Exception as e: - module.fail_json(vmid=vmid, msg='Unable to change nic {0} on VM with vmid {1}: '.format(interface, vmid) + str(e)) - - elif state == 'absent': - try: - if proxmox.delete_nic(vmid, interface): - module.exit_json(changed=True, vmid=vmid, msg="Nic {0} deleted on VM with vmid {1}".format(interface, vmid)) - else: - module.exit_json(vmid=vmid, msg="Nic {0} does not exist on VM with vmid {1}".format(interface, vmid)) - except Exception as e: - module.fail_json(vmid=vmid, msg='Unable to delete nic {0} on VM with vmid {1}: '.format(interface, vmid) + str(e)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/proxmox_node_info.py b/plugins/modules/proxmox_node_info.py deleted file mode 100644 index e243862134..0000000000 --- a/plugins/modules/proxmox_node_info.py +++ /dev/null @@ -1,143 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright John Berninger (@jberning) -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r""" -module: proxmox_node_info -short_description: Retrieve information about one or more Proxmox VE nodes -version_added: 8.2.0 -description: - - Retrieve information about one or more Proxmox VE nodes. -author: John Berninger (@jwbernin) -attributes: - action_group: - version_added: 9.0.0 -extends_documentation_fragment: - - community.general.proxmox.actiongroup_proxmox - - community.general.proxmox.documentation - - community.general.attributes - - community.general.attributes.info_module -""" - - -EXAMPLES = r""" -- name: List existing nodes - community.general.proxmox_node_info: - api_host: proxmox1 - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - register: proxmox_nodes -""" - - -RETURN = r""" -proxmox_nodes: - description: List of Proxmox VE nodes. - returned: always, but can be empty - type: list - elements: dict - contains: - cpu: - description: Current CPU usage in fractional shares of this host's total available CPU. 
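Everything this info module returns comes from a single GET /nodes call. A minimal proxmoxer sketch of the same request; host and credentials are placeholders:

```python
from proxmoxer import ProxmoxAPI

api = ProxmoxAPI("proxmox1", user="root@pam", password="secret", verify_ssl=False)
for node in api.nodes.get():
    # Each entry carries the fields documented here (cpu, maxmem, status, ...).
    print(node["node"], node["status"], node["cpu"])
```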
- returned: on success - type: float - disk: - description: Current local disk usage of this host. - returned: on success - type: int - id: - description: Identity of the node. - returned: on success - type: str - level: - description: Support level. Can be blank if not under a paid support contract. - returned: on success - type: str - maxcpu: - description: Total number of available CPUs on this host. - returned: on success - type: int - maxdisk: - description: Size of local disk in bytes. - returned: on success - type: int - maxmem: - description: Memory size in bytes. - returned: on success - type: int - mem: - description: Used memory in bytes. - returned: on success - type: int - node: - description: Short hostname of this node. - returned: on success - type: str - ssl_fingerprint: - description: SSL fingerprint of the node certificate. - returned: on success - type: str - status: - description: Node status. - returned: on success - type: str - type: - description: Object type being returned. - returned: on success - type: str - uptime: - description: Node uptime in seconds. - returned: on success - type: int -""" - - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.proxmox import ( - proxmox_auth_argument_spec, ProxmoxAnsible) - - -class ProxmoxNodeInfoAnsible(ProxmoxAnsible): - def get_nodes(self): - nodes = self.proxmox_api.nodes.get() - return nodes - - -def proxmox_node_info_argument_spec(): - return dict() - - -def main(): - module_args = proxmox_auth_argument_spec() - node_info_args = proxmox_node_info_argument_spec() - module_args.update(node_info_args) - - module = AnsibleModule( - argument_spec=module_args, - required_one_of=[('api_password', 'api_token_id')], - required_together=[('api_token_id', 'api_token_secret')], - supports_check_mode=True, - ) - result = dict( - changed=False - ) - - proxmox = ProxmoxNodeInfoAnsible(module) - - nodes = proxmox.get_nodes() - result['proxmox_nodes'] = nodes - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/proxmox_pool.py b/plugins/modules/proxmox_pool.py deleted file mode 100644 index c53e394eeb..0000000000 --- a/plugins/modules/proxmox_pool.py +++ /dev/null @@ -1,182 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2023, Sergei Antipov (UnderGreen) -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r""" -module: proxmox_pool -short_description: Pool management for Proxmox VE cluster -description: - - Create or delete a pool for Proxmox VE clusters. - - For pool members management please consult M(community.general.proxmox_pool_member) module. -version_added: 7.1.0 -author: "Sergei Antipov (@UnderGreen) " -attributes: - check_mode: - support: full - diff_mode: - support: none - action_group: - version_added: 9.0.0 -options: - poolid: - description: - - The pool ID. - type: str - aliases: ["name"] - required: true - state: - description: - - Indicate desired state of the pool. - - The pool must be empty prior deleting it with O(state=absent). - choices: ['present', 'absent'] - default: present - type: str - comment: - description: - - Specify the description for the pool. - - Parameter is ignored when pool already exists or O(state=absent). 
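The module body further below wraps two REST endpoints, POST /pools and DELETE /pools/{poolid}. A sketch of the same calls through proxmoxer; connection details and the pool name are placeholders:

```python
from proxmoxer import ProxmoxAPI

api = ProxmoxAPI("node1", user="root@pam", password="password", verify_ssl=False)
api.pools.post(poolid="test", comment="New pool")  # POST /pools
api.pools("test").delete()                         # DELETE /pools/test
```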
- type: str - -extends_documentation_fragment: - - community.general.proxmox.actiongroup_proxmox - - community.general.proxmox.documentation - - community.general.attributes -""" - -EXAMPLES = r""" -- name: Create new Proxmox VE pool - community.general.proxmox_pool: - api_host: node1 - api_user: root@pam - api_password: password - poolid: test - comment: 'New pool' - -- name: Delete the Proxmox VE pool - community.general.proxmox_pool: - api_host: node1 - api_user: root@pam - api_password: password - poolid: test - state: absent -""" - -RETURN = r""" -poolid: - description: The pool ID. - returned: success - type: str - sample: test -msg: - description: A short message on what the module did. - returned: always - type: str - sample: "Pool test successfully created" -""" - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible) - - -class ProxmoxPoolAnsible(ProxmoxAnsible): - - def is_pool_existing(self, poolid): - """Check whether pool already exist - - :param poolid: str - name of the pool - :return: bool - is pool exists? - """ - try: - pools = self.proxmox_api.pools.get() - for pool in pools: - if pool['poolid'] == poolid: - return True - return False - except Exception as e: - self.module.fail_json(msg="Unable to retrieve pools: {0}".format(e)) - - def is_pool_empty(self, poolid): - """Check whether pool has members - - :param poolid: str - name of the pool - :return: bool - is pool empty? - """ - return True if not self.get_pool(poolid)['members'] else False - - def create_pool(self, poolid, comment=None): - """Create Proxmox VE pool - - :param poolid: str - name of the pool - :param comment: str, optional - Description of a pool - :return: None - """ - if self.is_pool_existing(poolid): - self.module.exit_json(changed=False, poolid=poolid, msg="Pool {0} already exists".format(poolid)) - - if self.module.check_mode: - return - - try: - self.proxmox_api.pools.post(poolid=poolid, comment=comment) - except Exception as e: - self.module.fail_json(msg="Failed to create pool with ID {0}: {1}".format(poolid, e)) - - def delete_pool(self, poolid): - """Delete Proxmox VE pool - - :param poolid: str - name of the pool - :return: None - """ - if not self.is_pool_existing(poolid): - self.module.exit_json(changed=False, poolid=poolid, msg="Pool {0} doesn't exist".format(poolid)) - - if self.is_pool_empty(poolid): - if self.module.check_mode: - return - - try: - self.proxmox_api.pools(poolid).delete() - except Exception as e: - self.module.fail_json(msg="Failed to delete pool with ID {0}: {1}".format(poolid, e)) - else: - self.module.fail_json(msg="Can't delete pool {0} with members. 
Please remove members from pool first.".format(poolid)) - - -def main(): - module_args = proxmox_auth_argument_spec() - pools_args = dict( - poolid=dict(type="str", aliases=["name"], required=True), - comment=dict(type="str"), - state=dict(default="present", choices=["present", "absent"]), - ) - - module_args.update(pools_args) - - module = AnsibleModule( - argument_spec=module_args, - required_together=[("api_token_id", "api_token_secret")], - required_one_of=[("api_password", "api_token_id")], - supports_check_mode=True - ) - - poolid = module.params["poolid"] - comment = module.params["comment"] - state = module.params["state"] - - proxmox = ProxmoxPoolAnsible(module) - - if state == "present": - proxmox.create_pool(poolid, comment) - module.exit_json(changed=True, poolid=poolid, msg="Pool {0} successfully created".format(poolid)) - else: - proxmox.delete_pool(poolid) - module.exit_json(changed=True, poolid=poolid, msg="Pool {0} successfully deleted".format(poolid)) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/proxmox_pool_member.py b/plugins/modules/proxmox_pool_member.py deleted file mode 100644 index bd32e94e42..0000000000 --- a/plugins/modules/proxmox_pool_member.py +++ /dev/null @@ -1,240 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2023, Sergei Antipov (UnderGreen) -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r""" -module: proxmox_pool_member -short_description: Add or delete members from Proxmox VE cluster pools -description: - - Create or delete a pool member in Proxmox VE clusters. -version_added: 7.1.0 -author: "Sergei Antipov (@UnderGreen) " -attributes: - check_mode: - support: full - diff_mode: - support: full - action_group: - version_added: 9.0.0 -options: - poolid: - description: - - The pool ID. - type: str - aliases: ["name"] - required: true - member: - description: - - Specify the member name. - - For O(type=storage) it is a storage name. - - For O(type=vm) either vmid or vm name could be used. - type: str - required: true - type: - description: - - Member type to add/remove from the pool. - choices: ["vm", "storage"] - default: vm - type: str - state: - description: - - Indicate desired state of the pool member. 
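Membership changes in the implementation below all go through PUT /pools/{poolid}, with delete=1 signalling removal. A proxmoxer sketch of those calls; connection details and member values are placeholders:

```python
from proxmoxer import ProxmoxAPI

api = ProxmoxAPI("node1", user="root@pam", password="password", verify_ssl=False)
api.pools("test").put(vms=[101])             # add VM 101 to the pool
api.pools("test").put(storage=["zfs-data"])  # add a storage member
api.pools("test").put(vms=[101], delete=1)   # remove VM 101 again
```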
- choices: ['present', 'absent'] - default: present - type: str - -extends_documentation_fragment: - - community.general.proxmox.actiongroup_proxmox - - community.general.proxmox.documentation - - community.general.attributes -""" - -EXAMPLES = r""" -- name: Add new VM to Proxmox VE pool - community.general.proxmox_pool_member: - api_host: node1 - api_user: root@pam - api_password: password - poolid: test - member: 101 - -- name: Add new storage to Proxmox VE pool - community.general.proxmox_pool_member: - api_host: node1 - api_user: root@pam - api_password: password - poolid: test - member: zfs-data - type: storage - -- name: Remove VM from the Proxmox VE pool using VM name - community.general.proxmox_pool_member: - api_host: node1 - api_user: root@pam - api_password: password - poolid: test - member: pxe.home.arpa - state: absent - -- name: Remove storage from the Proxmox VE pool - community.general.proxmox_pool_member: - api_host: node1 - api_user: root@pam - api_password: password - poolid: test - member: zfs-storage - type: storage - state: absent -""" - -RETURN = r""" -poolid: - description: The pool ID. - returned: success - type: str - sample: test -member: - description: Member name. - returned: success - type: str - sample: 101 -msg: - description: A short message on what the module did. - returned: always - type: str - sample: "Member 101 deleted from the pool test" -""" - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible) - - -class ProxmoxPoolMemberAnsible(ProxmoxAnsible): - - def pool_members(self, poolid): - vms = [] - storage = [] - for member in self.get_pool(poolid)["members"]: - if member["type"] == "storage": - storage.append(member["storage"]) - else: - vms.append(member["vmid"]) - - return (vms, storage) - - def add_pool_member(self, poolid, member, member_type): - current_vms_members, current_storage_members = self.pool_members(poolid) - all_members_before = current_storage_members + current_vms_members - all_members_after = all_members_before.copy() - diff = {"before": {"members": all_members_before}, "after": {"members": all_members_after}} - - try: - if member_type == "storage": - storages = self.get_storages(type=None) - if member not in [storage["storage"] for storage in storages]: - self.module.fail_json(msg="Storage {0} doesn't exist in the cluster".format(member)) - if member in current_storage_members: - self.module.exit_json(changed=False, poolid=poolid, member=member, - diff=diff, msg="Member {0} is already part of the pool {1}".format(member, poolid)) - - all_members_after.append(member) - if self.module.check_mode: - return diff - - self.proxmox_api.pools(poolid).put(storage=[member]) - return diff - else: - try: - vmid = int(member) - except ValueError: - vmid = self.get_vmid(member) - - if vmid in current_vms_members: - self.module.exit_json(changed=False, poolid=poolid, member=member, - diff=diff, msg="VM {0} is already part of the pool {1}".format(member, poolid)) - - all_members_after.append(member) - - if not self.module.check_mode: - self.proxmox_api.pools(poolid).put(vms=[vmid]) - return diff - except Exception as e: - self.module.fail_json(msg="Failed to add a new member ({0}) to the pool {1}: {2}".format(member, poolid, e)) - - def delete_pool_member(self, poolid, member, member_type): - current_vms_members, current_storage_members = self.pool_members(poolid) - all_members_before = current_storage_members + 
current_vms_members - all_members_after = all_members_before.copy() - diff = {"before": {"members": all_members_before}, "after": {"members": all_members_after}} - - try: - if member_type == "storage": - if member not in current_storage_members: - self.module.exit_json(changed=False, poolid=poolid, member=member, - diff=diff, msg="Member {0} is not part of the pool {1}".format(member, poolid)) - - all_members_after.remove(member) - if self.module.check_mode: - return diff - - self.proxmox_api.pools(poolid).put(storage=[member], delete=1) - return diff - else: - try: - vmid = int(member) - except ValueError: - vmid = self.get_vmid(member) - - if vmid not in current_vms_members: - self.module.exit_json(changed=False, poolid=poolid, member=member, - diff=diff, msg="VM {0} is not part of the pool {1}".format(member, poolid)) - - all_members_after.remove(vmid) - - if not self.module.check_mode: - self.proxmox_api.pools(poolid).put(vms=[vmid], delete=1) - return diff - except Exception as e: - self.module.fail_json(msg="Failed to delete a member ({0}) from the pool {1}: {2}".format(member, poolid, e)) - - -def main(): - module_args = proxmox_auth_argument_spec() - pool_members_args = dict( - poolid=dict(type="str", aliases=["name"], required=True), - member=dict(type="str", required=True), - type=dict(default="vm", choices=["vm", "storage"]), - state=dict(default="present", choices=["present", "absent"]), - ) - - module_args.update(pool_members_args) - - module = AnsibleModule( - argument_spec=module_args, - required_together=[("api_token_id", "api_token_secret")], - required_one_of=[("api_password", "api_token_id")], - supports_check_mode=True - ) - - poolid = module.params["poolid"] - member = module.params["member"] - member_type = module.params["type"] - state = module.params["state"] - - proxmox = ProxmoxPoolMemberAnsible(module) - - if state == "present": - diff = proxmox.add_pool_member(poolid, member, member_type) - module.exit_json(changed=True, poolid=poolid, member=member, diff=diff, msg="New member {0} added to the pool {1}".format(member, poolid)) - else: - diff = proxmox.delete_pool_member(poolid, member, member_type) - module.exit_json(changed=True, poolid=poolid, member=member, diff=diff, msg="Member {0} deleted from the pool {1}".format(member, poolid)) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/proxmox_snap.py b/plugins/modules/proxmox_snap.py deleted file mode 100644 index 57dad92413..0000000000 --- a/plugins/modules/proxmox_snap.py +++ /dev/null @@ -1,395 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2020, Jeffrey van Pelt (@Thulium-Drake) -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r""" -module: proxmox_snap -short_description: Snapshot management of instances in Proxmox VE cluster -version_added: 2.0.0 -description: - - Allows you to create/delete/restore snapshots from instances in Proxmox VE cluster. - - Supports both KVM and LXC, OpenVZ has not been tested, as it is no longer supported on Proxmox VE. -attributes: - check_mode: - support: full - diff_mode: - support: none - action_group: - version_added: 9.0.0 -options: - hostname: - description: - - The instance name. - type: str - vmid: - description: - - The instance id. - - If not set, will be fetched from PromoxAPI based on the hostname. 
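The snapshot lifecycle this module manages maps onto three API calls, visible in the class further below. A proxmoxer sketch of the same sequence; host, guest type, and names are placeholders:

```python
from proxmoxer import ProxmoxAPI

api = ProxmoxAPI("node1", user="root@pam", password="1q2w3e", verify_ssl=False)
snap = api.nodes("node1").lxc(100).snapshot  # use .qemu(100) for KVM guests
snap.post(snapname="pre-updates")            # create a snapshot
snap("pre-updates").post("rollback")         # roll back to it
snap.delete("pre-updates")                   # remove it again
```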
- type: str - state: - description: - - Indicate desired state of the instance snapshot. - - The V(rollback) value was added in community.general 4.8.0. - choices: ['present', 'absent', 'rollback'] - default: present - type: str - force: - description: - - For removal from config file, even if removing disk snapshot fails. - default: false - type: bool - unbind: - description: - - This option only applies to LXC containers. - - Allows to snapshot a container even if it has configured mountpoints. - - Temporarily disables all configured mountpoints, takes snapshot, and finally restores original configuration. - - If running, the container will be stopped and restarted to apply config changes. - - Due to restrictions in the Proxmox API this option can only be used authenticating as V(root@pam) with O(api_password), API tokens do - not work either. - - See U(https://pve.proxmox.com/pve-docs/api-viewer/#/nodes/{node}/lxc/{vmid}/config) (PUT tab) for more details. - default: false - type: bool - version_added: 5.7.0 - vmstate: - description: - - Snapshot includes RAM. - default: false - type: bool - description: - description: - - Specify the description for the snapshot. Only used on the configuration web interface. - - This is saved as a comment inside the configuration file. - type: str - timeout: - description: - - Timeout for operations. - default: 30 - type: int - snapname: - description: - - Name of the snapshot that has to be created/deleted/restored. - default: 'ansible_snap' - type: str - retention: - description: - - Remove old snapshots if there are more than O(retention) snapshots. - - If O(retention) is set to V(0), all snapshots will be kept. - - This is only used when O(state=present) and when an actual snapshot is created. If no snapshot is created, all existing snapshots will - be kept. - default: 0 - type: int - version_added: 7.1.0 - -notes: - - Requires proxmoxer and requests modules on host. These modules can be installed with pip. 
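For example:

```
pip install proxmoxer requests
```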
-requirements: ["proxmoxer", "requests"] -author: Jeffrey van Pelt (@Thulium-Drake) -extends_documentation_fragment: - - community.general.proxmox.actiongroup_proxmox - - community.general.proxmox.documentation - - community.general.attributes -""" - -EXAMPLES = r""" -- name: Create new container snapshot - community.general.proxmox_snap: - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - vmid: 100 - state: present - snapname: pre-updates - -- name: Create new container snapshot and keep only the 2 newest snapshots - community.general.proxmox_snap: - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - vmid: 100 - state: present - snapname: snapshot-42 - retention: 2 - -- name: Create new snapshot for a container with configured mountpoints - community.general.proxmox_snap: - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - vmid: 100 - state: present - unbind: true # requires root@pam+password auth, API tokens are not supported - snapname: pre-updates - -- name: Remove container snapshot - community.general.proxmox_snap: - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - vmid: 100 - state: absent - snapname: pre-updates - -- name: Rollback container snapshot - community.general.proxmox_snap: - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - vmid: 100 - state: rollback - snapname: pre-updates -""" - -RETURN = r"""#""" - -import time - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native -from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible) - - -class ProxmoxSnapAnsible(ProxmoxAnsible): - def snapshot(self, vm, vmid): - return getattr(self.proxmox_api.nodes(vm['node']), vm['type'])(vmid).snapshot - - def vmconfig(self, vm, vmid): - return getattr(self.proxmox_api.nodes(vm['node']), vm['type'])(vmid).config - - def vmstatus(self, vm, vmid): - return getattr(self.proxmox_api.nodes(vm['node']), vm['type'])(vmid).status - - def _container_mp_get(self, vm, vmid): - cfg = self.vmconfig(vm, vmid).get() - mountpoints = {} - for key, value in cfg.items(): - if key.startswith('mp'): - mountpoints[key] = value - return mountpoints - - def _container_mp_disable(self, vm, vmid, timeout, unbind, mountpoints, vmstatus): - # shutdown container if running - if vmstatus == 'running': - self.shutdown_instance(vm, vmid, timeout) - # delete all mountpoints configs - self.vmconfig(vm, vmid).put(delete=' '.join(mountpoints)) - - def _container_mp_restore(self, vm, vmid, timeout, unbind, mountpoints, vmstatus): - # NOTE: requires auth as `root@pam`, API tokens are not supported - # see https://pve.proxmox.com/pve-docs/api-viewer/#/nodes/{node}/lxc/{vmid}/config - # restore original config - self.vmconfig(vm, vmid).put(**mountpoints) - # start container (if was running before snap) - if vmstatus == 'running': - self.start_instance(vm, vmid, timeout) - - def start_instance(self, vm, vmid, timeout): - taskid = self.vmstatus(vm, vmid).start.post() - while timeout: - if self.api_task_ok(vm['node'], taskid): - return True - timeout -= 1 - if timeout == 0: - self.module.fail_json(msg='Reached timeout while waiting for VM to start. 
Last line in task before timeout: %s' % - self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) - time.sleep(1) - return False - - def shutdown_instance(self, vm, vmid, timeout): - taskid = self.vmstatus(vm, vmid).shutdown.post() - while timeout: - if self.api_task_ok(vm['node'], taskid): - return True - timeout -= 1 - if timeout == 0: - self.module.fail_json(msg='Reached timeout while waiting for VM to stop. Last line in task before timeout: %s' % - self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) - time.sleep(1) - return False - - def snapshot_retention(self, vm, vmid, retention): - # ignore the last snapshot, which is the current state - snapshots = self.snapshot(vm, vmid).get()[:-1] - if retention > 0 and len(snapshots) > retention: - # sort by age, oldest first - for snap in sorted(snapshots, key=lambda x: x['snaptime'])[:len(snapshots) - retention]: - self.snapshot(vm, vmid)(snap['name']).delete() - - def snapshot_create(self, vm, vmid, timeout, snapname, description, vmstate, unbind, retention): - if self.module.check_mode: - return True - - if vm['type'] == 'lxc': - if unbind is True: - # check if credentials will work - # WARN: it is crucial this check runs here! - # The correct permissions are required only to reconfig mounts. - # Not checking now would allow to remove the configuration BUT - # fail later, leaving the container in a misconfigured state. - if ( - self.module.params['api_user'] != 'root@pam' - or not self.module.params['api_password'] - ): - self.module.fail_json(msg='`unbind=True` requires authentication as `root@pam` with `api_password`, API tokens are not supported.') - return False - mountpoints = self._container_mp_get(vm, vmid) - vmstatus = self.vmstatus(vm, vmid).current().get()['status'] - if mountpoints: - self._container_mp_disable(vm, vmid, timeout, unbind, mountpoints, vmstatus) - taskid = self.snapshot(vm, vmid).post(snapname=snapname, description=description) - else: - taskid = self.snapshot(vm, vmid).post(snapname=snapname, description=description, vmstate=int(vmstate)) - - while timeout: - if self.api_task_ok(vm['node'], taskid): - break - if timeout == 0: - self.module.fail_json(msg='Reached timeout while waiting for creating VM snapshot. Last line in task before timeout: %s' % - self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) - - time.sleep(1) - timeout -= 1 - if vm['type'] == 'lxc' and unbind is True and mountpoints: - self._container_mp_restore(vm, vmid, timeout, unbind, mountpoints, vmstatus) - - self.snapshot_retention(vm, vmid, retention) - return timeout > 0 - - def snapshot_remove(self, vm, vmid, timeout, snapname, force): - if self.module.check_mode: - return True - - taskid = self.snapshot(vm, vmid).delete(snapname, force=int(force)) - while timeout: - if self.api_task_ok(vm['node'], taskid): - return True - if timeout == 0: - self.module.fail_json(msg='Reached timeout while waiting for removing VM snapshot. Last line in task before timeout: %s' % - self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) - - time.sleep(1) - timeout -= 1 - return False - - def snapshot_rollback(self, vm, vmid, timeout, snapname): - if self.module.check_mode: - return True - - taskid = self.snapshot(vm, vmid)(snapname).post("rollback") - while timeout: - if self.api_task_ok(vm['node'], taskid): - return True - if timeout == 0: - self.module.fail_json(msg='Reached timeout while waiting for rolling back VM snapshot. 
Last line in task before timeout: %s' % - self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) - - time.sleep(1) - timeout -= 1 - return False - - -def main(): - module_args = proxmox_auth_argument_spec() - snap_args = dict( - vmid=dict(required=False), - hostname=dict(), - timeout=dict(type='int', default=30), - state=dict(default='present', choices=['present', 'absent', 'rollback']), - description=dict(type='str'), - snapname=dict(type='str', default='ansible_snap'), - force=dict(type='bool', default=False), - unbind=dict(type='bool', default=False), - vmstate=dict(type='bool', default=False), - retention=dict(type='int', default=0), - ) - module_args.update(snap_args) - - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=True - ) - - proxmox = ProxmoxSnapAnsible(module) - - state = module.params['state'] - vmid = module.params['vmid'] - hostname = module.params['hostname'] - description = module.params['description'] - snapname = module.params['snapname'] - timeout = module.params['timeout'] - force = module.params['force'] - unbind = module.params['unbind'] - vmstate = module.params['vmstate'] - retention = module.params['retention'] - - # If hostname is set get the VM id from ProxmoxAPI - if not vmid and hostname: - vmid = proxmox.get_vmid(hostname) - elif not vmid: - module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state) - - vm = proxmox.get_vm(vmid) - - if state == 'present': - try: - for i in proxmox.snapshot(vm, vmid).get(): - if i['name'] == snapname: - module.exit_json(changed=False, msg="Snapshot %s is already present" % snapname) - - if proxmox.snapshot_create(vm, vmid, timeout, snapname, description, vmstate, unbind, retention): - if module.check_mode: - module.exit_json(changed=False, msg="Snapshot %s would be created" % snapname) - else: - module.exit_json(changed=True, msg="Snapshot %s created" % snapname) - - except Exception as e: - module.fail_json(msg="Creating snapshot %s of VM %s failed with exception: %s" % (snapname, vmid, to_native(e))) - - elif state == 'absent': - try: - snap_exist = False - - for i in proxmox.snapshot(vm, vmid).get(): - if i['name'] == snapname: - snap_exist = True - continue - - if not snap_exist: - module.exit_json(changed=False, msg="Snapshot %s does not exist" % snapname) - else: - if proxmox.snapshot_remove(vm, vmid, timeout, snapname, force): - if module.check_mode: - module.exit_json(changed=False, msg="Snapshot %s would be removed" % snapname) - else: - module.exit_json(changed=True, msg="Snapshot %s removed" % snapname) - - except Exception as e: - module.fail_json(msg="Removing snapshot %s of VM %s failed with exception: %s" % (snapname, vmid, to_native(e))) - elif state == 'rollback': - try: - snap_exist = False - - for i in proxmox.snapshot(vm, vmid).get(): - if i['name'] == snapname: - snap_exist = True - continue - - if not snap_exist: - module.exit_json(changed=False, msg="Snapshot %s does not exist" % snapname) - if proxmox.snapshot_rollback(vm, vmid, timeout, snapname): - if module.check_mode: - module.exit_json(changed=True, msg="Snapshot %s would be rolled back" % snapname) - else: - module.exit_json(changed=True, msg="Snapshot %s rolled back" % snapname) - - except Exception as e: - module.fail_json(msg="Rollback of snapshot %s of VM %s failed with exception: %s" % (snapname, vmid, to_native(e))) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/proxmox_storage_contents_info.py 
b/plugins/modules/proxmox_storage_contents_info.py deleted file mode 100644 index e0e95565d7..0000000000 --- a/plugins/modules/proxmox_storage_contents_info.py +++ /dev/null @@ -1,147 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright Julian Vanden Broeck (@l00ptr) -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - - -DOCUMENTATION = r""" -module: proxmox_storage_contents_info -short_description: List content from a Proxmox VE storage -version_added: 8.2.0 -description: - - Retrieves information about stored objects on a specific storage attached to a node. -attributes: - action_group: - version_added: 9.0.0 -options: - storage: - description: - - Only return content stored on that specific storage. - aliases: ['name'] - type: str - required: true - node: - description: - - Proxmox node to which the storage is attached. - type: str - required: true - content: - description: - - Filter on a specific content type. - type: str - choices: ["all", "backup", "rootdir", "images", "iso"] - default: "all" - vmid: - description: - - Filter on a specific VMID. - type: int -author: Julian Vanden Broeck (@l00ptr) -extends_documentation_fragment: - - community.general.proxmox.actiongroup_proxmox - - community.general.proxmox.documentation - - community.general.attributes - - community.general.attributes.info_module -""" - - -EXAMPLES = r""" -- name: List existing storages - community.general.proxmox_storage_contents_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - storage: lvm2 - content: backup - vmid: 130 -""" - - -RETURN = r""" -proxmox_storage_content: - description: Content of of storage attached to a node. - type: list - returned: success - elements: dict - contains: - content: - description: Proxmox content of listed objects on this storage. - type: str - returned: success - ctime: - description: Creation time of the listed objects. - type: str - returned: success - format: - description: Format of the listed objects (can be V(raw), V(pbs-vm), V(iso),...). - type: str - returned: success - size: - description: Size of the listed objects. - type: int - returned: success - subtype: - description: Subtype of the listed objects (can be V(qemu) or V(lxc)). - type: str - returned: When storage is dedicated to backup, typically on PBS storage. - verification: - description: Backup verification status of the listed objects. - type: dict - returned: When storage is dedicated to backup, typically on PBS storage. - sample: { - "state": "ok", - "upid": "UPID:backup-srv:00130F49:1A12D8375:00001CD7:657A2258:verificationjob:daily\\x3av\\x2dd0cc18c5\\x2d8707:root@pam:" - } - volid: - description: Volume identifier of the listed objects. 
- type: str - returned: success -""" - - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.proxmox import ( - ProxmoxAnsible, proxmox_auth_argument_spec) - - -def proxmox_storage_info_argument_spec(): - return dict( - storage=dict(type="str", required=True, aliases=["name"]), - content=dict(type="str", required=False, default="all", choices=["all", "backup", "rootdir", "images", "iso"]), - vmid=dict(type="int"), - node=dict(required=True, type="str"), - ) - - -def main(): - module_args = proxmox_auth_argument_spec() - storage_info_args = proxmox_storage_info_argument_spec() - module_args.update(storage_info_args) - - module = AnsibleModule( - argument_spec=module_args, - required_one_of=[("api_password", "api_token_id")], - required_together=[("api_token_id", "api_token_secret")], - supports_check_mode=True, - ) - result = dict(changed=False) - proxmox = ProxmoxAnsible(module) - res = proxmox.get_storage_content( - node=module.params["node"], - storage=module.params["storage"], - content=None if module.params["content"] == "all" else module.params["content"], - vmid=module.params["vmid"], - ) - result["proxmox_storage_content"] = res - module.exit_json(**result) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/proxmox_storage_info.py b/plugins/modules/proxmox_storage_info.py deleted file mode 100644 index 5b9b1b6aaa..0000000000 --- a/plugins/modules/proxmox_storage_info.py +++ /dev/null @@ -1,194 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright Tristan Le Guern (@tleguern) -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r""" -module: proxmox_storage_info -short_description: Retrieve information about one or more Proxmox VE storages -version_added: 2.2.0 -description: - - Retrieve information about one or more Proxmox VE storages. -attributes: - action_group: - version_added: 9.0.0 -options: - storage: - description: - - Only return information on a specific storage. - aliases: ['name'] - type: str - type: - description: - - Filter on a specific storage type. - type: str -author: Tristan Le Guern (@tleguern) -extends_documentation_fragment: - - community.general.proxmox.actiongroup_proxmox - - community.general.proxmox.documentation - - community.general.attributes - - community.general.attributes.info_module -notes: - - Storage specific options can be returned by this module, please look at the documentation at U(https://pve.proxmox.com/wiki/Storage). 
-""" - - -EXAMPLES = r""" -- name: List existing storages - community.general.proxmox_storage_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - register: proxmox_storages - -- name: List NFS storages only - community.general.proxmox_storage_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - type: nfs - register: proxmox_storages_nfs - -- name: Retrieve information about the lvm2 storage - community.general.proxmox_storage_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - storage: lvm2 - register: proxmox_storage_lvm -""" - - -RETURN = r""" -proxmox_storages: - description: List of storage pools. - returned: on success - type: list - elements: dict - contains: - content: - description: Proxmox content types available in this storage. - returned: on success - type: list - elements: str - digest: - description: Storage's digest. - returned: on success - type: str - nodes: - description: List of nodes associated to this storage. - returned: on success, if storage is not local - type: list - elements: str - path: - description: Physical path to this storage. - returned: on success - type: str - prune-backups: - description: Backup retention options. - returned: on success - type: list - elements: dict - shared: - description: Is this storage shared. - returned: on success - type: bool - storage: - description: Storage name. - returned: on success - type: str - type: - description: Storage type. - returned: on success - type: str -""" - - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.proxmox import ( - proxmox_auth_argument_spec, ProxmoxAnsible, proxmox_to_ansible_bool) - - -class ProxmoxStorageInfoAnsible(ProxmoxAnsible): - def get_storage(self, storage): - try: - storage = self.proxmox_api.storage.get(storage) - except Exception: - self.module.fail_json(msg="Storage '%s' does not exist" % storage) - return ProxmoxStorage(storage) - - def get_storages(self, type=None): - storages = self.proxmox_api.storage.get(type=type) - storages = [ProxmoxStorage(storage) for storage in storages] - return storages - - -class ProxmoxStorage: - def __init__(self, storage): - self.storage = storage - # Convert proxmox representation of lists, dicts and boolean for easier - # manipulation within ansible. 
- if 'shared' in self.storage: - self.storage['shared'] = proxmox_to_ansible_bool(self.storage['shared']) - if 'content' in self.storage: - self.storage['content'] = self.storage['content'].split(',') - if 'nodes' in self.storage: - self.storage['nodes'] = self.storage['nodes'].split(',') - if 'prune-backups' in storage: - options = storage['prune-backups'].split(',') - self.storage['prune-backups'] = dict() - for option in options: - k, v = option.split('=') - self.storage['prune-backups'][k] = v - - -def proxmox_storage_info_argument_spec(): - return dict( - storage=dict(type='str', aliases=['name']), - type=dict(type='str'), - ) - - -def main(): - module_args = proxmox_auth_argument_spec() - storage_info_args = proxmox_storage_info_argument_spec() - module_args.update(storage_info_args) - - module = AnsibleModule( - argument_spec=module_args, - required_one_of=[('api_password', 'api_token_id')], - required_together=[('api_token_id', 'api_token_secret')], - mutually_exclusive=[('storage', 'type')], - supports_check_mode=True - ) - result = dict( - changed=False - ) - - proxmox = ProxmoxStorageInfoAnsible(module) - storage = module.params['storage'] - storagetype = module.params['type'] - - if storage: - storages = [proxmox.get_storage(storage)] - else: - storages = proxmox.get_storages(type=storagetype) - result['proxmox_storages'] = [storage.storage for storage in storages] - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/proxmox_tasks_info.py b/plugins/modules/proxmox_tasks_info.py deleted file mode 100644 index 574a971427..0000000000 --- a/plugins/modules/proxmox_tasks_info.py +++ /dev/null @@ -1,188 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2021, Andreas Botzner (@paginabianca) -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r""" -module: proxmox_tasks_info -short_description: Retrieve information about one or more Proxmox VE tasks -version_added: 3.8.0 -description: - - Retrieve information about one or more Proxmox VE tasks. -author: 'Andreas Botzner (@paginabianca) ' -attributes: - action_group: - version_added: 9.0.0 -options: - node: - description: - - Node where to get tasks. - required: true - type: str - task: - description: - - Return specific task. 
- aliases: ['upid', 'name'] - type: str -extends_documentation_fragment: - - community.general.proxmox.actiongroup_proxmox - - community.general.proxmox.documentation - - community.general.attributes - - community.general.attributes.info_module -""" - - -EXAMPLES = r""" -- name: List tasks on node01 - community.general.proxmox_tasks_info: - api_host: proxmoxhost - api_user: root@pam - api_password: '{{ password | default(omit) }}' - api_token_id: '{{ token_id | default(omit) }}' - api_token_secret: '{{ token_secret | default(omit) }}' - node: node01 - register: result - -- name: Retrieve information about specific tasks on node01 - community.general.proxmox_tasks_info: - api_host: proxmoxhost - api_user: root@pam - api_password: '{{ password | default(omit) }}' - api_token_id: '{{ token_id | default(omit) }}' - api_token_secret: '{{ token_secret | default(omit) }}' - task: 'UPID:node01:00003263:16167ACE:621EE230:srvreload:networking:root@pam:' - node: node01 - register: proxmox_tasks -""" - - -RETURN = r""" -proxmox_tasks: - description: List of tasks. - returned: on success - type: list - elements: dict - contains: - id: - description: ID of the task. - returned: on success - type: str - node: - description: Node name. - returned: on success - type: str - pid: - description: PID of the task. - returned: on success - type: int - pstart: - description: Pastart of the task. - returned: on success - type: int - starttime: - description: Starting time of the task. - returned: on success - type: int - type: - description: Type of the task. - returned: on success - type: str - upid: - description: UPID of the task. - returned: on success - type: str - user: - description: User that owns the task. - returned: on success - type: str - endtime: - description: Endtime of the task. - returned: on success, can be absent - type: int - status: - description: Status of the task. - returned: on success, can be absent - type: str - failed: - description: If the task failed. - returned: when status is defined - type: bool -msg: - description: Short message. 
- returned: on failure - type: str - sample: 'Task: UPID:xyz:xyz does not exist on node: proxmoxnode' -""" - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.proxmox import ( - proxmox_auth_argument_spec, ProxmoxAnsible) - - -class ProxmoxTaskInfoAnsible(ProxmoxAnsible): - def get_task(self, upid, node): - tasks = self.get_tasks(node) - for task in tasks: - if task.info['upid'] == upid: - return [task] - - def get_tasks(self, node): - tasks = self.proxmox_api.nodes(node).tasks.get() - return [ProxmoxTask(task) for task in tasks] - - -class ProxmoxTask: - def __init__(self, task): - self.info = dict() - for k, v in task.items(): - if k == 'status' and isinstance(v, str): - self.info[k] = v - if v != 'OK': - self.info['failed'] = True - else: - self.info[k] = v - - -def proxmox_task_info_argument_spec(): - return dict( - task=dict(type='str', aliases=['upid', 'name'], required=False), - node=dict(type='str', required=True), - ) - - -def main(): - module_args = proxmox_auth_argument_spec() - task_info_args = proxmox_task_info_argument_spec() - module_args.update(task_info_args) - - module = AnsibleModule( - argument_spec=module_args, - required_together=[('api_token_id', 'api_token_secret')], - required_one_of=[('api_password', 'api_token_id')], - supports_check_mode=True) - result = dict(changed=False) - - proxmox = ProxmoxTaskInfoAnsible(module) - upid = module.params['task'] - node = module.params['node'] - if upid: - tasks = proxmox.get_task(upid=upid, node=node) - else: - tasks = proxmox.get_tasks(node=node) - if tasks is not None: - result['proxmox_tasks'] = [task.info for task in tasks] - module.exit_json(**result) - else: - result['msg'] = 'Task: {0} does not exist on node: {1}.'.format( - upid, node) - module.fail_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/proxmox_template.py b/plugins/modules/proxmox_template.py deleted file mode 100644 index c9987a4a70..0000000000 --- a/plugins/modules/proxmox_template.py +++ /dev/null @@ -1,324 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r""" -module: proxmox_template -short_description: Management of OS templates in Proxmox VE cluster -description: - - Allows you to upload/delete templates in Proxmox VE cluster. -attributes: - check_mode: - support: none - diff_mode: - support: none - action_group: - version_added: 9.0.0 -options: - node: - description: - - Proxmox VE node on which to operate. - type: str - src: - description: - - Path to uploaded file. - - Exactly one of O(src) or O(url) is required for O(state=present). - type: path - url: - description: - - URL to file to download. - - Exactly one of O(src) or O(url) is required for O(state=present). - type: str - version_added: 10.1.0 - template: - description: - - The template name. - - Required for O(state=absent) to delete a template. - - Required for O(state=present) to download an appliance container template (pveam). - type: str - content_type: - description: - - Content type. - - Required only for O(state=present). - type: str - default: 'vztmpl' - choices: ['vztmpl', 'iso'] - storage: - description: - - Target storage. 
- type: str - default: 'local' - timeout: - description: - - Timeout for operations. - type: int - default: 30 - force: - description: - - It can only be used with O(state=present), existing template will be overwritten. - type: bool - default: false - state: - description: - - Indicate desired state of the template. - type: str - choices: ['present', 'absent'] - default: present -notes: - - Requires C(proxmoxer) and C(requests) modules on host. Those modules can be installed with M(ansible.builtin.pip). - - C(proxmoxer) >= 1.2.0 requires C(requests_toolbelt) to upload files larger than 256 MB. -author: Sergei Antipov (@UnderGreen) -extends_documentation_fragment: - - community.general.proxmox.actiongroup_proxmox - - community.general.proxmox.documentation - - community.general.attributes -""" - -EXAMPLES = r""" -- name: Upload new openvz template with minimal options - community.general.proxmox_template: - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - src: ~/ubuntu-14.04-x86_64.tar.gz - -- name: Pull new openvz template with minimal options - community.general.proxmox_template: - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - url: https://ubuntu-mirror/ubuntu-14.04-x86_64.tar.gz - -- name: > - Upload new openvz template with minimal options use environment - PROXMOX_PASSWORD variable(you should export it before) - community.general.proxmox_template: - node: uk-mc02 - api_user: root@pam - api_host: node1 - src: ~/ubuntu-14.04-x86_64.tar.gz - -- name: Upload new openvz template with all options and force overwrite - community.general.proxmox_template: - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - storage: local - content_type: vztmpl - src: ~/ubuntu-14.04-x86_64.tar.gz - force: true - -- name: Pull new openvz template with all options and force overwrite - community.general.proxmox_template: - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - storage: local - content_type: vztmpl - url: https://ubuntu-mirror/ubuntu-14.04-x86_64.tar.gz - force: true - -- name: Delete template with minimal options - community.general.proxmox_template: - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - template: ubuntu-14.04-x86_64.tar.gz - state: absent - -- name: Download proxmox appliance container template - community.general.proxmox_template: - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - storage: local - content_type: vztmpl - template: ubuntu-20.04-standard_20.04-1_amd64.tar.gz -""" - -import os -import time -import traceback - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible) -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion -from ansible.module_utils.six.moves.urllib.parse import urlparse - -REQUESTS_TOOLBELT_ERR = None -try: - # requests_toolbelt is used internally by proxmoxer module - import requests_toolbelt # noqa: F401, pylint: disable=unused-import - HAS_REQUESTS_TOOLBELT = True -except ImportError: - HAS_REQUESTS_TOOLBELT = False - REQUESTS_TOOLBELT_ERR = traceback.format_exc() - - -class ProxmoxTemplateAnsible(ProxmoxAnsible): - def has_template(self, node, storage, content_type, template): - volid = '%s:%s/%s' % (storage, content_type, template) - try: - return any(tmpl['volid'] == volid for tmpl in 
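- # The volid '<storage>:<content_type>/<template>' built above (for example
- # 'local:vztmpl/ubuntu-20.04-standard_20.04-1_amd64.tar.gz', as in the EXAMPLES)
- # is matched against every volume listed for this storage: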
self.proxmox_api.nodes(node).storage(storage).content.get()) - except Exception as e: - self.module.fail_json(msg="Failed to retrieve template '%s': %s" % (volid, e)) - - def task_status(self, node, taskid, timeout): - """ - Check the task status and wait until the task is completed or the timeout is reached. - """ - while timeout: - if self.api_task_ok(node, taskid): - return True - timeout = timeout - 1 - if timeout == 0: - self.module.fail_json(msg='Reached timeout while waiting for uploading/downloading template. Last line in task before timeout: %s' % - self.proxmox_api.nodes(node).tasks(taskid).log.get()[:1]) - - time.sleep(1) - return False - - def upload_template(self, node, storage, content_type, realpath, timeout): - stats = os.stat(realpath) - if (LooseVersion(self.proxmoxer_version) >= LooseVersion('1.2.0') and - stats.st_size > 268435456 and not HAS_REQUESTS_TOOLBELT): - self.module.fail_json(msg="'requests_toolbelt' module is required to upload files larger than 256MB", - exception=missing_required_lib('requests_toolbelt')) - - try: - taskid = self.proxmox_api.nodes(node).storage(storage).upload.post(content=content_type, filename=open(realpath, 'rb')) - return self.task_status(node, taskid, timeout) - except Exception as e: - self.module.fail_json(msg="Uploading template %s failed with error: %s" % (realpath, e)) - - def fetch_template(self, node, storage, content_type, url, timeout): - """Fetch a template from a web url source using the proxmox download-url endpoint - """ - try: - taskid = self.proxmox_api.nodes(node).storage(storage)("download-url").post( - url=url, content=content_type, filename=os.path.basename(url) - ) - return self.task_status(node, taskid, timeout) - except Exception as e: - self.module.fail_json(msg="Fetching template from url %s failed with error: %s" % (url, e)) - - def download_template(self, node, storage, template, timeout): - try: - taskid = self.proxmox_api.nodes(node).aplinfo.post(storage=storage, template=template) - return self.task_status(node, taskid, timeout) - except Exception as e: - self.module.fail_json(msg="Downloading template %s failed with error: %s" % (template, e)) - - def delete_template(self, node, storage, content_type, template, timeout): - volid = '%s:%s/%s' % (storage, content_type, template) - self.proxmox_api.nodes(node).storage(storage).content.delete(volid) - while timeout: - if not self.has_template(node, storage, content_type, template): - return True - timeout = timeout - 1 - if timeout == 0: - self.module.fail_json(msg='Reached timeout while waiting for deleting template.') - - time.sleep(1) - return False - - -def main(): - module_args = proxmox_auth_argument_spec() - template_args = dict( - node=dict(), - src=dict(type='path'), - url=dict(), - template=dict(), - content_type=dict(default='vztmpl', choices=['vztmpl', 'iso']), - storage=dict(default='local'), - timeout=dict(type='int', default=30), - force=dict(type='bool', default=False), - state=dict(default='present', choices=['present', 'absent']), - ) - module_args.update(template_args) - - module = AnsibleModule( - argument_spec=module_args, - required_together=[('api_token_id', 'api_token_secret')], - required_one_of=[('api_password', 'api_token_id')], - required_if=[('state', 'absent', ['template'])], - mutually_exclusive=[("src", "url")], - ) - - proxmox = ProxmoxTemplateAnsible(module) - - state = module.params['state'] - node = module.params['node'] - storage = module.params['storage'] - timeout = module.params['timeout'] - - if state == 'present': - 
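- # Three delivery paths follow: pveam appliance download (template name only),
- # upload of a local file (src), or fetch from a URL (url); src and url are
- # declared mutually exclusive above, so at most one branch applies.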
content_type = module.params['content_type'] - src = module.params['src'] - url = module.params['url'] - - # download appliance template - if content_type == 'vztmpl' and not (src or url) : - template = module.params['template'] - - if not template: - module.fail_json(msg='template param for downloading appliance template is mandatory') - - if proxmox.has_template(node, storage, content_type, template) and not module.params['force']: - module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template)) - - if proxmox.download_template(node, storage, template, timeout): - module.exit_json(changed=True, msg='template with volid=%s:%s/%s downloaded' % (storage, content_type, template)) - - if not src and not url: - module.fail_json(msg='src or url param for uploading template file is mandatory') - elif not url: - template = os.path.basename(src) - if proxmox.has_template(node, storage, content_type, template) and not module.params['force']: - module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template)) - elif not (os.path.exists(src) and os.path.isfile(src)): - module.fail_json(msg='template file on path %s not exists' % src) - - if proxmox.upload_template(node, storage, content_type, src, timeout): - module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template)) - elif not src: - template = os.path.basename(urlparse(url).path) - if proxmox.has_template(node, storage, content_type, template): - if not module.params['force']: - module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template)) - elif not proxmox.delete_template(node, storage, content_type, template, timeout): - module.fail_json(changed=False, msg='failed to delete template with volid=%s:%s/%s' % (storage, content_type, template)) - - if proxmox.fetch_template(node, storage, content_type, url, timeout): - module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template)) - - elif state == 'absent': - try: - content_type = module.params['content_type'] - template = module.params['template'] - - if not proxmox.has_template(node, storage, content_type, template): - module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already deleted' % (storage, content_type, template)) - - if proxmox.delete_template(node, storage, content_type, template, timeout): - module.exit_json(changed=True, msg='template with volid=%s:%s/%s deleted' % (storage, content_type, template)) - except Exception as e: - module.fail_json(msg="deleting of template %s failed with exception: %s" % (template, e)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/proxmox_user_info.py b/plugins/modules/proxmox_user_info.py deleted file mode 100644 index a8da1ee30a..0000000000 --- a/plugins/modules/proxmox_user_info.py +++ /dev/null @@ -1,260 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright Tristan Le Guern -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r""" -module: proxmox_user_info -short_description: Retrieve information about one or more Proxmox VE users -version_added: 1.3.0 -description: - - Retrieve information about one or more Proxmox VE 
users. -attributes: - action_group: - version_added: 9.0.0 -options: - domain: - description: - - Restrict results to a specific authentication realm. - aliases: ['realm'] - type: str - user: - description: - - Restrict results to a specific user. - aliases: ['name'] - type: str - userid: - description: - - Restrict results to a specific user ID, which is a concatenation of a user and domain parts. - type: str -author: Tristan Le Guern (@tleguern) -extends_documentation_fragment: - - community.general.proxmox.actiongroup_proxmox - - community.general.proxmox.documentation - - community.general.attributes - - community.general.attributes.info_module -""" - -EXAMPLES = r""" -- name: List existing users - community.general.proxmox_user_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - register: proxmox_users - -- name: List existing users in the pve authentication realm - community.general.proxmox_user_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - domain: pve - register: proxmox_users_pve - -- name: Retrieve information about admin@pve - community.general.proxmox_user_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - userid: admin@pve - register: proxmox_user_admin - -- name: Alternative way to retrieve information about admin@pve - community.general.proxmox_user_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - user: admin - domain: pve - register: proxmox_user_admin -""" - - -RETURN = r""" -proxmox_users: - description: List of users. - returned: always, but can be empty - type: list - elements: dict - contains: - comment: - description: Short description of the user. - returned: on success - type: str - domain: - description: User's authentication realm, also the right part of the user ID. - returned: on success - type: str - email: - description: User's email address. - returned: on success - type: str - enabled: - description: User's account state. - returned: on success - type: bool - expire: - description: Expiration date in seconds since EPOCH. Zero means no expiration. - returned: on success - type: int - firstname: - description: User's first name. - returned: on success - type: str - groups: - description: List of groups which the user is a member of. - returned: on success - type: list - elements: str - keys: - description: User's two factor authentication keys. - returned: on success - type: str - lastname: - description: User's last name. - returned: on success - type: str - tokens: - description: List of API tokens associated to the user. - returned: on success - type: list - elements: dict - contains: - comment: - description: Short description of the token. - returned: on success - type: str - expire: - description: Expiration date in seconds since EPOCH. Zero means no expiration. - returned: on success - type: int - privsep: - description: Describe if the API token is further restricted with ACLs or is fully privileged. 
- returned: on success - type: bool - tokenid: - description: Token name. - returned: on success - type: str - user: - description: User's login name, also the left part of the user ID. - returned: on success - type: str - userid: - description: Proxmox user ID, represented as user@realm. - returned: on success - type: str -""" - - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.proxmox import ( - proxmox_auth_argument_spec, ProxmoxAnsible, proxmox_to_ansible_bool) - - -class ProxmoxUserInfoAnsible(ProxmoxAnsible): - def get_user(self, userid): - try: - user = self.proxmox_api.access.users.get(userid) - except Exception: - self.module.fail_json(msg="User '%s' does not exist" % userid) - user['userid'] = userid - return ProxmoxUser(user) - - def get_users(self, domain=None): - users = self.proxmox_api.access.users.get(full=1) - users = [ProxmoxUser(user) for user in users] - if domain: - return [user for user in users if user.user['domain'] == domain] - return users - - -class ProxmoxUser: - def __init__(self, user): - self.user = dict() - # Data representation is not the same depending on API calls - for k, v in user.items(): - if k == 'enable': - self.user['enabled'] = proxmox_to_ansible_bool(user['enable']) - elif k == 'userid': - self.user['user'] = user['userid'].split('@')[0] - self.user['domain'] = user['userid'].split('@')[1] - self.user[k] = v - elif k in ['groups', 'tokens'] and (v == '' or v is None): - self.user[k] = [] - elif k == 'groups' and isinstance(v, str): - self.user['groups'] = v.split(',') - elif k == 'tokens' and isinstance(v, list): - for token in v: - if 'privsep' in token: - token['privsep'] = proxmox_to_ansible_bool(token['privsep']) - self.user['tokens'] = v - elif k == 'tokens' and isinstance(v, dict): - self.user['tokens'] = list() - for tokenid, tokenvalues in v.items(): - t = tokenvalues - t['tokenid'] = tokenid - if 'privsep' in tokenvalues: - t['privsep'] = proxmox_to_ansible_bool(tokenvalues['privsep']) - self.user['tokens'].append(t) - else: - self.user[k] = v - - -def proxmox_user_info_argument_spec(): - return dict( - domain=dict(type='str', aliases=['realm']), - user=dict(type='str', aliases=['name']), - userid=dict(type='str'), - ) - - -def main(): - module_args = proxmox_auth_argument_spec() - user_info_args = proxmox_user_info_argument_spec() - module_args.update(user_info_args) - - module = AnsibleModule( - argument_spec=module_args, - required_one_of=[('api_password', 'api_token_id')], - required_together=[('api_token_id', 'api_token_secret')], - mutually_exclusive=[('user', 'userid'), ('domain', 'userid')], - supports_check_mode=True - ) - result = dict( - changed=False - ) - - proxmox = ProxmoxUserInfoAnsible(module) - domain = module.params['domain'] - user = module.params['user'] - if user and domain: - userid = user + '@' + domain - else: - userid = module.params['userid'] - - if userid: - users = [proxmox.get_user(userid=userid)] - else: - users = proxmox.get_users(domain=domain) - result['proxmox_users'] = [user.user for user in users] - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/proxmox_vm_info.py b/plugins/modules/proxmox_vm_info.py deleted file mode 100644 index 36ddea9db8..0000000000 --- a/plugins/modules/proxmox_vm_info.py +++ /dev/null @@ -1,285 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2023, Sergei Antipov -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = r""" -module: proxmox_vm_info -short_description: Retrieve information about one or more Proxmox VE virtual machines -version_added: 7.2.0 -description: - - Retrieve information about one or more Proxmox VE virtual machines. -author: 'Sergei Antipov (@UnderGreen) ' -attributes: - action_group: - version_added: 9.0.0 -options: - node: - description: - - Restrict results to a specific Proxmox VE node. - type: str - type: - description: - - Restrict results to a specific virtual machine(s) type. - type: str - choices: - - all - - qemu - - lxc - default: all - vmid: - description: - - Restrict results to a specific virtual machine by using its ID. - - If VM with the specified vmid does not exist in a cluster then resulting list will be empty. - type: int - name: - description: - - Restrict results to a specific virtual machine(s) by using their name. - - If VM(s) with the specified name do not exist in a cluster then the resulting list will be empty. - type: str - config: - description: - - Whether to retrieve the VM configuration along with VM status. - - If set to V(none) (default), no configuration will be returned. - - If set to V(current), the current running configuration will be returned. - - If set to V(pending), the configuration with pending changes applied will be returned. - type: str - choices: - - none - - current - - pending - default: none - version_added: 8.1.0 - network: - description: - - Whether to retrieve the current network status. - - Requires enabled/running qemu-guest-agent on qemu VMs. - type: bool - default: false - version_added: 9.1.0 -extends_documentation_fragment: - - community.general.proxmox.actiongroup_proxmox - - community.general.proxmox.documentation - - community.general.attributes - - community.general.attributes.info_module -""" - -EXAMPLES = r""" -- name: List all existing virtual machines on node - community.general.proxmox_vm_info: - api_host: proxmoxhost - api_user: root@pam - api_token_id: '{{ token_id | default(omit) }}' - api_token_secret: '{{ token_secret | default(omit) }}' - node: node01 - -- name: List all QEMU virtual machines on node - community.general.proxmox_vm_info: - api_host: proxmoxhost - api_user: root@pam - api_password: '{{ password | default(omit) }}' - node: node01 - type: qemu - -- name: Retrieve information about specific VM by ID - community.general.proxmox_vm_info: - api_host: proxmoxhost - api_user: root@pam - api_password: '{{ password | default(omit) }}' - node: node01 - type: qemu - vmid: 101 - -- name: Retrieve information about specific VM by name and get current configuration - community.general.proxmox_vm_info: - api_host: proxmoxhost - api_user: root@pam - api_password: '{{ password | default(omit) }}' - node: node01 - type: lxc - name: lxc05.home.arpa - config: current -""" - -RETURN = r""" -proxmox_vms: - description: List of virtual machines. 
- returned: on success - type: list - elements: dict - sample: - [ - { - "cpu": 0.258944410905281, - "cpus": 1, - "disk": 0, - "diskread": 0, - "diskwrite": 0, - "id": "qemu/100", - "maxcpu": 1, - "maxdisk": 34359738368, - "maxmem": 4294967296, - "mem": 35158379, - "name": "pxe.home.arpa", - "netin": 99715803, - "netout": 14237835, - "node": "pve", - "pid": 1947197, - "status": "running", - "template": False, - "type": "qemu", - "uptime": 135530, - "vmid": 100 - }, - { - "cpu": 0, - "cpus": 1, - "disk": 0, - "diskread": 0, - "diskwrite": 0, - "id": "qemu/101", - "maxcpu": 1, - "maxdisk": 0, - "maxmem": 536870912, - "mem": 0, - "name": "test1", - "netin": 0, - "netout": 0, - "node": "pve", - "status": "stopped", - "template": False, - "type": "qemu", - "uptime": 0, - "vmid": 101 - } - ] -""" - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.proxmox import ( - proxmox_auth_argument_spec, - ProxmoxAnsible, - proxmox_to_ansible_bool, -) - - -class ProxmoxVmInfoAnsible(ProxmoxAnsible): - def get_vms_from_cluster_resources(self): - try: - return self.proxmox_api.cluster().resources().get(type="vm") - except Exception as e: - self.module.fail_json( - msg="Failed to retrieve VMs information from cluster resources: %s" % e - ) - - def get_vms_from_nodes(self, cluster_machines, type, vmid=None, name=None, node=None, config=None, network=False): - # Leave in dict only machines that user wants to know about - filtered_vms = { - vm: info for vm, info in cluster_machines.items() if not ( - type != info["type"] - or (node and info["node"] != node) - or (vmid and int(info["vmid"]) != vmid) - or (name is not None and info["name"] != name) - ) - } - # Get list of unique node names and loop through it to get info about machines. - nodes = frozenset([info["node"] for vm, info in filtered_vms.items()]) - for this_node in nodes: - # "type" is mandatory and can have only values of "qemu" or "lxc". Seems that use of reflection is safe. 
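- # For example, type='qemu' resolves to self.proxmox_api.nodes(this_node).qemu,
- # so the call_vm_getter() call below issues GET /nodes/{node}/qemu.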
- call_vm_getter = getattr(self.proxmox_api.nodes(this_node), type) - vms_from_this_node = call_vm_getter().get() - for detected_vm in vms_from_this_node: - this_vm_id = int(detected_vm["vmid"]) - desired_vm = filtered_vms.get(this_vm_id, None) - if desired_vm: - desired_vm.update(detected_vm) - desired_vm["vmid"] = this_vm_id - desired_vm["template"] = proxmox_to_ansible_bool(desired_vm["template"]) - # When user wants to retrieve the VM configuration - if config != "none": - # pending = 0, current = 1 - config_type = 0 if config == "pending" else 1 - # GET /nodes/{node}/qemu/{vmid}/config current=[0/1] - desired_vm["config"] = call_vm_getter(this_vm_id).config().get(current=config_type) - if network: - if type == "qemu": - desired_vm["network"] = call_vm_getter(this_vm_id).agent("network-get-interfaces").get()['result'] - elif type == "lxc": - desired_vm["network"] = call_vm_getter(this_vm_id).interfaces.get() - - return filtered_vms - - def get_qemu_vms(self, cluster_machines, vmid=None, name=None, node=None, config=None, network=False): - try: - return self.get_vms_from_nodes(cluster_machines, "qemu", vmid, name, node, config, network) - except Exception as e: - self.module.fail_json(msg="Failed to retrieve QEMU VMs information: %s" % e) - - def get_lxc_vms(self, cluster_machines, vmid=None, name=None, node=None, config=None, network=False): - try: - return self.get_vms_from_nodes(cluster_machines, "lxc", vmid, name, node, config, network) - except Exception as e: - self.module.fail_json(msg="Failed to retrieve LXC VMs information: %s" % e) - - -def main(): - module_args = proxmox_auth_argument_spec() - vm_info_args = dict( - node=dict(type="str", required=False), - type=dict( - type="str", choices=["lxc", "qemu", "all"], default="all", required=False - ), - vmid=dict(type="int", required=False), - name=dict(type="str", required=False), - config=dict( - type="str", choices=["none", "current", "pending"], - default="none", required=False - ), - network=dict(type="bool", default=False, required=False), - ) - module_args.update(vm_info_args) - - module = AnsibleModule( - argument_spec=module_args, - required_together=[("api_token_id", "api_token_secret")], - required_one_of=[("api_password", "api_token_id")], - supports_check_mode=True, - ) - - proxmox = ProxmoxVmInfoAnsible(module) - node = module.params["node"] - type = module.params["type"] - vmid = module.params["vmid"] - name = module.params["name"] - config = module.params["config"] - network = module.params["network"] - - result = dict(changed=False) - - if node and proxmox.get_node(node) is None: - module.fail_json(msg="Node %s doesn't exist in PVE cluster" % node) - - vms_cluster_resources = proxmox.get_vms_from_cluster_resources() - cluster_machines = {int(machine["vmid"]): machine for machine in vms_cluster_resources} - vms = {} - - if type == "lxc": - vms = proxmox.get_lxc_vms(cluster_machines, vmid, name, node, config, network) - elif type == "qemu": - vms = proxmox.get_qemu_vms(cluster_machines, vmid, name, node, config, network) - else: - vms = proxmox.get_qemu_vms(cluster_machines, vmid, name, node, config, network) - vms.update(proxmox.get_lxc_vms(cluster_machines, vmid, name, node, config, network)) - - result["proxmox_vms"] = [info for vm, info in sorted(vms.items())] - module.exit_json(**result) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/pubnub_blocks.py b/plugins/modules/pubnub_blocks.py index 598b6b5af3..316fced4be 100644 --- a/plugins/modules/pubnub_blocks.py +++ 
b/plugins/modules/pubnub_blocks.py @@ -18,8 +18,8 @@ DOCUMENTATION = r""" module: pubnub_blocks short_description: PubNub blocks management module description: - - 'This module allows Ansible to interface with the PubNub BLOCKS infrastructure by providing the following operations: create / remove, start - / stop and rename for blocks and create / modify / remove for event handlers.' + - 'This module allows Ansible to interface with the PubNub BLOCKS infrastructure by providing the following operations: + create / remove, start / stop and rename for blocks and create / modify / remove for event handlers.' author: - PubNub (@pubnub) - Sergey Mamontov (@parfeon) @@ -49,20 +49,20 @@ options: default: '' cache: description: >- - In case if single play use blocks management module few times it is preferred to enabled 'caching' by making previous module to share gathered - artifacts and pass them to this parameter. + In case if single play use blocks management module few times it is preferred to enabled 'caching' by making previous + module to share gathered artifacts and pass them to this parameter. required: false type: dict default: {} account: description: - - Name of PubNub account for from which O(application) will be used to manage blocks. - - User's account will be used if value not set or empty. + - Name of PubNub account for from which O(application) is used to manage blocks. + - User's account is used if value not set or empty. type: str default: '' application: description: - - Name of target PubNub application for which blocks configuration on specific O(keyset) will be done. + - Name of target PubNub application for which blocks configuration on specific O(keyset) is done. type: str required: true keyset: @@ -72,35 +72,35 @@ options: required: true state: description: - - Intended block state after event handlers creation / update process will be completed. + - Intended block state after event handlers creation / update process is completed. required: false default: 'present' choices: ['started', 'stopped', 'present', 'absent'] type: str name: description: - - Name of managed block which will be later visible on admin.pubnub.com. + - Name of managed block which is later visible on admin.pubnub.com. required: true type: str description: description: - - Short block description which will be later visible on admin.pubnub.com. Used only if block doesn't exists and won't change description - for existing block. + - Short block description which is later visible on U(https://admin.pubnub.com). + - Used only if block does not exists and does not change description for existing block. required: false type: str event_handlers: description: - List of event handlers which should be updated for specified block O(name). - - 'Each entry for new event handler should contain: V(name), V(src), V(channels), V(event). V(name) used as event handler name which can - be used later to make changes to it.' + - 'Each entry for new event handler should contain: V(name), V(src), V(channels), V(event). V(name) used as event handler + name which can be used later to make changes to it.' - C(src) is full path to file with event handler code. - V(channels) is name of channel from which event handler is waiting for events. - 'V(event) is type of event which is able to trigger event handler: V(js-before-publish), V(js-after-publish), V(js-after-presence).' - - Each entry for existing handlers should contain C(name) (so target handler can be identified). 
Rest parameters (C(src), C(channels) and - C(event)) can be added if changes required for them. - - It is possible to rename event handler by adding C(changes) key to event handler payload and pass dictionary, which will contain single - key C(name), where new name should be passed. - - To remove particular event handler it is possible to set C(state) for it to C(absent) and it will be removed. + - Each entry for existing handlers should contain C(name) (so target handler can be identified). Rest parameters (C(src), + C(channels) and C(event)) can be added if changes required for them. + - It is possible to rename event handler by adding C(changes) key to event handler payload and pass dictionary, which + contains single key C(name), where new name should be passed. + - To remove particular event handler it is possible to set C(state) for it to C(absent) and it is removed. required: false default: [] type: list @@ -114,8 +114,8 @@ options: type: dict validate_certs: description: - - This key allow to try skip certificates check when performing REST API calls. Sometimes host may have issues with certificates on it and - this will cause problems to call PubNub REST API. + - This key allow to try skip certificates check when performing REST API calls. Sometimes host may have issues with + certificates on it and this causes problems to call PubNub REST API. - If check should be ignored V(false) should be passed to this parameter. required: false default: true @@ -208,8 +208,8 @@ EXAMPLES = r""" RETURN = r""" module_cache: description: - - Cached account information. In case if with single play module used few times it is better to pass cached data to next module calls to speed - up process. + - Cached account information. In case if with single play module used few times it is better to pass cached data to next + module calls to speed up process. type: dict returned: always """ @@ -532,9 +532,9 @@ def _content_of_file_at_path(path): def main(): fields = dict( - email=dict(default='', required=False, type='str'), - password=dict(default='', required=False, type='str', no_log=True), - account=dict(default='', required=False, type='str'), + email=dict(default='', type='str'), + password=dict(default='', type='str', no_log=True), + account=dict(default='', type='str'), application=dict(required=True, type='str'), keyset=dict(required=True, type='str', no_log=False), state=dict(default='present', type='str', diff --git a/plugins/modules/pulp_repo.py b/plugins/modules/pulp_repo.py index 142c5f66f4..cc7f001837 100644 --- a/plugins/modules/pulp_repo.py +++ b/plugins/modules/pulp_repo.py @@ -34,9 +34,9 @@ options: type: str force_basic_auth: description: - - C(httplib2), the library used by the M(ansible.builtin.uri) module only sends authentication information when a webservice responds to an - initial request with a 401 status. Since some basic auth services do not properly send a 401, logins will fail. This option forces the - sending of the Basic authentication header upon initial request. + - C(httplib2), the library used by the M(ansible.builtin.uri) module only sends authentication information when a webservice + responds to an initial request with a 401 status. Since some basic auth services do not properly send a 401, logins + fail. This option forces the sending of the Basic authentication header upon initial request. 
type: bool default: false generate_sqlite: @@ -47,20 +47,22 @@ options: default: false feed_ca_cert: description: - - CA certificate string used to validate the feed source SSL certificate. This can be the file content or the path to the file. + - CA certificate string used to validate the feed source SSL certificate. This can be the file content or the path to + the file. type: str aliases: [importer_ssl_ca_cert] feed_client_cert: description: - - Certificate used as the client certificate when synchronizing the repository. This is used to communicate authentication information to - the feed source. The value to this option must be the full path to the certificate. The specified file may be the certificate itself or - a single file containing both the certificate and private key. This can be the file content or the path to the file. + - Certificate used as the client certificate when synchronizing the repository. This is used to communicate authentication + information to the feed source. The value to this option must be the full path to the certificate. The specified file + may be the certificate itself or a single file containing both the certificate and private key. This can be the file + content or the path to the file. type: str aliases: [importer_ssl_client_cert] feed_client_key: description: - - Private key to the certificate specified in O(feed_client_cert), assuming it is not included in the certificate file itself. This can - be the file content or the path to the file. + - Private key to the certificate specified in O(feed_client_cert), assuming it is not included in the certificate file + itself. This can be the file content or the path to the file. type: str aliases: [importer_ssl_client_key] name: @@ -71,7 +73,7 @@ options: aliases: [repo] proxy_host: description: - - Proxy url setting for the pulp repository importer. This is in the format scheme://host. + - Proxy URL setting for the pulp repository importer. This is in the format V(scheme://host). required: false default: type: str @@ -129,21 +131,22 @@ options: default: true state: description: - - The repo state. A state of V(sync) will queue a sync of the repo. This is asynchronous but not delayed like a scheduled sync. A state - of V(publish) will use the repository's distributor to publish the content. + - The repo state. A state of V(sync) queues a sync of the repo. This is asynchronous but not delayed like a scheduled + sync. A state of V(publish) uses the repository's distributor to publish the content. default: present choices: ["present", "absent", "sync", "publish"] type: str url_password: description: - - The password for use in HTTP basic authentication to the pulp API. If the O(url_username) parameter is not specified, the O(url_password) - parameter will not be used. + - The password for use in HTTP basic authentication to the pulp API. If the O(url_username) parameter is not specified, + the O(url_password) parameter is not used. url_username: description: - The username for use in HTTP basic authentication to the pulp API. validate_certs: description: - - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed + certificates. 
type: bool default: true wait_for_completion: @@ -152,7 +155,8 @@ options: type: bool default: false notes: - - This module can currently only create distributors and importers on rpm repositories. Contributions to support other repo types are welcome. + - This module can currently only create distributors and importers on rpm repositories. Contributions to support other repo + types are welcome. extends_documentation_fragment: - ansible.builtin.url - community.general.attributes @@ -579,29 +583,20 @@ def main(): if importer_ssl_ca_cert is not None: importer_ssl_ca_cert_file_path = os.path.abspath(importer_ssl_ca_cert) if os.path.isfile(importer_ssl_ca_cert_file_path): - importer_ssl_ca_cert_file_object = open(importer_ssl_ca_cert_file_path, 'r') - try: + with open(importer_ssl_ca_cert_file_path, 'r') as importer_ssl_ca_cert_file_object: importer_ssl_ca_cert = importer_ssl_ca_cert_file_object.read() - finally: - importer_ssl_ca_cert_file_object.close() if importer_ssl_client_cert is not None: importer_ssl_client_cert_file_path = os.path.abspath(importer_ssl_client_cert) if os.path.isfile(importer_ssl_client_cert_file_path): - importer_ssl_client_cert_file_object = open(importer_ssl_client_cert_file_path, 'r') - try: + with open(importer_ssl_client_cert_file_path, 'r') as importer_ssl_client_cert_file_object: importer_ssl_client_cert = importer_ssl_client_cert_file_object.read() - finally: - importer_ssl_client_cert_file_object.close() if importer_ssl_client_key is not None: importer_ssl_client_key_file_path = os.path.abspath(importer_ssl_client_key) if os.path.isfile(importer_ssl_client_key_file_path): - importer_ssl_client_key_file_object = open(importer_ssl_client_key_file_path, 'r') - try: + with open(importer_ssl_client_key_file_path, 'r') as importer_ssl_client_key_file_object: importer_ssl_client_key = importer_ssl_client_key_file_object.read() - finally: - importer_ssl_client_key_file_object.close() server = pulp_server(module, pulp_host, repo_type, wait_for_completion=wait_for_completion) server.set_repo_list() diff --git a/plugins/modules/puppet.py b/plugins/modules/puppet.py index 332ab17d17..a1ab66efc6 100644 --- a/plugins/modules/puppet.py +++ b/plugins/modules/puppet.py @@ -66,8 +66,8 @@ options: logdest: description: - Where the puppet logs should go, if puppet apply is being used. - - V(all) will go to both C(console) and C(syslog). - - V(stdout) will be deprecated and replaced by C(console). + - V(all) goes to both C(console) and C(syslog). + - V(stdout) is deprecated and replaced by C(console). type: str choices: [all, stdout, syslog] default: stdout @@ -103,7 +103,8 @@ options: waitforlock: description: - The maximum amount of time C(puppet) should wait for an already running C(puppet) agent to finish before starting. - - If a number without unit is provided, it is assumed to be a number of seconds. Allowed units are V(m) for minutes and V(h) for hours. + - If a number without unit is provided, it is assumed to be a number of seconds. Allowed units are V(m) for minutes + and V(h) for hours. type: str version_added: 9.0.0 verbose: @@ -125,10 +126,10 @@ options: description: - The lang environment to use when running the puppet agent. - The default value, V(C), is supported on every system, but can lead to encoding errors if UTF-8 is used in the output. - - Use V(C.UTF-8) or V(en_US.UTF-8) or similar UTF-8 supporting locales in case of problems. You need to make sure the selected locale is - supported on the system the puppet agent runs on. 
- - Starting with community.general 9.1.0, you can use the value V(auto) and the module will try and determine the best parseable locale to - use. + - Use V(C.UTF-8) or V(en_US.UTF-8) or similar UTF-8 supporting locales in case of problems. You need to make sure the + selected locale is supported on the system the puppet agent runs on. + - Starting with community.general 9.1.0, you can use the value V(auto) and the module tries to determine the best parseable + locale to use. type: str default: C version_added: 8.6.0 diff --git a/plugins/modules/pushbullet.py b/plugins/modules/pushbullet.py index 32a659922a..990ac36525 100644 --- a/plugins/modules/pushbullet.py +++ b/plugins/modules/pushbullet.py @@ -32,7 +32,8 @@ options: channel: type: str description: - - The channel TAG you wish to broadcast a push notification, as seen on the "My Channels" > "Edit your channel" at Pushbullet page. + - The channel TAG you wish to broadcast a push notification, as seen on the "My Channels" > "Edit your channel" at Pushbullet + page. device: type: str description: @@ -80,7 +81,7 @@ EXAMPLES = r""" community.general.pushbullet: api_key: ABC123abc123ABC123abc123ABC123ab channel: my-awesome-channel - title: Broadcasting a message to the #my-awesome-channel folks + title: "Broadcasting a message to the #my-awesome-channel folks" - name: Sends a push notification with title and body to a channel community.general.pushbullet: @@ -113,12 +114,12 @@ def main(): module = AnsibleModule( argument_spec=dict( api_key=dict(type='str', required=True, no_log=True), - channel=dict(type='str', default=None), - device=dict(type='str', default=None), + channel=dict(type='str'), + device=dict(type='str'), push_type=dict(type='str', default="note", choices=['note', 'link']), title=dict(type='str', required=True), - body=dict(type='str', default=None), - url=dict(type='str', default=None), + body=dict(type='str'), + url=dict(type='str'), ), mutually_exclusive=( ['channel', 'device'], diff --git a/plugins/modules/pushover.py b/plugins/modules/pushover.py index ae57411531..dcfce34a06 100644 --- a/plugins/modules/pushover.py +++ b/plugins/modules/pushover.py @@ -15,7 +15,7 @@ short_description: Send notifications through U(https://pushover.net) description: - Send notifications through pushover to subscriber list of devices and email addresses. Requires pushover app on devices. notes: - - You will require a pushover.net account to use this module. But no account is required to receive messages. + - You need a pushover.net account to use this module. But no account is required to receive messages. extends_documentation_fragment: - community.general.attributes attributes: @@ -140,7 +140,7 @@ def main(): msg=dict(required=True), app_token=dict(required=True, no_log=True), user_key=dict(required=True, no_log=True), - pri=dict(required=False, default='0', choices=['-2', '-1', '0', '1', '2']), + pri=dict(default='0', choices=['-2', '-1', '0', '1', '2']), device=dict(type='str'), ), ) diff --git a/plugins/modules/python_requirements_info.py b/plugins/modules/python_requirements_info.py index 17432583e3..cbe93dd944 100644 --- a/plugins/modules/python_requirements_info.py +++ b/plugins/modules/python_requirements_info.py @@ -20,9 +20,10 @@ options: type: list elements: str description: - - 'A list of version-likes or module names to check for installation. Supported operators: C(<), C(>), C(<=), C(>=), or C(==).' 
- - The bare module name like V(ansible), the module with a specific version like V(boto3==1.6.1), - or a partial version like V(requests>2) are all valid specifications. + - 'A list of version-likes or module names to check for installation. Supported operators: C(<), C(>), C(<=), C(>=), + or C(==).' + - The bare module name like V(ansible), the module with a specific version like V(boto3==1.6.1), or a partial version + like V(requests>2) are all valid specifications. default: [] author: - Will Thames (@willthames) @@ -90,8 +91,8 @@ python_system_path: - /usr/local/opt/python@2/site-packages/ - /usr/lib/python/site-packages/ valid: - description: A dictionary of dependencies that matched their desired versions. If no version was specified, then RV(ignore:desired) will be - null. + description: A dictionary of dependencies that matched their desired versions. If no version was specified, then RV(ignore:desired) + is V(null). returned: always type: dict sample: diff --git a/plugins/modules/redfish_command.py b/plugins/modules/redfish_command.py index edbbb18e5f..f8578488e9 100644 --- a/plugins/modules/redfish_command.py +++ b/plugins/modules/redfish_command.py @@ -18,6 +18,7 @@ description: - Manages system power ex. on, off, graceful and forced reboot. extends_documentation_fragment: - community.general.attributes + - community.general.redfish attributes: check_mode: support: none @@ -63,7 +64,8 @@ options: aliases: [account_id] description: - ID of account to delete/modify. - - Can also be used in account creation to work around vendor issues where the ID of the new user is required in the POST request. + - Can also be used in account creation to work around vendor issues where the ID of the new user is required in the + POST request. type: str new_username: required: false @@ -227,9 +229,9 @@ options: description: - Custom OEM properties for HTTP Multipart Push updates. - If set, then O(update_custom_oem_header) is required too. - - The properties will be passed raw without any validation or conversion by Ansible. This means the content can be a file, a string, or - any other data. If the content is a dict that should be converted to JSON, then the content must be converted to JSON before passing it - to this module using the P(ansible.builtin.to_json#filter) filter. + - The properties are passed raw without any validation or conversion by Ansible. This means the content can be a file, + a string, or any other data. If the content is a dictionary that should be converted to JSON, then the content must + be converted to JSON before passing it to this module using the P(ansible.builtin.to_json#filter) filter. type: raw version_added: '10.1.0' update_custom_oem_mime_type: @@ -323,15 +325,11 @@ options: default: 120 version_added: 9.1.0 ciphers: - required: false - description: - - SSL/TLS Ciphers to use for the request. - - When a list is provided, all ciphers are joined in order with V(:). - - See the L(OpenSSL Cipher List Format,https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html#CIPHER-LIST-FORMAT) for more details. - - The available ciphers is dependent on the Python and OpenSSL/LibreSSL versions. 
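python_requirements_info accepts bare names as well as constrained specs with the operators listed above. A rough sketch of how such a spec can be split and compared, using a regex and an operator table that stand in for (and simplify) the module's actual parsing:

```python
import operator
import re

OPS = {'<': operator.lt, '<=': operator.le, '>': operator.gt,
       '>=': operator.ge, '==': operator.eq}

def parse_spec(spec):
    # "requests>2" -> ("requests", ">", "2"); bare names carry no constraint.
    m = re.match(r'^([A-Za-z0-9_.-]+?)\s*(<=|>=|==|<|>)\s*(\S+)$', spec)
    if m is None:
        return spec, None, None
    return m.group(1), m.group(2), m.group(3)

def version_tuple(v):
    return tuple(int(p) for p in v.split('.') if p.isdigit())

name, op, wanted = parse_spec('requests>2')
installed = '2.32.3'  # hypothetical installed version
ok = op is None or OPS[op](version_tuple(installed), version_tuple(wanted))
print(name, ok)  # requests True
```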
- type: list - elements: str version_added: 9.2.0 + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 author: - "Jose Delarosa (@jose-delarosa)" @@ -832,27 +830,30 @@ return_values: returned: on success type: dict version_added: 6.1.0 - sample: { - "update_status": { - "handle": "/redfish/v1/TaskService/TaskMonitors/735", - "messages": [], - "resets_requested": [], - "ret": true, - "status": "New" + sample: + { + "update_status": { + "handle": "/redfish/v1/TaskService/TaskMonitors/735", + "messages": [], + "resets_requested": [], + "ret": true, + "status": "New" + } } - } """ from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils, REDFISH_COMMON_ARGUMENT_SPEC from ansible.module_utils.common.text.converters import to_native # More will be added as module features are expanded CATEGORY_COMMANDS_ALL = { "Systems": ["PowerOn", "PowerForceOff", "PowerForceRestart", "PowerGracefulRestart", - "PowerGracefulShutdown", "PowerReboot", "PowerCycle", "SetOneTimeBoot", "EnableContinuousBootOverride", "DisableBootOverride", - "IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink", "VirtualMediaInsert", "VirtualMediaEject", "VerifyBiosAttributes"], + "PowerGracefulShutdown", "PowerReboot", "PowerCycle", "PowerFullPowerCycle", + "SetOneTimeBoot", "EnableContinuousBootOverride", "DisableBootOverride", + "IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink", "VirtualMediaInsert", + "VirtualMediaEject", "VerifyBiosAttributes"], "Chassis": ["IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink"], "Accounts": ["AddUser", "EnableUser", "DeleteUser", "DisableUser", "UpdateUserRole", "UpdateUserPassword", "UpdateUserName", @@ -869,67 +870,68 @@ CATEGORY_COMMANDS_ALL = { def main(): result = {} return_values = {} - module = AnsibleModule( - argument_spec=dict( - category=dict(required=True), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - session_uri=dict(), - id=dict(aliases=["account_id"]), - new_username=dict(aliases=["account_username"]), - new_password=dict(aliases=["account_password"], no_log=True), - roleid=dict(aliases=["account_roleid"]), - account_types=dict(type='list', elements='str', aliases=["account_accounttypes"]), - oem_account_types=dict(type='list', elements='str', aliases=["account_oemaccounttypes"]), - update_username=dict(type='str', aliases=["account_updatename"]), - account_properties=dict(type='dict', default={}), - bootdevice=dict(), - timeout=dict(type='int', default=60), - uefi_target=dict(), - boot_next=dict(), - boot_override_mode=dict(choices=['Legacy', 'UEFI']), - resource_id=dict(), - update_image_uri=dict(), - update_image_file=dict(type='path'), - update_protocol=dict(), - update_targets=dict(type='list', elements='str', default=[]), - update_oem_params=dict(type='dict'), - update_custom_oem_header=dict(type='str'), - update_custom_oem_mime_type=dict(type='str'), - update_custom_oem_params=dict(type='raw'), - update_creds=dict( - type='dict', - options=dict( - username=dict(), - password=dict(no_log=True) - ) - ), - update_apply_time=dict(choices=['Immediate', 'OnReset', 'AtMaintenanceWindowStart', - 'InMaintenanceWindowOnReset', 'OnStartUpdateRequest']), - update_handle=dict(), - virtual_media=dict( - type='dict', - options=dict( 
- media_types=dict(type='list', elements='str', default=[]), - image_url=dict(), - inserted=dict(type='bool', default=True), - write_protected=dict(type='bool', default=True), - username=dict(), - password=dict(no_log=True), - transfer_protocol_type=dict(), - transfer_method=dict(), - ) - ), - strip_etag_quotes=dict(type='bool', default=False), - reset_to_defaults_mode=dict(choices=['ResetAll', 'PreserveNetworkAndUsers', 'PreserveNetwork']), - bios_attributes=dict(type="dict"), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=120), - ciphers=dict(type='list', elements='str'), + argument_spec = dict( + category=dict(required=True), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + session_uri=dict(), + id=dict(aliases=["account_id"]), + new_username=dict(aliases=["account_username"]), + new_password=dict(aliases=["account_password"], no_log=True), + roleid=dict(aliases=["account_roleid"]), + account_types=dict(type='list', elements='str', aliases=["account_accounttypes"]), + oem_account_types=dict(type='list', elements='str', aliases=["account_oemaccounttypes"]), + update_username=dict(type='str', aliases=["account_updatename"]), + account_properties=dict(type='dict', default={}), + bootdevice=dict(), + timeout=dict(type='int', default=60), + uefi_target=dict(), + boot_next=dict(), + boot_override_mode=dict(choices=['Legacy', 'UEFI']), + resource_id=dict(), + update_image_uri=dict(), + update_image_file=dict(type='path'), + update_protocol=dict(), + update_targets=dict(type='list', elements='str', default=[]), + update_oem_params=dict(type='dict'), + update_custom_oem_header=dict(type='str'), + update_custom_oem_mime_type=dict(type='str'), + update_custom_oem_params=dict(type='raw'), + update_creds=dict( + type='dict', + options=dict( + username=dict(), + password=dict(no_log=True) + ) ), + update_apply_time=dict(choices=['Immediate', 'OnReset', 'AtMaintenanceWindowStart', + 'InMaintenanceWindowOnReset', 'OnStartUpdateRequest']), + update_handle=dict(), + virtual_media=dict( + type='dict', + options=dict( + media_types=dict(type='list', elements='str', default=[]), + image_url=dict(), + inserted=dict(type='bool', default=True), + write_protected=dict(type='bool', default=True), + username=dict(), + password=dict(no_log=True), + transfer_protocol_type=dict(), + transfer_method=dict(), + ) + ), + strip_etag_quotes=dict(type='bool', default=False), + reset_to_defaults_mode=dict(choices=['ResetAll', 'PreserveNetworkAndUsers', 'PreserveNetwork']), + bios_attributes=dict(type="dict"), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=120), + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) + module = AnsibleModule( + argument_spec, required_together=[ ('username', 'password'), ('update_custom_oem_header', 'update_custom_oem_params'), @@ -1002,14 +1004,10 @@ def main(): # BIOS Attributes options bios_attributes = module.params['bios_attributes'] - # ciphers - ciphers = module.params['ciphers'] - # Build root URI root_uri = "https://" + module.params['baseuri'] rf_utils = RedfishUtils(creds, root_uri, timeout, module, - resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes, - ciphers=ciphers) + resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes) # Check that Category is valid if category not in CATEGORY_COMMANDS_ALL: diff --git 
a/plugins/modules/redfish_config.py b/plugins/modules/redfish_config.py index e47597f73f..6eba0b0048 100644 --- a/plugins/modules/redfish_config.py +++ b/plugins/modules/redfish_config.py @@ -17,6 +17,7 @@ description: - Manages OOB controller configuration settings. extends_documentation_fragment: - community.general.attributes + - community.general.redfish attributes: check_mode: support: none @@ -136,7 +137,7 @@ options: storage_subsystem_id: required: false description: - - Id of the Storage Subsystem on which the volume is to be created. + - ID of the Storage Subsystem on which the volume is to be created. type: str default: '' version_added: '7.3.0' @@ -165,22 +166,27 @@ options: volume_details: required: false description: - - Setting dict of volume to be created. - - If C(CapacityBytes) key is not specified in this dictionary, the size of the volume will be determined by the Redfish service. It is possible - the size will not be the maximum available size. + - Setting dictionary of volume to be created. + - If C(CapacityBytes) key is not specified in this dictionary, the size of the volume is determined by the Redfish service. + It is possible the size is not the maximum available size. type: dict default: {} version_added: '7.5.0' - ciphers: - required: false + power_restore_policy: description: - - SSL/TLS Ciphers to use for the request. - - When a list is provided, all ciphers are joined in order with V(:). - - See the L(OpenSSL Cipher List Format,https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html#CIPHER-LIST-FORMAT) for more details. - - The available ciphers is dependent on the Python and OpenSSL/LibreSSL versions. - type: list - elements: str + - The desired power state of the system when power is restored after a power loss. 
+ type: str + choices: + - AlwaysOn + - AlwaysOff + - LastState + version_added: '10.5.0' + ciphers: version_added: 9.2.0 + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 author: - "Jose Delarosa (@jose-delarosa)" @@ -357,6 +363,15 @@ EXAMPLES = r""" Drives: - "/redfish/v1/Systems/1/Storage/DE00B000/Drives/1" +- name: Set PowerRestorePolicy + community.general.redfish_config: + category: Systems + command: SetPowerRestorePolicy + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + power_restore_policy: "AlwaysOff" + - name: Set service identification to {{ service_id }} community.general.redfish_config: category: Manager @@ -376,14 +391,15 @@ msg: """ from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils, REDFISH_COMMON_ARGUMENT_SPEC from ansible.module_utils.common.text.converters import to_native # More will be added as module features are expanded CATEGORY_COMMANDS_ALL = { "Systems": ["SetBiosDefaultSettings", "SetBiosAttributes", "SetBootOrder", - "SetDefaultBootOrder", "EnableSecureBoot", "SetSecureBoot", "DeleteVolumes", "CreateVolume"], + "SetDefaultBootOrder", "EnableSecureBoot", "SetSecureBoot", "DeleteVolumes", "CreateVolume", + "SetPowerRestorePolicy"], "Manager": ["SetNetworkProtocols", "SetManagerNic", "SetHostInterface", "SetServiceIdentification"], "Sessions": ["SetSessionService"], } @@ -391,39 +407,41 @@ CATEGORY_COMMANDS_ALL = { def main(): result = {} - module = AnsibleModule( - argument_spec=dict( - category=dict(required=True), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - bios_attributes=dict(type='dict', default={}), - timeout=dict(type='int', default=60), - boot_order=dict(type='list', elements='str', default=[]), - network_protocols=dict( - type='dict', - default={} - ), - resource_id=dict(), - service_id=dict(), - nic_addr=dict(default='null'), - nic_config=dict( - type='dict', - default={} - ), - strip_etag_quotes=dict(type='bool', default=False), - hostinterface_config=dict(type='dict', default={}), - hostinterface_id=dict(), - sessions_config=dict(type='dict', default={}), - storage_subsystem_id=dict(type='str', default=''), - storage_none_volume_deletion=dict(type='bool', default=False), - volume_ids=dict(type='list', default=[], elements='str'), - secure_boot_enable=dict(type='bool', default=True), - volume_details=dict(type='dict', default={}), - ciphers=dict(type='list', elements='str'), + argument_spec = dict( + category=dict(required=True), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + bios_attributes=dict(type='dict', default={}), + timeout=dict(type='int', default=60), + boot_order=dict(type='list', elements='str', default=[]), + network_protocols=dict( + type='dict', + default={} ), + resource_id=dict(), + service_id=dict(), + nic_addr=dict(default='null'), + nic_config=dict( + type='dict', + default={} + ), + strip_etag_quotes=dict(type='bool', default=False), + hostinterface_config=dict(type='dict', default={}), + hostinterface_id=dict(), + sessions_config=dict(type='dict', default={}), + storage_subsystem_id=dict(type='str', default=''), + 
storage_none_volume_deletion=dict(type='bool', default=False), + volume_ids=dict(type='list', default=[], elements='str'), + secure_boot_enable=dict(type='bool', default=True), + volume_details=dict(type='dict', default={}), + power_restore_policy=dict(choices=['AlwaysOn', 'AlwaysOff', 'LastState']), + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) + module = AnsibleModule( + argument_spec, required_together=[ ('username', 'password'), ], @@ -487,14 +505,13 @@ def main(): storage_subsystem_id = module.params['storage_subsystem_id'] storage_none_volume_deletion = module.params['storage_none_volume_deletion'] - # ciphers - ciphers = module.params['ciphers'] + # Power Restore Policy + power_restore_policy = module.params['power_restore_policy'] # Build root URI root_uri = "https://" + module.params['baseuri'] rf_utils = RedfishUtils(creds, root_uri, timeout, module, - resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes, - ciphers=ciphers) + resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes) # Check that Category is valid if category not in CATEGORY_COMMANDS_ALL: @@ -530,6 +547,8 @@ def main(): result = rf_utils.delete_volumes(storage_subsystem_id, volume_ids) elif command == "CreateVolume": result = rf_utils.create_volume(volume_details, storage_subsystem_id, storage_none_volume_deletion) + elif command == "SetPowerRestorePolicy": + result = rf_utils.set_power_restore_policy(power_restore_policy) elif category == "Manager": # execute only if we find a Manager service resource diff --git a/plugins/modules/redfish_info.py b/plugins/modules/redfish_info.py index e4e909ad48..fb87bef8c6 100644 --- a/plugins/modules/redfish_info.py +++ b/plugins/modules/redfish_info.py @@ -17,6 +17,7 @@ description: extends_documentation_fragment: - community.general.attributes - community.general.attributes.info_module + - community.general.redfish attributes: check_mode: version_added: 3.3.0 @@ -71,15 +72,11 @@ options: type: str version_added: '6.1.0' ciphers: - required: false - description: - - SSL/TLS Ciphers to use for the request. - - When a list is provided, all ciphers are joined in order with V(:). - - See the L(OpenSSL Cipher List Format,https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html#CIPHER-LIST-FORMAT) for more details. - - The available ciphers is dependent on the Python and OpenSSL/LibreSSL versions. 
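redfish_command, redfish_config, and redfish_info all receive the same mechanical refactor in this changeset: per-module options such as ciphers move into a shared REDFISH_COMMON_ARGUMENT_SPEC from redfish_utils that each main() merges into its own spec. A minimal sketch of the merge pattern itself, with a stand-in common spec rather than the real contents of redfish_utils (which, judging from the doc changes, also covers validate_certs and ca_path):

```python
# Stand-in for the shared spec exported by module_utils/redfish_utils.py.
REDFISH_COMMON_ARGUMENT_SPEC = dict(
    ciphers=dict(type='list', elements='str'),
)

def build_spec():
    argument_spec = dict(
        category=dict(required=True),
        baseuri=dict(required=True),
    )
    # dict.update() merges the shared options in place, so every redfish_*
    # module gains them without repeating the definitions.
    argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC)
    return argument_spec

print(sorted(build_spec()))  # ['baseuri', 'category', 'ciphers']
```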
- type: list - elements: str version_added: 9.2.0 + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 author: "Jose Delarosa (@jose-delarosa)" """ @@ -199,6 +196,14 @@ EXAMPLES = r""" username: "{{ username }}" password: "{{ password }}" +- name: Get configuration of the AccountService + community.general.redfish_info: + category: Accounts + command: GetAccountServiceConfig + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + - name: Get default system inventory and user information community.general.redfish_info: category: Systems,Accounts @@ -367,6 +372,14 @@ EXAMPLES = r""" username: "{{ username }}" password: "{{ password }}" +- name: Get power restore policy + community.general.redfish_info: + category: Systems + command: GetPowerRestorePolicy + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + - name: Check the availability of the service with a timeout of 5 seconds community.general.redfish_info: category: Service @@ -387,16 +400,17 @@ result: """ from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils, REDFISH_COMMON_ARGUMENT_SPEC CATEGORY_COMMANDS_ALL = { "Systems": ["GetSystemInventory", "GetPsuInventory", "GetCpuInventory", "GetMemoryInventory", "GetNicInventory", "GetHealthReport", "GetStorageControllerInventory", "GetDiskInventory", "GetVolumeInventory", - "GetBiosAttributes", "GetBootOrder", "GetBootOverride", "GetVirtualMedia", "GetBiosRegistries"], + "GetBiosAttributes", "GetBootOrder", "GetBootOverride", "GetVirtualMedia", "GetBiosRegistries", + "GetPowerRestorePolicy"], "Chassis": ["GetFanInventory", "GetPsuInventory", "GetChassisPower", "GetChassisThermals", "GetChassisInventory", "GetHealthReport", "GetHPEThermalConfig", "GetHPEFanPercentMin"], - "Accounts": ["ListUsers"], + "Accounts": ["ListUsers", "GetAccountServiceConfig"], "Sessions": ["GetSessions"], "Update": ["GetFirmwareInventory", "GetFirmwareUpdateCapabilities", "GetSoftwareInventory", "GetUpdateStatus"], @@ -419,19 +433,20 @@ CATEGORY_COMMANDS_DEFAULT = { def main(): result = {} category_list = [] + argument_spec = dict( + category=dict(type='list', elements='str', default=['Systems']), + command=dict(type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + timeout=dict(type='int', default=60), + update_handle=dict(), + manager=dict(), + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) module = AnsibleModule( - argument_spec=dict( - category=dict(type='list', elements='str', default=['Systems']), - command=dict(type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - timeout=dict(type='int', default=60), - update_handle=dict(), - manager=dict(), - ciphers=dict(type='list', elements='str'), - ), + argument_spec, required_together=[ ('username', 'password'), ], @@ -458,12 +473,9 @@ def main(): # manager manager = module.params['manager'] - # ciphers - ciphers = module.params['ciphers'] - # Build root URI root_uri = "https://" + module.params['baseuri'] - rf_utils = RedfishUtils(creds, root_uri, timeout, module, ciphers=ciphers) + rf_utils = RedfishUtils(creds, root_uri, timeout, module) # Build Category list if "all" in module.params['category']: @@ 
-535,6 +547,8 @@ def main(): result["virtual_media"] = rf_utils.get_multi_virtualmedia(category) elif command == "GetBiosRegistries": result["bios_registries"] = rf_utils.get_bios_registries() + elif command == "GetPowerRestorePolicy": + result["power_restore_policy"] = rf_utils.get_multi_power_restore_policy() elif category == "Chassis": # execute only if we find Chassis resource @@ -569,6 +583,8 @@ def main(): for command in command_list: if command == "ListUsers": result["user"] = rf_utils.list_users() + elif command == "GetAccountServiceConfig": + result["accountservice_config"] = rf_utils.get_accountservice_properties() elif category == "Update": # execute only if we find UpdateService resources diff --git a/plugins/modules/redhat_subscription.py b/plugins/modules/redhat_subscription.py index d9f791e5cf..165bfb2891 100644 --- a/plugins/modules/redhat_subscription.py +++ b/plugins/modules/redhat_subscription.py @@ -14,32 +14,27 @@ DOCUMENTATION = r""" module: redhat_subscription short_description: Manage registration and subscriptions to RHSM using C(subscription-manager) description: - - Manage registration and subscription to the Red Hat Subscription Management entitlement platform using the C(subscription-manager) command, - registering using D-Bus if possible. + - Manage registration and subscription to the Red Hat Subscription Management entitlement platform using the C(subscription-manager) + command, registering using D-Bus if possible. author: "Barnaby Court (@barnabycourt)" notes: - - >- - The module tries to use the D-Bus C(rhsm) service (part of C(subscription-manager)) - to register, starting from community.general 6.5.0: this is done so credentials - (username, password, activation keys) can be passed to C(rhsm) in a secure way. - C(subscription-manager) itself gets credentials only as arguments of command line - parameters, which is I(not) secure, as they can be easily stolen by checking the - process listing on the system. Due to limitations of the D-Bus interface of C(rhsm), - the module will I(not) use D-Bus for registration when trying either to register - using O(token), or when specifying O(environment), or when the system is old - (typically RHEL 7 older than 7.4, RHEL 6, and older). - - In order to register a system, subscription-manager requires either a username and password, or an activationkey and an Organization ID. - - Since 2.5 values for O(server_hostname), O(server_insecure), O(rhsm_baseurl), - O(server_proxy_hostname), O(server_proxy_port), O(server_proxy_user) and - O(server_proxy_password) are no longer taken from the C(/etc/rhsm/rhsm.conf) - config file and default to V(null). - - It is possible to interact with C(subscription-manager) only as root, - so root permissions are required to successfully run this module. - - Since community.general 6.5.0, credentials (that is, O(username) and O(password), - O(activationkey), or O(token)) are needed only in case the the system is not registered, - or O(force_register) is specified; this makes it possible to use the module to tweak an - already registered system, for example attaching pools to it (using O(pool_ids)), - and modifying the C(syspurpose) attributes (using O(syspurpose)). + - 'The module tries to use the D-Bus C(rhsm) service (part of C(subscription-manager)) to register, starting from community.general + 6.5.0: this is done so credentials (username, password, activation keys) can be passed to C(rhsm) in a secure way. 
C(subscription-manager)
+    itself gets credentials only as arguments of command line parameters, which is I(not) secure, as they can be easily stolen
+    by checking the process listing on the system. Due to limitations of the D-Bus interface of C(rhsm), the module does I(not)
+    use D-Bus for registration when trying either to register using O(token), or when specifying O(environment), or when the
+    system is old (typically RHEL 7 older than 7.4, RHEL 6, and older).'
+  - In order to register a system, subscription-manager requires either a username and password, or an activationkey and an
+    Organization ID.
+  - Since 2.5 values for O(server_hostname), O(server_insecure), O(rhsm_baseurl), O(server_proxy_hostname), O(server_proxy_port),
+    O(server_proxy_user) and O(server_proxy_password) are no longer taken from the C(/etc/rhsm/rhsm.conf) config file and
+    default to V(null).
+  - It is possible to interact with C(subscription-manager) only as root, so root permissions are required to successfully
+    run this module.
+  - Since community.general 6.5.0, credentials (that is, O(username) and O(password), O(activationkey), or O(token)) are needed
+    only in case the system is not registered, or O(force_register) is specified; this makes it possible to use the module
+    to tweak an already registered system, for example attaching pools to it (using O(pool_ids)), and modifying the C(syspurpose)
+    attributes (using O(syspurpose)).
 requirements:
   - subscription-manager
   - Optionally the C(dbus) Python library; this is usually included in the OS as it is used by C(subscription-manager).
@@ -120,7 +115,7 @@ options:
   auto_attach:
     description:
       - Upon successful registration, auto-consume available subscriptions.
-      - "Please note that the alias O(ignore:autosubscribe) was removed in community.general 9.0.0."
+      - Please note that the alias O(ignore:autosubscribe) was removed in community.general 9.0.0.
     type: bool
   activationkey:
     description:
@@ -136,10 +131,10 @@ options:
     type: str
   pool_ids:
     description:
-      - "Specify subscription pool IDs to consume.
-      - A pool ID may be specified as a C(string) - just the pool ID (for example V(0123456789abcdef0123456789abcdef)),
-        or as a C(dict) with the pool ID as the key, and a quantity as the value (for example V(0123456789abcdef0123456789abcdef: 2). If the
-        quantity is provided, it is used to consume multiple entitlements from a pool (the pool must support this)."
+      - Specify subscription pool IDs to consume.
+      - 'A pool ID may be specified as a C(string) - just the pool ID (for example V(0123456789abcdef0123456789abcdef)), or
+        as a C(dict) with the pool ID as the key, and a quantity as the value (for example V(0123456789abcdef0123456789abcdef:
+        2). If the quantity is provided, it is used to consume multiple entitlements from a pool (the pool must support this).'
       default: []
       type: list
       elements: raw
@@ -153,9 +148,9 @@ options:
     type: str
   consumer_id:
     description:
-      - "References an existing consumer ID to resume using a previous registration for this system. If the system's identity certificate is
-        lost or corrupted, this option allows it to resume using its previous identity and subscriptions. The default is to not specify a consumer
-        ID so a new ID is created."
+      - References an existing consumer ID to resume using a previous registration for this system. If the system's identity
+        certificate is lost or corrupted, this option allows it to resume using its previous identity and subscriptions. The
+        default is to not specify a consumer ID so a new ID is created.
type: str force_register: description: @@ -168,10 +163,11 @@ options: type: str syspurpose: description: - - Set syspurpose attributes in file C(/etc/rhsm/syspurpose/syspurpose.json) and synchronize these attributes with RHSM server. Syspurpose - attributes help attach the most appropriate subscriptions to the system automatically. When C(syspurpose.json) file already contains some - attributes, then new attributes overwrite existing attributes. When some attribute is not listed in the new list of attributes, the existing - attribute will be removed from C(syspurpose.json) file. Unknown attributes are ignored. + - Set syspurpose attributes in file C(/etc/rhsm/syspurpose/syspurpose.json) and synchronize these attributes with RHSM + server. Syspurpose attributes help attach the most appropriate subscriptions to the system automatically. When C(syspurpose.json) + file already contains some attributes, then new attributes overwrite existing attributes. When some attribute is not + listed in the new list of attributes, the existing attribute is removed from C(syspurpose.json) file. Unknown attributes + are ignored. type: dict suboptions: usage: @@ -189,8 +185,8 @@ options: elements: str sync: description: - - When this option is V(true), then syspurpose attributes are synchronized with RHSM server immediately. When this option is V(false), - then syspurpose attributes will be synchronized with RHSM server by rhsmcertd daemon. + - When this option is V(true), then syspurpose attributes are synchronized with RHSM server immediately. When this + option is V(false), then syspurpose attributes are synchronized with RHSM server by rhsmcertd daemon. type: bool default: false """ @@ -311,9 +307,8 @@ class Rhsm(object): else: cfg.set('main', 'enabled', '0') - fd = open(tmpfile, 'w+') - cfg.write(fd) - fd.close() + with open(tmpfile, 'w+') as fd: + cfg.write(fd) self.module.atomic_move(tmpfile, plugin_conf) def enable(self): @@ -547,6 +542,45 @@ class Rhsm(object): (distro_version[0] == 9 and distro_version[1] >= 2) or distro_version[0] > 9)): dbus_force_option_works = True + # We need to use the 'enable_content' D-Bus option to ensure that + # content is enabled; sadly the option is available depending on the + # version of the distro, and also depending on which API/method is used + # for registration. + dbus_has_enable_content_option = False + if activationkey: + def supports_enable_content_for_activation_keys(): + # subscription-manager in Fedora >= 41 has the new option. + if distro_id == 'fedora' and distro_version[0] >= 41: + return True + # Assume EL distros here. + if distro_version[0] >= 10: + return True + return False + dbus_has_enable_content_option = supports_enable_content_for_activation_keys() + else: + def supports_enable_content_for_credentials(): + # subscription-manager in any supported Fedora version + # has the new option. + if distro_id == 'fedora': + return True + # Check for RHEL 8 >= 8.6, or RHEL >= 9. + if distro_id == 'rhel' and \ + ((distro_version[0] == 8 and distro_version[1] >= 6) or + distro_version[0] >= 9): + return True + # CentOS: similar checks as for RHEL, with one extra bit: + # if the 2nd part of the version is empty, it means it is + # CentOS Stream, and thus we can assume it has the latest + # version of subscription-manager. 
+ if distro_id == 'centos' and \ + ((distro_version[0] == 8 and + (distro_version[1] >= 6 or distro_version_parts[1] == '')) or + distro_version[0] >= 9): + return True + # Unknown or old distro: assume it does not support + # the new option. + return False + dbus_has_enable_content_option = supports_enable_content_for_credentials() if force_register and not dbus_force_option_works and was_registered: self.unregister() @@ -619,6 +653,8 @@ class Rhsm(object): register_opts[environment_key] = environment if force_register and dbus_force_option_works and was_registered: register_opts['force'] = True + if dbus_has_enable_content_option: + register_opts['enable_content'] = "1" # Wrap it as proper D-Bus dict register_opts = dbus.Dictionary(register_opts, signature='sv', variant_level=1) @@ -1122,7 +1158,6 @@ def main(): module.exit_json(changed=False, msg="System already unregistered.") else: try: - rhsm.unsubscribe() rhsm.unregister() except Exception as e: module.fail_json(msg="Failed to unregister: %s" % to_native(e)) diff --git a/plugins/modules/redis.py b/plugins/modules/redis.py index 716f5f1851..9a3d030f52 100644 --- a/plugins/modules/redis.py +++ b/plugins/modules/redis.py @@ -72,14 +72,15 @@ options: type: str value: description: - - A redis config value. When memory size is needed, it is possible to specify it in the usual form of 1KB, 2M, 400MB where the base is 1024. - Units are case insensitive, in other words 1m = 1mb = 1M = 1MB. + - A redis config value. When memory size is needed, it is possible to specify it in the usual form of 1KB, 2M, 400MB + where the base is 1024. Units are case insensitive, in other words 1m = 1mb = 1M = 1MB. type: str notes: - - Requires the C(redis-py) Python package on the remote host. You can install it with pip - (C(pip install redis)) or with a package manager. U(https://github.com/andymccurdy/redis-py). - - If the redis master instance you are making replica of is password protected this needs to be in the C(redis.conf) in the C(masterauth) variable. + - Requires the C(redis-py) Python package on the remote host. You can install it with pip (C(pip install redis)) or with + a package manager. U(https://github.com/andymccurdy/redis-py). + - If the redis master instance you are making replica of is password protected this needs to be in the C(redis.conf) in + the C(masterauth) variable. seealso: - module: community.general.redis_info requirements: [redis] diff --git a/plugins/modules/redis_data.py b/plugins/modules/redis_data.py index 03ae78dce3..4efe409b72 100644 --- a/plugins/modules/redis_data.py +++ b/plugins/modules/redis_data.py @@ -33,7 +33,7 @@ options: type: str expiration: description: - - Expiration time in milliseconds. Setting this flag will always result in a change in the database. + - Expiration time in milliseconds. Setting this option always results in a change in the database. 
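redis_data's options map more or less directly onto the flags of the Redis SET command, which is also why O(expiration) always produces a change: SET with a PX argument rewrites the key's TTL even when the stored value is identical. A sketch of the underlying call via the redis-py client, assuming a reachable local server:

```python
import redis  # pip install redis

r = redis.Redis(host='localhost', port=6379)

# nx / xx correspond to non_existing / existing, px to expiration (ms),
# keepttl to keep_ttl; SET with px always touches the key's TTL.
r.set('foo', 'bar', px=60000, nx=True)
print(r.ttl('foo'))
```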
required: false type: int non_existing: @@ -143,11 +143,11 @@ def main(): redis_auth_args = redis_auth_argument_spec() module_args = dict( key=dict(type='str', required=True, no_log=False), - value=dict(type='str', required=False), - expiration=dict(type='int', required=False), - non_existing=dict(type='bool', required=False), - existing=dict(type='bool', required=False), - keep_ttl=dict(type='bool', required=False), + value=dict(type='str'), + expiration=dict(type='int'), + non_existing=dict(type='bool'), + existing=dict(type='bool'), + keep_ttl=dict(type='bool'), state=dict(type='str', default='present', choices=['present', 'absent']), ) diff --git a/plugins/modules/redis_data_incr.py b/plugins/modules/redis_data_incr.py index 0f0aa92974..d5f2fe05c2 100644 --- a/plugins/modules/redis_data_incr.py +++ b/plugins/modules/redis_data_incr.py @@ -20,10 +20,10 @@ attributes: check_mode: support: partial details: - - For C(check_mode) to work, the specified O(login_user) needs permission to run the C(GET) command on the key, otherwise the module will - fail. - - When using C(check_mode) the module will try to calculate the value that Redis would return. If the key is not present, V(0.0) is used as - value. + - For C(check_mode) to work, the specified O(login_user) needs permission to run the C(GET) command on the key, otherwise + the module fails. + - When using C(check_mode) the module tries to calculate the value that Redis would return. If the key is not present, + V(0.0) is used as value. diff_mode: support: none options: @@ -98,8 +98,8 @@ def main(): redis_auth_args = redis_auth_argument_spec() module_args = dict( key=dict(type='str', required=True, no_log=False), - increment_int=dict(type='int', required=False), - increment_float=dict(type='float', required=False), + increment_int=dict(type='int'), + increment_float=dict(type='float'), ) module_args.update(redis_auth_args) diff --git a/plugins/modules/redis_info.py b/plugins/modules/redis_info.py index bc43f9251e..1b38bc1bca 100644 --- a/plugins/modules/redis_info.py +++ b/plugins/modules/redis_info.py @@ -63,7 +63,8 @@ info: description: The default set of server information sections U(https://redis.io/commands/info). returned: success type: dict - sample: { + sample: + { "active_defrag_hits": 0, "active_defrag_key_hits": 0, "active_defrag_key_misses": 0, @@ -196,20 +197,21 @@ cluster: returned: success if O(cluster=true) version_added: 9.1.0 type: dict - sample: { - "cluster_state": ok, - "cluster_slots_assigned": 16384, - "cluster_slots_ok": 16384, - "cluster_slots_pfail": 0, - "cluster_slots_fail": 0, - "cluster_known_nodes": 6, - "cluster_size": 3, - "cluster_current_epoch": 6, - "cluster_my_epoch": 2, - "cluster_stats_messages_sent": 1483972, - "cluster_stats_messages_received": 1483968, - "total_cluster_links_buffer_limit_exceeded": 0 - } + sample: + { + "cluster_state": "ok", + "cluster_slots_assigned": 16384, + "cluster_slots_ok": 16384, + "cluster_slots_pfail": 0, + "cluster_slots_fail": 0, + "cluster_known_nodes": 6, + "cluster_size": 3, + "cluster_current_epoch": 6, + "cluster_my_epoch": 2, + "cluster_stats_messages_sent": 1483972, + "cluster_stats_messages_received": 1483968, + "total_cluster_links_buffer_limit_exceeded": 0 + } """ import traceback diff --git a/plugins/modules/rhevm.py b/plugins/modules/rhevm.py index 4d0a810108..7536b7843a 100644 --- a/plugins/modules/rhevm.py +++ b/plugins/modules/rhevm.py @@ -153,67 +153,68 @@ options: RETURN = r""" vm: - description: Returns all of the VMs variables and execution. 
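redis_data_incr's check_mode support, described above, predicts the post-increment value instead of running INCRBYFLOAT, and treats a missing key as V(0.0). A toy sketch of that prediction logic (not the module's actual code):

```python
def predicted_incr(current, increment):
    # check_mode prediction: a missing key counts as 0.0, mirroring
    # how Redis itself treats absent keys for INCRBYFLOAT.
    base = float(current) if current is not None else 0.0
    return base + increment

print(predicted_incr(None, 1.5))   # 1.5
print(predicted_incr('2.5', 1.5))  # 4.0
```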
- returned: always - type: dict - sample: { - "boot_order": [ - "hd", - "network" - ], - "changed": true, - "changes": [ - "Delete Protection" - ], - "cluster": "C1", - "cpu_share": "0", - "created": false, - "datacenter": "Default", - "del_prot": true, - "disks": [ - { - "domain": "ssd-san", - "name": "OS", - "size": 40 - } - ], - "eth0": "00:00:5E:00:53:00", - "eth1": "00:00:5E:00:53:01", - "eth2": "00:00:5E:00:53:02", - "exists": true, - "failed": false, - "ifaces": [ - { - "name": "eth0", - "vlan": "Management" - }, - { - "name": "eth1", - "vlan": "Internal" - }, - { - "name": "eth2", - "vlan": "External" - } - ], - "image": false, - "mempol": "0", - "msg": [ - "VM exists", - "cpu_share was already set to 0", - "VM high availability was already set to True", - "The boot order has already been set", - "VM delete protection has been set to True", - "Disk web2_Disk0_OS already exists", - "The VM starting host was already set to host416" - ], - "name": "web2", - "type": "server", - "uuid": "4ba5a1be-e60b-4368-9533-920f156c817b", - "vm_ha": true, - "vmcpu": "4", - "vmhost": "host416", - "vmmem": "16" + description: Returns all of the VMs variables and execution. + returned: always + type: dict + sample: + { + "boot_order": [ + "hd", + "network" + ], + "changed": true, + "changes": [ + "Delete Protection" + ], + "cluster": "C1", + "cpu_share": "0", + "created": false, + "datacenter": "Default", + "del_prot": true, + "disks": [ + { + "domain": "ssd-san", + "name": "OS", + "size": 40 + } + ], + "eth0": "00:00:5E:00:53:00", + "eth1": "00:00:5E:00:53:01", + "eth2": "00:00:5E:00:53:02", + "exists": true, + "failed": false, + "ifaces": [ + { + "name": "eth0", + "vlan": "Management" + }, + { + "name": "eth1", + "vlan": "Internal" + }, + { + "name": "eth2", + "vlan": "External" + } + ], + "image": false, + "mempol": "0", + "msg": [ + "VM exists", + "cpu_share was already set to 0", + "VM high availability was already set to True", + "The boot order has already been set", + "VM delete protection has been set to True", + "Disk web2_Disk0_OS already exists", + "The VM starting host was already set to host416" + ], + "name": "web2", + "type": "server", + "uuid": "4ba5a1be-e60b-4368-9533-920f156c817b", + "vm_ha": true, + "vmcpu": "4", + "vmhost": "host416", + "vmmem": "16" } """ @@ -811,7 +812,7 @@ class RHEVConn(object): setChanged() HOST = self.get_Host(host_name) state = HOST.status.state - while (state != 'non_operational' and state != 'up'): + while state != 'non_operational' and state != 'up': HOST = self.get_Host(host_name) state = HOST.status.state time.sleep(1) diff --git a/plugins/modules/rhsm_release.py b/plugins/modules/rhsm_release.py index ca3a0d03d9..665c734849 100644 --- a/plugins/modules/rhsm_release.py +++ b/plugins/modules/rhsm_release.py @@ -14,9 +14,10 @@ short_description: Set or Unset RHSM Release version description: - Sets or unsets the release version used by RHSM repositories. notes: - - This module will fail on an unregistered system. Use the M(community.general.redhat_subscription) module to register a system prior to setting - the RHSM release. - - It is possible to interact with C(subscription-manager) only as root, so root permissions are required to successfully run this module. + - This module fails on an unregistered system. Use the M(community.general.redhat_subscription) module to register a system + prior to setting the RHSM release. 
+  - It is possible to interact with C(subscription-manager) only as root, so root permissions are required to successfully
+    run this module.
 requirements:
   - Red Hat Enterprise Linux 6+ with subscription-manager installed
 extends_documentation_fragment:
diff --git a/plugins/modules/rhsm_repository.py b/plugins/modules/rhsm_repository.py
index 3e95e69425..e59fcd27b4 100644
--- a/plugins/modules/rhsm_repository.py
+++ b/plugins/modules/rhsm_repository.py
@@ -12,12 +12,14 @@ DOCUMENTATION = r"""
 module: rhsm_repository
 short_description: Manage RHSM repositories using the subscription-manager command
 description:
-  - Manage (Enable/Disable) RHSM repositories to the Red Hat Subscription Management entitlement platform using the C(subscription-manager) command.
+  - Manage (Enable/Disable) RHSM repositories to the Red Hat Subscription Management entitlement platform using the C(subscription-manager)
+    command.
 author: Giovanni Sciortino (@giovannisciortino)
 notes:
   - In order to manage RHSM repositories the system must be already registered to RHSM manually or using the Ansible
     M(community.general.redhat_subscription) module.
-  - It is possible to interact with C(subscription-manager) only as root, so root permissions are required to successfully run this module.
+  - It is possible to interact with C(subscription-manager) only as root, so root permissions are required to successfully
+    run this module.
 requirements:
   - subscription-manager
 extends_documentation_fragment:
@@ -31,7 +33,8 @@ options:
   state:
     description:
       - If state is equal to present or disabled, indicates the desired repository state.
-      - In community.general 10.0.0 the states V(present) and V(absent) have been removed. Please use V(enabled) and V(disabled) instead.
+      - In community.general 10.0.0 the states V(present) and V(absent) have been removed. Please use V(enabled) and V(disabled)
+        instead.
     choices: [enabled, disabled]
     default: "enabled"
     type: str
@@ -44,8 +47,8 @@ options:
     elements: str
   purge:
     description:
-      - Disable all currently enabled repositories that are not not specified in O(name). Only set this to V(true) if passing in a list of repositories
-        to the O(name) field. Using this with C(loop) will most likely not have the desired result.
+      - Disable all currently enabled repositories that are not specified in O(name). Only set this to V(true) if passing
+        in a list of repositories to the O(name) field. Using this with C(loop) is unlikely to have the desired result.
     type: bool
     default: false
 """
diff --git a/plugins/modules/riak.py b/plugins/modules/riak.py
index d19b17eec5..2009ca0a22 100644
--- a/plugins/modules/riak.py
+++ b/plugins/modules/riak.py
@@ -62,7 +62,8 @@ options:
     type: str
   validate_certs:
     description:
-      - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates.
+      - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed
+        certificates.
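The purge behaviour documented in the rhsm_repository hunk above is plain set arithmetic: whatever is currently enabled but not listed in O(name) gets disabled in the same run. A toy illustration with made-up repository IDs:

```python
# Hypothetical repository IDs; the module obtains the real lists
# from subscription-manager.
enabled = {'rhel-9-baseos-rpms', 'rhel-9-appstream-rpms', 'extras-rpms'}
wanted = {'rhel-9-baseos-rpms', 'rhel-9-appstream-rpms'}

to_disable = sorted(enabled - wanted)
to_enable = sorted(wanted - enabled)
print(to_disable)  # ['extras-rpms']
print(to_enable)   # []
```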
type: bool default: true """ @@ -102,15 +103,13 @@ def main(): module = AnsibleModule( argument_spec=dict( - command=dict(required=False, default=None, choices=[ - 'ping', 'kv_test', 'join', 'plan', 'commit']), + command=dict(choices=['ping', 'kv_test', 'join', 'plan', 'commit']), config_dir=dict(default='/etc/riak', type='path'), - http_conn=dict(required=False, default='127.0.0.1:8098'), - target_node=dict(default='riak@127.0.0.1', required=False), + http_conn=dict(default='127.0.0.1:8098'), + target_node=dict(default='riak@127.0.0.1'), wait_for_handoffs=dict(default=0, type='int'), wait_for_ring=dict(default=0, type='int'), - wait_for_service=dict( - required=False, default=None, choices=['kv']), + wait_for_service=dict(choices=['kv']), validate_certs=dict(default=True, type='bool')) ) diff --git a/plugins/modules/rocketchat.py b/plugins/modules/rocketchat.py index 82de3f829f..6da9b36e8d 100644 --- a/plugins/modules/rocketchat.py +++ b/plugins/modules/rocketchat.py @@ -34,7 +34,8 @@ options: token: type: str description: - - Rocket Chat Incoming Webhook integration token. This provides authentication to Rocket Chat's Incoming webhook for posting messages. + - Rocket Chat Incoming Webhook integration token. This provides authentication to Rocket Chat's Incoming webhook for + posting messages. required: true protocol: type: str @@ -51,8 +52,8 @@ options: channel: type: str description: - - Channel to send the message to. If absent, the message goes to the channel selected for the O(token) specified during the creation of - webhook. + - Channel to send the message to. If absent, the message goes to the channel selected for the O(token) specified during + the creation of webhook. username: type: str description: @@ -68,7 +69,7 @@ options: description: - Emoji for the message sender. The representation for the available emojis can be got from Rocket Chat. - For example V(:thumbsup:). - - If O(icon_emoji) is set, O(icon_url) will not be used. + - If O(icon_emoji) is set, O(icon_url) is not used. link_names: type: int description: @@ -79,13 +80,15 @@ options: - 0 validate_certs: description: - - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed + certificates. type: bool default: true color: type: str description: - - Allow text to use default colors - use the default of V(normal) to not send a custom color bar at the start of the message. + - Allow text to use default colors - use the default of V(normal) to not send a custom color bar at the start of the + message. default: 'normal' choices: - 'normal' @@ -97,6 +100,15 @@ options: elements: dict description: - Define a list of attachments. + is_pre740: + description: + - If V(true), the payload matches Rocket.Chat prior to 7.4.0 format. This format has been used by the module since its + inception, but is no longer supported by Rocket.Chat 7.4.0. + - The default value of the option is going to change to V(false) eventually. + - This parameter is going to be removed in a future release when Rocket.Chat 7.4.0 becomes the minimum supported version. 
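The new is_pre740 switch above exists because Rocket.Chat 7.4.0 stopped accepting the form-style C(payload=<json>) body this module has always sent and now expects the bare JSON document. A sketch of the two encodings, mirroring the tail of build_payload_for_rocketchat further down (with json.dumps standing in for module.jsonify):

```python
import json

def build_body(payload, is_pre740):
    # Pre-7.4.0 servers expect a form-style 'payload=' prefix;
    # 7.4.0 and later take the raw JSON document.
    body = json.dumps(payload)
    if is_pre740:
        body = 'payload=' + body
    return body

print(build_body({'text': 'hi', 'channel': '#ansible'}, True))
print(build_body({'text': 'hi', 'channel': '#ansible'}, False))
```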
+ type: bool + default: true + version_added: 10.5.0 """ EXAMPLES = r""" @@ -112,13 +124,14 @@ EXAMPLES = r""" domain: chat.example.com token: thetoken/generatedby/rocketchat msg: '{{ inventory_hostname }} completed' - channel: #ansible + channel: "#ansible" username: 'Ansible on {{ inventory_hostname }}' icon_url: http://www.example.com/some-image-file.png link_names: 0 delegate_to: localhost -- name: Insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in rocketchat +- name: Insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured + in rocketchat community.general.rocketchat: token: thetoken/generatedby/rocketchat domain: chat.example.com @@ -134,7 +147,7 @@ EXAMPLES = r""" domain: chat.example.com attachments: - text: Display my system load on host A and B - color: #ff00dd + color: "#ff00dd" title: System load fields: - title: System A @@ -146,13 +159,6 @@ EXAMPLES = r""" delegate_to: localhost """ -RETURN = r""" -changed: - description: A flag indicating if any change was made or not. - returned: success - type: bool - sample: false -""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url @@ -161,14 +167,14 @@ from ansible.module_utils.urls import fetch_url ROCKETCHAT_INCOMING_WEBHOOK = '%s://%s/hooks/%s' -def build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments): +def build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments, is_pre740): payload = {} if color == "normal" and text is not None: payload = dict(text=text) elif text is not None: payload = dict(attachments=[dict(text=text, color=color)]) if channel is not None: - if (channel[0] == '#') or (channel[0] == '@'): + if channel[0] == '#' or channel[0] == '@': payload['channel'] = channel else: payload['channel'] = '#' + channel @@ -191,7 +197,9 @@ def build_payload_for_rocketchat(module, text, channel, username, icon_url, icon attachment['fallback'] = attachment['text'] payload['attachments'].append(attachment) - payload = "payload=" + module.jsonify(payload) + payload = module.jsonify(payload) + if is_pre740: + payload = "payload=" + payload return payload @@ -213,7 +221,7 @@ def main(): domain=dict(type='str', required=True), token=dict(type='str', required=True, no_log=True), protocol=dict(type='str', default='https', choices=['http', 'https']), - msg=dict(type='str', required=False), + msg=dict(type='str'), channel=dict(type='str'), username=dict(type='str', default='Ansible'), icon_url=dict(type='str', default='https://docs.ansible.com/favicon.ico'), @@ -221,7 +229,8 @@ def main(): link_names=dict(type='int', default=1, choices=[0, 1]), validate_certs=dict(default=True, type='bool'), color=dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger']), - attachments=dict(type='list', elements='dict', required=False) + attachments=dict(type='list', elements='dict'), + is_pre740=dict(default=True, type='bool') ) ) @@ -236,8 +245,9 @@ def main(): link_names = module.params['link_names'] color = module.params['color'] attachments = module.params['attachments'] + is_pre740 = module.params['is_pre740'] - payload = build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments) + payload = build_payload_for_rocketchat(module, text, channel, username, icon_url, 
icon_emoji, link_names, color, attachments, is_pre740) do_notify_rocketchat(module, domain, token, protocol, payload) module.exit_json(msg="OK") diff --git a/plugins/modules/rollbar_deployment.py b/plugins/modules/rollbar_deployment.py index e9bfc239b0..e0cf4e31aa 100644 --- a/plugins/modules/rollbar_deployment.py +++ b/plugins/modules/rollbar_deployment.py @@ -61,8 +61,8 @@ options: default: 'https://api.rollbar.com/api/1/deploy/' validate_certs: description: - - If V(false), SSL certificates for the target url will not be validated. This should only be used on personally controlled sites using - self-signed certificates. + - If V(false), SSL certificates for the target URL are not validated. This should only be used on personally controlled + sites using self-signed certificates. required: false default: true type: bool @@ -100,13 +100,10 @@ def main(): token=dict(required=True, no_log=True), environment=dict(required=True), revision=dict(required=True), - user=dict(required=False), - rollbar_user=dict(required=False), - comment=dict(required=False), - url=dict( - required=False, - default='https://api.rollbar.com/api/1/deploy/' - ), + user=dict(), + rollbar_user=dict(), + comment=dict(), + url=dict(default='https://api.rollbar.com/api/1/deploy/'), validate_certs=dict(default=True, type='bool'), ), supports_check_mode=True diff --git a/plugins/modules/rpm_ostree_pkg.py b/plugins/modules/rpm_ostree_pkg.py index db084c9091..01462b25f1 100644 --- a/plugins/modules/rpm_ostree_pkg.py +++ b/plugins/modules/rpm_ostree_pkg.py @@ -78,20 +78,10 @@ EXAMPLES = r""" register: rpm_ostree_pkg until: rpm_ostree_pkg is not failed retries: 10 - dealy: 30 + delay: 30 """ RETURN = r""" -rc: - description: Return code of rpm-ostree command. - returned: always - type: int - sample: 0 -changed: - description: State changes. - returned: always - type: bool - sample: true action: description: Action performed. returned: always @@ -101,17 +91,7 @@ packages: description: A list of packages specified. returned: always type: list - sample: ['nfs-utils'] -stdout: - description: Stdout of rpm-ostree command. - returned: always - type: str - sample: 'Staging deployment...done\n...' -stderr: - description: Stderr of rpm-ostree command. - returned: always - type: str - sample: '' + sample: ["nfs-utils"] cmd: description: Full command used for performed action. 
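The rundeck_acl_policy hunk that follows computes the API endpoint once in __init__ and, in doing so, adds support for project-scoped ACLs alongside system ones. The selection logic in isolation, with made-up values:

```python
def acl_endpoint(name, project=None):
    # Project-scoped ACLs live under project/<project>/acl/,
    # system ACLs under system/acl/ (see the hunk below).
    if project:
        return "project/%s/acl/%s.aclpolicy" % (project, name)
    return "system/acl/%s.aclpolicy" % name

print(acl_endpoint("deploy"))                       # system/acl/deploy.aclpolicy
print(acl_endpoint("deploy", project="myproject"))  # project/myproject/acl/deploy.aclpolicy
```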
returned: always diff --git a/plugins/modules/rundeck_acl_policy.py b/plugins/modules/rundeck_acl_policy.py index aa22e6e6ea..0c089792b8 100644 --- a/plugins/modules/rundeck_acl_policy.py +++ b/plugins/modules/rundeck_acl_policy.py @@ -129,11 +129,18 @@ from ansible_collections.community.general.plugins.module_utils.rundeck import ( class RundeckACLManager: def __init__(self, module): self.module = module + if module.params.get("project"): + self.endpoint = "project/%s/acl/%s.aclpolicy" % ( + self.module.params["project"], + self.module.params["name"], + ) + else: + self.endpoint = "system/acl/%s.aclpolicy" % self.module.params["name"] def get_acl(self): resp, info = api_request( module=self.module, - endpoint="system/acl/%s.aclpolicy" % self.module.params["name"], + endpoint=self.endpoint, ) return resp @@ -147,7 +154,7 @@ class RundeckACLManager: resp, info = api_request( module=self.module, - endpoint="system/acl/%s.aclpolicy" % self.module.params["name"], + endpoint=self.endpoint, method="POST", data={"contents": self.module.params["policy"]}, ) @@ -171,7 +178,7 @@ class RundeckACLManager: resp, info = api_request( module=self.module, - endpoint="system/acl/%s.aclpolicy" % self.module.params["name"], + endpoint=self.endpoint, method="PUT", data={"contents": self.module.params["policy"]}, ) @@ -194,7 +201,7 @@ class RundeckACLManager: if not self.module.check_mode: api_request( module=self.module, - endpoint="system/acl/%s.aclpolicy" % self.module.params["name"], + endpoint=self.endpoint, method="DELETE", ) diff --git a/plugins/modules/rundeck_job_executions_info.py b/plugins/modules/rundeck_job_executions_info.py index 540c8c7788..77fb94c79d 100644 --- a/plugins/modules/rundeck_job_executions_info.py +++ b/plugins/modules/rundeck_job_executions_info.py @@ -80,46 +80,53 @@ paging: description: Maximum number of results per page. type: int returned: success - sample: {"count": 20, "total": 100, "offset": 0, "max": 20} + sample: + { + "count": 20, + "total": 100, + "offset": 0, + "max": 20 + } executions: - description: Job executions list. - returned: always - type: list - elements: dict - sample: [ - { - "id": 1, - "href": "https://rundeck.example.org/api/39/execution/1", - "permalink": "https://rundeck.example.org/project/myproject/execution/show/1", - "status": "succeeded", - "project": "myproject", - "executionType": "user", - "user": "admin", - "date-started": { - "unixtime": 1633525515026, - "date": "2021-10-06T13:05:15Z" - }, - "date-ended": { - "unixtime": 1633525518386, - "date": "2021-10-06T13:05:18Z" - }, - "job": { - "id": "697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a", - "averageDuration": 6381, - "name": "Test", - "group": "", - "project": "myproject", - "description": "", - "options": { - "exit_code": "0" - }, - "href": "https://rundeck.example.org/api/39/job/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a", - "permalink": "https://rundeck.example.org/project/myproject/job/show/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a" - }, - "description": "Plugin[com.batix.rundeck.plugins.AnsiblePlaybookInlineWorkflowStep, nodeStep: false]", - "argstring": "-exit_code 0", - "serverUUID": "5b9a1438-fa3a-457e-b254-8f3d70338068" - } + description: Job executions list. 
+ returned: always + type: list + elements: dict + sample: + [ + { + "id": 1, + "href": "https://rundeck.example.org/api/39/execution/1", + "permalink": "https://rundeck.example.org/project/myproject/execution/show/1", + "status": "succeeded", + "project": "myproject", + "executionType": "user", + "user": "admin", + "date-started": { + "unixtime": 1633525515026, + "date": "2021-10-06T13:05:15Z" + }, + "date-ended": { + "unixtime": 1633525518386, + "date": "2021-10-06T13:05:18Z" + }, + "job": { + "id": "697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a", + "averageDuration": 6381, + "name": "Test", + "group": "", + "project": "myproject", + "description": "", + "options": { + "exit_code": "0" + }, + "href": "https://rundeck.example.org/api/39/job/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a", + "permalink": "https://rundeck.example.org/project/myproject/job/show/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a" + }, + "description": "Plugin[com.batix.rundeck.plugins.AnsiblePlaybookInlineWorkflowStep, nodeStep: false]", + "argstring": "-exit_code 0", + "serverUUID": "5b9a1438-fa3a-457e-b254-8f3d70338068" + } ] """ diff --git a/plugins/modules/rundeck_job_run.py b/plugins/modules/rundeck_job_run.py index f46b5ee432..1c5841b3c5 100644 --- a/plugins/modules/rundeck_job_run.py +++ b/plugins/modules/rundeck_job_run.py @@ -62,7 +62,7 @@ options: type: int description: - Job execution wait timeout in seconds. - - If the timeout is reached, the job will be aborted. + - If the timeout is reached, the job is aborted. - Keep in mind that there is a sleep based on O(wait_execution_delay) after each job status check. default: 120 abort_on_timeout: @@ -133,48 +133,49 @@ EXAMPLES = r""" RETURN = r""" execution_info: - description: Rundeck job execution metadata. - returned: always - type: dict - sample: { - "msg": "Job execution succeeded!", - "execution_info": { - "id": 1, - "href": "https://rundeck.example.org/api/39/execution/1", - "permalink": "https://rundeck.example.org/project/myproject/execution/show/1", - "status": "succeeded", - "project": "myproject", - "executionType": "user", - "user": "admin", - "date-started": { - "unixtime": 1633449020784, - "date": "2021-10-05T15:50:20Z" - }, - "date-ended": { - "unixtime": 1633449026358, - "date": "2021-10-05T15:50:26Z" - }, - "job": { - "id": "697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a", - "averageDuration": 4917, - "name": "Test", - "group": "", - "project": "myproject", - "description": "", - "options": { - "exit_code": "0" - }, - "href": "https://rundeck.example.org/api/39/job/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a", - "permalink": "https://rundeck.example.org/project/myproject/job/show/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a" - }, - "description": "sleep 5 && echo 'Test!' && exit ${option.exit_code}", - "argstring": "-exit_code 0", - "serverUUID": "5b9a1438-fa3a-457e-b254-8f3d70338068", - "successfulNodes": [ - "localhost" - ], - "output": "Test!" - } + description: Rundeck job execution metadata. 
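# Editor's note - an assumed usage sketch for the wait-timeout behavior documented
# above: once the timeout elapses the job is aborted (and with abort_on_timeout
# enabled, a forced abort is requested). The connection values and job_id are
# placeholders, and the option names are taken from the module documentation.
- name: Run a Rundeck job and abort it after five minutes (illustrative)
  community.general.rundeck_job_run:
    url: https://rundeck.example.org
    api_token: "{{ rundeck_api_token }}"
    job_id: 697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a
    wait_execution_timeout: 300  # seconds, checked between polling sleeps
    abort_on_timeout: true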
+ returned: always + type: dict + sample: + { + "msg": "Job execution succeeded!", + "execution_info": { + "id": 1, + "href": "https://rundeck.example.org/api/39/execution/1", + "permalink": "https://rundeck.example.org/project/myproject/execution/show/1", + "status": "succeeded", + "project": "myproject", + "executionType": "user", + "user": "admin", + "date-started": { + "unixtime": 1633449020784, + "date": "2021-10-05T15:50:20Z" + }, + "date-ended": { + "unixtime": 1633449026358, + "date": "2021-10-05T15:50:26Z" + }, + "job": { + "id": "697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a", + "averageDuration": 4917, + "name": "Test", + "group": "", + "project": "myproject", + "description": "", + "options": { + "exit_code": "0" + }, + "href": "https://rundeck.example.org/api/39/job/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a", + "permalink": "https://rundeck.example.org/project/myproject/job/show/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a" + }, + "description": "sleep 5 && echo 'Test!' && exit ${option.exit_code}", + "argstring": "-exit_code 0", + "serverUUID": "5b9a1438-fa3a-457e-b254-8f3d70338068", + "successfulNodes": [ + "localhost" + ], + "output": "Test!" + } } """ diff --git a/plugins/modules/runit.py b/plugins/modules/runit.py index 221b87b0dd..5a575fa2ba 100644 --- a/plugins/modules/runit.py +++ b/plugins/modules/runit.py @@ -30,9 +30,10 @@ options: required: true state: description: - - V(started)/V(stopped) are idempotent actions that will not run commands unless necessary. V(restarted) will always bounce the service - (sv restart) and V(killed) will always bounce the service (sv force-stop). V(reloaded) will send a HUP (sv reload). V(once) will run a - normally downed sv once (sv once), not really an idempotent operation. + - V(started)/V(stopped) are idempotent actions that do not run commands unless necessary. + - V(restarted) always bounces the service (sv restart) and V(killed) always bounces the service (sv force-stop). + - V(reloaded) always sends a HUP (sv reload). + - V(once) runs a normally downed sv once (sv once), not really an idempotent operation. type: str choices: [killed, once, reloaded, restarted, started, stopped] enabled: diff --git a/plugins/modules/say.py b/plugins/modules/say.py index 2dc359083d..eff582f125 100644 --- a/plugins/modules/say.py +++ b/plugins/modules/say.py @@ -17,7 +17,8 @@ description: notes: - In 2.5, this module has been renamed from C(osx_say) to M(community.general.say). - If you like this module, you may also be interested in the osx_say callback plugin. - - A list of available voices, with language, can be found by running C(say -v ?) on a OSX host and C(espeak --voices) on a Linux host. + - A list of available voices, with language, can be found by running C(say -v ?) on an OSX host and C(espeak --voices) on + a Linux host. extends_documentation_fragment: - community.general.attributes attributes: @@ -66,7 +67,7 @@ def main(): module = AnsibleModule( argument_spec=dict( msg=dict(required=True), - voice=dict(required=False), + voice=dict(), ), supports_check_mode=True ) diff --git a/plugins/modules/scaleway_compute.py b/plugins/modules/scaleway_compute.py index c61030bede..f3653cd3b6 100644 --- a/plugins/modules/scaleway_compute.py +++ b/plugins/modules/scaleway_compute.py @@ -133,7 +133,7 @@ options: type: str description: - Security group unique identifier. - - If no value provided, the default security group or current security group will be used. + - If no value is provided, the default security group or current security group is used.
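# Editor's note - a minimal sketch of the sv(8) semantics spelled out above:
# V(restarted) always runs `sv restart`, even when the service is already up,
# while V(started)/V(stopped) only act when needed. The service name is a
# placeholder.
- name: Unconditionally bounce a runit-supervised service (illustrative)
  community.general.runit:
    name: nginx
    state: restarted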
required: false """ diff --git a/plugins/modules/scaleway_compute_private_network.py b/plugins/modules/scaleway_compute_private_network.py index 5339dfef15..a3b6c031f7 100644 --- a/plugins/modules/scaleway_compute_private_network.py +++ b/plugins/modules/scaleway_compute_private_network.py @@ -93,26 +93,26 @@ EXAMPLES = r""" RETURN = r""" scaleway_compute_private_network: - description: Information on the VPC. - returned: success when O(state=present) - type: dict - sample: - { - "created_at": "2022-01-15T11:11:12.676445Z", - "id": "12345678-f1e6-40ec-83e5-12345d67ed89", - "name": "network", - "organization_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90", - "project_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90", - "tags": [ - "tag1", - "tag2", - "tag3", - "tag4", - "tag5" - ], - "updated_at": "2022-01-15T11:12:04.624837Z", - "zone": "fr-par-2" - } + description: Information on the VPC. + returned: success when O(state=present) + type: dict + sample: + { + "created_at": "2022-01-15T11:11:12.676445Z", + "id": "12345678-f1e6-40ec-83e5-12345d67ed89", + "name": "network", + "organization_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90", + "project_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90", + "tags": [ + "tag1", + "tag2", + "tag3", + "tag4", + "tag5" + ], + "updated_at": "2022-01-15T11:12:04.624837Z", + "zone": "fr-par-2" + } """ from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway diff --git a/plugins/modules/scaleway_container.py b/plugins/modules/scaleway_container.py index 5cdd12da5d..8351660fd6 100644 --- a/plugins/modules/scaleway_container.py +++ b/plugins/modules/scaleway_container.py @@ -89,7 +89,7 @@ options: secret_environment_variables: description: - Secret environment variables of the container namespace. - - Updating those values will not output a C(changed) state in Ansible. + - Updating those values does not output a C(changed) state in Ansible. - Injected in container at runtime. type: dict default: {} @@ -108,8 +108,8 @@ options: privacy: description: - Privacy policies define whether a container can be executed anonymously. - - Choose V(public) to enable anonymous execution, or V(private) to protect your container with an authentication mechanism provided by the - Scaleway API. + - Choose V(public) to enable anonymous execution, or V(private) to protect your container with an authentication mechanism + provided by the Scaleway API. type: str default: public choices: @@ -125,7 +125,7 @@ options: max_concurrency: description: - Maximum number of connections per container. - - This parameter will be used to trigger autoscaling. + - This parameter is used to trigger autoscaling. 
type: int protocol: diff --git a/plugins/modules/scaleway_container_info.py b/plugins/modules/scaleway_container_info.py index 28cf40ac50..e17547735a 100644 --- a/plugins/modules/scaleway_container_info.py +++ b/plugins/modules/scaleway_container_info.py @@ -66,7 +66,7 @@ container: description: Container used for testing scaleway_container ansible module domain_name: cnansibletestgfogtjod-cn-ansible-test.functions.fnc.fr-par.scw.cloud environment_variables: - MY_VAR: my_value + MY_VAR: my_value error_message: null http_option: "" id: c9070eb0-d7a4-48dd-9af3-4fb139890721 diff --git a/plugins/modules/scaleway_container_namespace.py b/plugins/modules/scaleway_container_namespace.py index 802a491321..781c9ffc25 100644 --- a/plugins/modules/scaleway_container_namespace.py +++ b/plugins/modules/scaleway_container_namespace.py @@ -79,7 +79,7 @@ options: secret_environment_variables: description: - Secret environment variables of the container namespace. - - Updating those values will not output a C(changed) state in Ansible. + - Updating those values does not output a C(changed) state in Ansible. - Injected in containers at runtime. type: dict default: {} diff --git a/plugins/modules/scaleway_container_registry.py b/plugins/modules/scaleway_container_registry.py index 132dfe8bb6..4e352c5b9e 100644 --- a/plugins/modules/scaleway_container_registry.py +++ b/plugins/modules/scaleway_container_registry.py @@ -71,7 +71,7 @@ options: type: str description: - Default visibility policy. - - Everyone will be able to pull images from a V(public) registry. + - Everyone can pull images from a V(public) registry. choices: - public - private diff --git a/plugins/modules/scaleway_database_backup.py b/plugins/modules/scaleway_database_backup.py index b19a6b49bd..48add5dfc6 100644 --- a/plugins/modules/scaleway_database_backup.py +++ b/plugins/modules/scaleway_database_backup.py @@ -143,25 +143,26 @@ EXAMPLES = r""" RETURN = r""" metadata: - description: Backup metadata. - returned: when O(state=present), O(state=exported), or O(state=restored) - type: dict - sample: { - "metadata": { - "created_at": "2020-08-06T12:42:05.631049Z", - "database_name": "my-database", - "download_url": null, - "download_url_expires_at": null, - "expires_at": null, - "id": "a15297bd-0c4a-4b4f-8fbb-b36a35b7eb07", - "instance_id": "617be32e-6497-4ed7-b4c7-0ee5a81edf49", - "instance_name": "my-instance", - "name": "backup_name", - "region": "fr-par", - "size": 600000, - "status": "ready", - "updated_at": "2020-08-06T12:42:10.581649Z" - } + description: Backup metadata. 
+ returned: when O(state=present), O(state=exported), or O(state=restored) + type: dict + sample: + { + "metadata": { + "created_at": "2020-08-06T12:42:05.631049Z", + "database_name": "my-database", + "download_url": null, + "download_url_expires_at": null, + "expires_at": null, + "id": "a15297bd-0c4a-4b4f-8fbb-b36a35b7eb07", + "instance_id": "617be32e-6497-4ed7-b4c7-0ee5a81edf49", + "instance_name": "my-instance", + "name": "backup_name", + "region": "fr-par", + "size": 600000, + "status": "ready", + "updated_at": "2020-08-06T12:42:10.581649Z" + } } """ @@ -353,8 +354,8 @@ def main(): region=dict(required=True, choices=SCALEWAY_REGIONS), id=dict(), name=dict(type='str'), - database_name=dict(required=False), - instance_id=dict(required=False), + database_name=dict(), + instance_id=dict(), expires_at=dict(), wait=dict(type='bool', default=False), wait_timeout=dict(type='int', default=300), diff --git a/plugins/modules/scaleway_function.py b/plugins/modules/scaleway_function.py index a5e81c37e4..4bc7c42688 100644 --- a/plugins/modules/scaleway_function.py +++ b/plugins/modules/scaleway_function.py @@ -89,7 +89,7 @@ options: secret_environment_variables: description: - Secret environment variables of the function. - - Updating those values will not output a C(changed) state in Ansible. + - Updating those values does not output a C(changed) state in Ansible. - Injected in function at runtime. type: dict default: {} @@ -97,7 +97,8 @@ options: runtime: description: - Runtime of the function. - - See U(https://www.scaleway.com/en/docs/compute/functions/reference-content/functions-lifecycle/) for all available runtimes. + - See U(https://www.scaleway.com/en/docs/compute/functions/reference-content/functions-lifecycle/) for all available + runtimes. type: str required: true @@ -120,8 +121,8 @@ options: privacy: description: - Privacy policies define whether a function can be executed anonymously. - - Choose V(public) to enable anonymous execution, or V(private) to protect your function with an authentication mechanism provided by the - Scaleway API. + - Choose V(public) to enable anonymous execution, or V(private) to protect your function with an authentication mechanism + provided by the Scaleway API. type: str default: public choices: diff --git a/plugins/modules/scaleway_function_namespace.py b/plugins/modules/scaleway_function_namespace.py index d43b42bc7f..e5e00bf681 100644 --- a/plugins/modules/scaleway_function_namespace.py +++ b/plugins/modules/scaleway_function_namespace.py @@ -79,7 +79,7 @@ options: secret_environment_variables: description: - Secret environment variables of the function namespace. - - Updating those values will not output a C(changed) state in Ansible. + - Updating those values does not output a C(changed) state in Ansible. - Injected in functions at runtime. 
type: dict default: {} diff --git a/plugins/modules/scaleway_image_info.py b/plugins/modules/scaleway_image_info.py index 0f6d1539c8..0b2fe0476d 100644 --- a/plugins/modules/scaleway_image_info.py +++ b/plugins/modules/scaleway_image_info.py @@ -57,37 +57,37 @@ scaleway_image_info: type: list elements: dict sample: - "scaleway_image_info": [ - { - "arch": "x86_64", - "creation_date": "2018-07-17T16:18:49.276456+00:00", - "default_bootscript": { - "architecture": "x86_64", - "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", - "default": false, - "dtb": "", - "id": "15fbd2f7-a0f9-412b-8502-6a44da8d98b8", - "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz", - "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.9-4.9.93-rev1/vmlinuz-4.9.93", - "organization": "11111111-1111-4111-8111-111111111111", - "public": true, - "title": "x86_64 mainline 4.9.93 rev1" - }, - "extra_volumes": [], - "from_server": null, - "id": "00ae4a88-3252-4eda-9feb-5f6b56bf5ef0", - "modification_date": "2018-07-17T16:42:06.319315+00:00", - "name": "Debian Stretch", - "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db", - "public": true, - "root_volume": { - "id": "da32dfbb-c5ff-476d-ae2d-c297dd09b7dd", - "name": "snapshot-2a7229dc-d431-4dc5-b66e-95db08b773af-2018-07-17_16:18", - "size": 25000000000, - "volume_type": "l_ssd" - }, - "state": "available" - } + [ + { + "arch": "x86_64", + "creation_date": "2018-07-17T16:18:49.276456+00:00", + "default_bootscript": { + "architecture": "x86_64", + "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", + "default": false, + "dtb": "", + "id": "15fbd2f7-a0f9-412b-8502-6a44da8d98b8", + "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz", + "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.9-4.9.93-rev1/vmlinuz-4.9.93", + "organization": "11111111-1111-4111-8111-111111111111", + "public": true, + "title": "x86_64 mainline 4.9.93 rev1" + }, + "extra_volumes": [], + "from_server": null, + "id": "00ae4a88-3252-4eda-9feb-5f6b56bf5ef0", + "modification_date": "2018-07-17T16:42:06.319315+00:00", + "name": "Debian Stretch", + "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db", + "public": true, + "root_volume": { + "id": "da32dfbb-c5ff-476d-ae2d-c297dd09b7dd", + "name": "snapshot-2a7229dc-d431-4dc5-b66e-95db08b773af-2018-07-17_16:18", + "size": 25000000000, + "volume_type": "l_ssd" + }, + "state": "available" + } ] """ diff --git a/plugins/modules/scaleway_ip.py b/plugins/modules/scaleway_ip.py index 4fad2faf61..ce9977bbf0 100644 --- a/plugins/modules/scaleway_ip.py +++ b/plugins/modules/scaleway_ip.py @@ -61,12 +61,12 @@ options: id: type: str description: - - Id of the Scaleway IP (UUID). + - ID of the Scaleway IP (UUID). server: type: str description: - - Id of the server you want to attach an IP to. - - To unattach an IP don't specify this option. + - ID of the server you want to attach an IP to. + - To detach an IP, do not specify this option. reverse: type: str description: @@ -90,23 +90,24 @@ EXAMPLES = r""" RETURN = r""" data: - description: This is only present when O(state=present). - returned: when O(state=present) - type: dict - sample: { + description: This is only present when O(state=present).
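# Editor's note - an illustrative pair for the O(server) behavior documented
# above: setting O(server) attaches the IP, omitting it detaches the IP. The
# UUIDs reuse the sample data above; region/organization values are assumed.
- name: Attach an IP to a server (illustrative)
  community.general.scaleway_ip:
    id: dd9e8df6-6775-4863-b517-e0b0ee3d7477
    server: 3f1568ca-b1a2-4e98-b6f7-31a0588157f1
    region: par1
    organization: 951df375-e094-4d26-97c1-ba548eeb9c42
    state: present

- name: Detach the IP again by omitting O(server) (illustrative)
  community.general.scaleway_ip:
    id: dd9e8df6-6775-4863-b517-e0b0ee3d7477
    region: par1
    organization: 951df375-e094-4d26-97c1-ba548eeb9c42
    state: present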
+ returned: when O(state=present) + type: dict + sample: + { "ips": [ { - "organization": "951df375-e094-4d26-97c1-ba548eeb9c42", - "reverse": null, - "id": "dd9e8df6-6775-4863-b517-e0b0ee3d7477", - "server": { - "id": "3f1568ca-b1a2-4e98-b6f7-31a0588157f1", - "name": "ansible_tuto-1" - }, - "address": "212.47.232.136" + "organization": "951df375-e094-4d26-97c1-ba548eeb9c42", + "reverse": null, + "id": "dd9e8df6-6775-4863-b517-e0b0ee3d7477", + "server": { + "id": "3f1568ca-b1a2-4e98-b6f7-31a0588157f1", + "name": "ansible_tuto-1" + }, + "address": "212.47.232.136" } - ] - } + ] + } """ from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway diff --git a/plugins/modules/scaleway_ip_info.py b/plugins/modules/scaleway_ip_info.py index b9475ab58c..0812746619 100644 --- a/plugins/modules/scaleway_ip_info.py +++ b/plugins/modules/scaleway_ip_info.py @@ -52,22 +52,22 @@ RETURN = r""" scaleway_ip_info: description: - Response from Scaleway API. - - 'For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)' + - For more details please refer to U(https://developers.scaleway.com/en/products/instance/api/). returned: success type: list elements: dict sample: - "scaleway_ip_info": [ - { - "address": "163.172.170.243", - "id": "ea081794-a581-8899-8451-386ddaf0a451", - "organization": "3f709602-5e6c-4619-b80c-e324324324af", - "reverse": null, - "server": { - "id": "12f19bc7-109c-4517-954c-e6b3d0311363", - "name": "scw-e0d158" - } + [ + { + "address": "163.172.170.243", + "id": "ea081794-a581-8899-8451-386ddaf0a451", + "organization": "3f709602-5e6c-4619-b80c-e324324324af", + "reverse": null, + "server": { + "id": "12f19bc7-109c-4517-954c-e6b3d0311363", + "name": "scw-e0d158" } + } ] """ diff --git a/plugins/modules/scaleway_lb.py b/plugins/modules/scaleway_lb.py index 7e13c3843f..64a45c73ee 100644 --- a/plugins/modules/scaleway_lb.py +++ b/plugins/modules/scaleway_lb.py @@ -113,7 +113,7 @@ EXAMPLES = r""" region: fr-par """ -RETURNS = ''' +RETURNS = """ { "scaleway_lb": { "backend_count": 0, @@ -154,7 +154,7 @@ RETURNS = ''' ] } } -''' +""" import datetime import time diff --git a/plugins/modules/scaleway_organization_info.py b/plugins/modules/scaleway_organization_info.py index 603ab3cd4c..a28b290bbc 100644 --- a/plugins/modules/scaleway_organization_info.py +++ b/plugins/modules/scaleway_organization_info.py @@ -44,28 +44,28 @@ scaleway_organization_info: type: list elements: dict sample: - "scaleway_organization_info": [ - { - "address_city_name": "Paris", - "address_country_code": "FR", - "address_line1": "42 Rue de l'univers", - "address_line2": null, - "address_postal_code": "75042", - "address_subdivision_code": "FR-75", - "creation_date": "2018-08-06T13:43:28.508575+00:00", - "currency": "EUR", - "customer_class": "individual", - "id": "3f709602-5e6c-4619-b80c-e8432ferewtr", - "locale": "fr_FR", - "modification_date": "2018-08-06T14:56:41.401685+00:00", - "name": "James Bond", - "support_id": "694324", - "support_level": "basic", - "support_pin": "9324", - "users": [], - "vat_number": null, - "warnings": [] - } + [ + { + "address_city_name": "Paris", + "address_country_code": "FR", + "address_line1": "42 Rue de l'univers", + "address_line2": null, + "address_postal_code": "75042", + "address_subdivision_code": "FR-75", + "creation_date": "2018-08-06T13:43:28.508575+00:00", + "currency": "EUR", + "customer_class": "individual", + "id": "3f709602-5e6c-4619-b80c-e8432ferewtr", + "locale": 
"fr_FR", + "modification_date": "2018-08-06T14:56:41.401685+00:00", + "name": "James Bond", + "support_id": "694324", + "support_level": "basic", + "support_pin": "9324", + "users": [], + "vat_number": null, + "warnings": [] + } ] """ diff --git a/plugins/modules/scaleway_private_network.py b/plugins/modules/scaleway_private_network.py index 922a780098..edd5d435cb 100644 --- a/plugins/modules/scaleway_private_network.py +++ b/plugins/modules/scaleway_private_network.py @@ -89,26 +89,26 @@ EXAMPLES = r""" RETURN = r""" scaleway_private_network: - description: Information on the VPC. - returned: success when O(state=present) - type: dict - sample: - { - "created_at": "2022-01-15T11:11:12.676445Z", - "id": "12345678-f1e6-40ec-83e5-12345d67ed89", - "name": "network", - "organization_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90", - "project_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90", - "tags": [ - "tag1", - "tag2", - "tag3", - "tag4", - "tag5" - ], - "updated_at": "2022-01-15T11:12:04.624837Z", - "zone": "fr-par-2" - } + description: Information on the VPC. + returned: success when O(state=present) + type: dict + sample: + { + "created_at": "2022-01-15T11:11:12.676445Z", + "id": "12345678-f1e6-40ec-83e5-12345d67ed89", + "name": "network", + "organization_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90", + "project_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90", + "tags": [ + "tag1", + "tag2", + "tag3", + "tag4", + "tag5" + ], + "updated_at": "2022-01-15T11:12:04.624837Z", + "zone": "fr-par-2" + } """ from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway diff --git a/plugins/modules/scaleway_security_group.py b/plugins/modules/scaleway_security_group.py index 3e1a28275e..cb4e44c844 100644 --- a/plugins/modules/scaleway_security_group.py +++ b/plugins/modules/scaleway_security_group.py @@ -109,22 +109,23 @@ EXAMPLES = r""" RETURN = r""" data: - description: This is only present when O(state=present). - returned: when O(state=present) - type: dict - sample: { - "scaleway_security_group": { - "description": "my security group description", - "enable_default_security": true, - "id": "0168fb1f-cc46-4f69-b4be-c95d2a19bcae", - "inbound_default_policy": "accept", - "name": "security_group", - "organization": "43a3b6c8-916f-477b-b7ec-ff1898f5fdd9", - "organization_default": false, - "outbound_default_policy": "accept", - "servers": [], - "stateful": false - } + description: This is only present when O(state=present). 
+ returned: when O(state=present) + type: dict + sample: + { + "scaleway_security_group": { + "description": "my security group description", + "enable_default_security": true, + "id": "0168fb1f-cc46-4f69-b4be-c95d2a19bcae", + "inbound_default_policy": "accept", + "name": "security_group", + "organization": "43a3b6c8-916f-477b-b7ec-ff1898f5fdd9", + "organization_default": false, + "outbound_default_policy": "accept", + "servers": [], + "stateful": false + } } """ diff --git a/plugins/modules/scaleway_security_group_info.py b/plugins/modules/scaleway_security_group_info.py index 6664938e09..4cdb295282 100644 --- a/plugins/modules/scaleway_security_group_info.py +++ b/plugins/modules/scaleway_security_group_info.py @@ -56,21 +56,21 @@ scaleway_security_group_info: type: list elements: dict sample: - "scaleway_security_group_info": [ - { - "description": "test-ams", - "enable_default_security": true, - "id": "7fcde327-8bed-43a6-95c4-6dfbc56d8b51", - "name": "test-ams", - "organization": "3f709602-5e6c-4619-b80c-e841c89734af", - "organization_default": false, - "servers": [ - { - "id": "12f19bc7-108c-4517-954c-e6b3d0311363", - "name": "scw-e0d158" - } - ] - } + [ + { + "description": "test-ams", + "enable_default_security": true, + "id": "7fcde327-8bed-43a6-95c4-6dfbc56d8b51", + "name": "test-ams", + "organization": "3f709602-5e6c-4619-b80c-e841c89734af", + "organization_default": false, + "servers": [ + { + "id": "12f19bc7-108c-4517-954c-e6b3d0311363", + "name": "scw-e0d158" + } + ] + } ] """ diff --git a/plugins/modules/scaleway_security_group_rule.py b/plugins/modules/scaleway_security_group_rule.py index ec89d41f6c..f7f6304a26 100644 --- a/plugins/modules/scaleway_security_group_rule.py +++ b/plugins/modules/scaleway_security_group_rule.py @@ -116,21 +116,22 @@ EXAMPLES = r""" RETURN = r""" data: - description: This is only present when O(state=present). - returned: when O(state=present) - type: dict - sample: { - "scaleway_security_group_rule": { - "direction": "inbound", - "protocol": "TCP", - "ip_range": "0.0.0.0/0", - "dest_port_from": 80, - "action": "accept", - "position": 2, - "dest_port_to": null, - "editable": null, - "id": "10cb0b9a-80f6-4830-abd7-a31cd828b5e9" - } + description: This is only present when O(state=present). 
+ returned: when O(state=present) + type: dict + sample: + { + "scaleway_security_group_rule": { + "direction": "inbound", + "protocol": "TCP", + "ip_range": "0.0.0.0/0", + "dest_port_from": 80, + "action": "accept", + "position": 2, + "dest_port_to": null, + "editable": null, + "id": "10cb0b9a-80f6-4830-abd7-a31cd828b5e9" + } } """ diff --git a/plugins/modules/scaleway_server_info.py b/plugins/modules/scaleway_server_info.py index 39af47005e..327715d2db 100644 --- a/plugins/modules/scaleway_server_info.py +++ b/plugins/modules/scaleway_server_info.py @@ -57,103 +57,103 @@ scaleway_server_info: type: list elements: dict sample: - "scaleway_server_info": [ - { - "arch": "x86_64", - "boot_type": "local", - "bootscript": { - "architecture": "x86_64", - "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", - "default": true, - "dtb": "", - "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9", - "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz", - "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127", - "organization": "11111111-1111-4111-8111-111111111111", - "public": true, - "title": "x86_64 mainline 4.4.127 rev1" - }, - "commercial_type": "START1-XS", + [ + { + "arch": "x86_64", + "boot_type": "local", + "bootscript": { + "architecture": "x86_64", + "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", + "default": true, + "dtb": "", + "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9", + "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz", + "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127", + "organization": "11111111-1111-4111-8111-111111111111", + "public": true, + "title": "x86_64 mainline 4.4.127 rev1" + }, + "commercial_type": "START1-XS", + "creation_date": "2018-08-14T21:36:56.271545+00:00", + "dynamic_ip_required": false, + "enable_ipv6": false, + "extra_networks": [], + "hostname": "scw-e0d256", + "id": "12f19bc7-108c-4517-954c-e6b3d0311363", + "image": { + "arch": "x86_64", + "creation_date": "2018-04-26T12:42:21.619844+00:00", + "default_bootscript": { + "architecture": "x86_64", + "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", + "default": true, + "dtb": "", + "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9", + "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz", + "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127", + "organization": "11111111-1111-4111-8111-111111111111", + "public": true, + "title": "x86_64 mainline 4.4.127 rev1" + }, + "extra_volumes": [], + "from_server": null, + "id": "67375eb1-f14d-4f02-bb42-6119cecbde51", + "modification_date": "2018-04-26T12:49:07.573004+00:00", + "name": "Ubuntu Xenial", + "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db", + "public": true, + "root_volume": { + "id": "020b8d61-3867-4a0e-84a4-445c5393e05d", + "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42", + "size": 25000000000, + "volume_type": "l_ssd" + }, + "state": "available" + }, + "ipv6": null, + "location": { + "cluster_id": "5", + "hypervisor_id": "412", + "node_id": "2", + "platform_id": "13", + "zone_id": "par1" + }, + "maintenances": [], + "modification_date": "2018-08-14T21:37:28.630882+00:00", + "name": "scw-e0d256", + "organization": "3f709602-5e6c-4619-b80c-e841c89734af", + "private_ip": "10.14.222.131", + "protected": false, + "public_ip": { + "address": "163.172.170.197", + "dynamic": false, + "id": 
"ea081794-a581-4495-8451-386ddaf0a451" + }, + "security_group": { + "id": "a37379d2-d8b0-4668-9cfb-1233fc436f7e", + "name": "Default security group" + }, + "state": "running", + "state_detail": "booted", + "tags": [], + "volumes": { + "0": { "creation_date": "2018-08-14T21:36:56.271545+00:00", - "dynamic_ip_required": false, - "enable_ipv6": false, - "extra_networks": [], - "hostname": "scw-e0d256", - "id": "12f19bc7-108c-4517-954c-e6b3d0311363", - "image": { - "arch": "x86_64", - "creation_date": "2018-04-26T12:42:21.619844+00:00", - "default_bootscript": { - "architecture": "x86_64", - "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", - "default": true, - "dtb": "", - "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9", - "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz", - "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127", - "organization": "11111111-1111-4111-8111-111111111111", - "public": true, - "title": "x86_64 mainline 4.4.127 rev1" - }, - "extra_volumes": [], - "from_server": null, - "id": "67375eb1-f14d-4f02-bb42-6119cecbde51", - "modification_date": "2018-04-26T12:49:07.573004+00:00", - "name": "Ubuntu Xenial", - "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db", - "public": true, - "root_volume": { - "id": "020b8d61-3867-4a0e-84a4-445c5393e05d", - "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42", - "size": 25000000000, - "volume_type": "l_ssd" - }, - "state": "available" - }, - "ipv6": null, - "location": { - "cluster_id": "5", - "hypervisor_id": "412", - "node_id": "2", - "platform_id": "13", - "zone_id": "par1" - }, - "maintenances": [], - "modification_date": "2018-08-14T21:37:28.630882+00:00", - "name": "scw-e0d256", + "export_uri": "device://dev/vda", + "id": "68386fae-4f55-4fbf-aabb-953036a85872", + "modification_date": "2018-08-14T21:36:56.271545+00:00", + "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42", "organization": "3f709602-5e6c-4619-b80c-e841c89734af", - "private_ip": "10.14.222.131", - "protected": false, - "public_ip": { - "address": "163.172.170.197", - "dynamic": false, - "id": "ea081794-a581-4495-8451-386ddaf0a451" + "server": { + "id": "12f19bc7-108c-4517-954c-e6b3d0311363", + "name": "scw-e0d256" }, - "security_group": { - "id": "a37379d2-d8b0-4668-9cfb-1233fc436f7e", - "name": "Default security group" - }, - "state": "running", - "state_detail": "booted", - "tags": [], - "volumes": { - "0": { - "creation_date": "2018-08-14T21:36:56.271545+00:00", - "export_uri": "device://dev/vda", - "id": "68386fae-4f55-4fbf-aabb-953036a85872", - "modification_date": "2018-08-14T21:36:56.271545+00:00", - "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42", - "organization": "3f709602-5e6c-4619-b80c-e841c89734af", - "server": { - "id": "12f19bc7-108c-4517-954c-e6b3d0311363", - "name": "scw-e0d256" - }, - "size": 25000000000, - "state": "available", - "volume_type": "l_ssd" - } - } + "size": 25000000000, + "state": "available", + "volume_type": "l_ssd" + } } + } ] """ diff --git a/plugins/modules/scaleway_snapshot_info.py b/plugins/modules/scaleway_snapshot_info.py index 6b932cced2..ead1826aa4 100644 --- a/plugins/modules/scaleway_snapshot_info.py +++ b/plugins/modules/scaleway_snapshot_info.py @@ -57,20 +57,20 @@ scaleway_snapshot_info: type: list elements: dict sample: - "scaleway_snapshot_info": [ + [ { - "base_volume": { - "id": "68386fae-4f55-4fbf-aabb-953036a85872", - "name": 
"snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42" - }, - "creation_date": "2018-08-14T22:34:35.299461+00:00", - "id": "b61b4b03-a2e9-4da5-b5ea-e462ac0662d2", - "modification_date": "2018-08-14T22:34:54.520560+00:00", - "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42 snapshot", - "organization": "3f709602-5e6c-4619-b80c-e841c89734af", - "size": 25000000000, - "state": "available", - "volume_type": "l_ssd" + "base_volume": { + "id": "68386fae-4f55-4fbf-aabb-953036a85872", + "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42" + }, + "creation_date": "2018-08-14T22:34:35.299461+00:00", + "id": "b61b4b03-a2e9-4da5-b5ea-e462ac0662d2", + "modification_date": "2018-08-14T22:34:54.520560+00:00", + "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42 snapshot", + "organization": "3f709602-5e6c-4619-b80c-e841c89734af", + "size": 25000000000, + "state": "available", + "volume_type": "l_ssd" } ] """ diff --git a/plugins/modules/scaleway_sshkey.py b/plugins/modules/scaleway_sshkey.py index 37e8ec8c3b..a8ccc155e1 100644 --- a/plugins/modules/scaleway_sshkey.py +++ b/plugins/modules/scaleway_sshkey.py @@ -71,13 +71,16 @@ EXAMPLES = r""" RETURN = r""" data: - description: This is only present when O(state=present). - returned: when O(state=present) - type: dict - sample: { - "ssh_public_keys": [ - {"key": "ssh-rsa AAAA...."} - ] + description: This is only present when O(state=present). + returned: when O(state=present) + type: dict + sample: + { + "ssh_public_keys": [ + { + "key": "ssh-rsa AAAA...." + } + ] } """ diff --git a/plugins/modules/scaleway_volume.py b/plugins/modules/scaleway_volume.py index ed6a506742..c7c6346075 100644 --- a/plugins/modules/scaleway_volume.py +++ b/plugins/modules/scaleway_volume.py @@ -95,10 +95,11 @@ EXAMPLES = r""" RETURN = r""" data: - description: This is only present when O(state=present). - returned: when O(state=present) - type: dict - sample: { + description: This is only present when O(state=present). 
+ returned: when O(state=present) + type: dict + sample: + { "volume": { "export_uri": null, "id": "c675f420-cfeb-48ff-ba2a-9d2a4dbe3fcd", @@ -107,8 +108,8 @@ data: "server": null, "size": 10000000000, "volume_type": "l_ssd" - } -} + } + } """ from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway diff --git a/plugins/modules/scaleway_volume_info.py b/plugins/modules/scaleway_volume_info.py index 1b2e95f88c..8a4986a724 100644 --- a/plugins/modules/scaleway_volume_info.py +++ b/plugins/modules/scaleway_volume_info.py @@ -57,19 +57,19 @@ scaleway_volume_info: type: list elements: dict sample: - "scaleway_volume_info": [ - { - "creation_date": "2018-08-14T20:56:24.949660+00:00", - "export_uri": null, - "id": "b8d51a06-daeb-4fef-9539-a8aea016c1ba", - "modification_date": "2018-08-14T20:56:24.949660+00:00", - "name": "test-volume", - "organization": "3f709602-5e6c-4619-b80c-e841c89734af", - "server": null, - "size": 50000000000, - "state": "available", - "volume_type": "l_ssd" - } + [ + { + "creation_date": "2018-08-14T20:56:24.949660+00:00", + "export_uri": null, + "id": "b8d51a06-daeb-4fef-9539-a8aea016c1ba", + "modification_date": "2018-08-14T20:56:24.949660+00:00", + "name": "test-volume", + "organization": "3f709602-5e6c-4619-b80c-e841c89734af", + "server": null, + "size": 50000000000, + "state": "available", + "volume_type": "l_ssd" + } ] """ diff --git a/plugins/modules/sefcontext.py b/plugins/modules/sefcontext.py index eeba491f5d..d1023d9d87 100644 --- a/plugins/modules/sefcontext.py +++ b/plugins/modules/sefcontext.py @@ -52,9 +52,10 @@ options: type: str substitute: description: - - Path to use to substitute file context(s) for the specified O(target). The context labeling for the O(target) subtree is made equivalent - to this path. - - This is also referred to as SELinux file context equivalence and it implements the C(equal) functionality of the SELinux management tools. + - Path to use to substitute file context(s) for the specified O(target). The context labeling for the O(target) subtree + is made equivalent to this path. + - This is also referred to as SELinux file context equivalence and it implements the C(equal) functionality of the SELinux + management tools. version_added: 6.4.0 type: str aliases: [equal] @@ -72,7 +73,8 @@ options: state: description: - Whether the SELinux file context must be V(absent) or V(present). - - Specifying V(absent) without either O(setype) or O(substitute) deletes both SELinux type or path substitution mappings that match O(target). + - Specifying V(absent) without either O(setype) or O(substitute) deletes both SELinux type or path substitution mappings + that match O(target). type: str choices: [absent, present] default: present @@ -91,10 +93,11 @@ notes: - The changes are persistent across reboots. - O(setype) and O(substitute) are mutually exclusive. - If O(state=present) then one of O(setype) or O(substitute) is mandatory. - - The M(community.general.sefcontext) module does not modify existing files to the new SELinux context(s), so it is advisable to first create - the SELinux file contexts before creating files, or run C(restorecon) manually for the existing files that require the new SELinux file contexts. - - Not applying SELinux fcontexts to existing files is a deliberate decision as it would be unclear what reported changes would entail to, and - there is no guarantee that applying SELinux fcontext does not pick up other unrelated prior changes. 
+ - The M(community.general.sefcontext) module does not modify existing files to the new SELinux context(s), so it is advisable + to first create the SELinux file contexts before creating files, or run C(restorecon) manually for the existing files + that require the new SELinux file contexts. + - Not applying SELinux fcontexts to existing files is a deliberate decision as it would be unclear what the reported changes + would entail, and there is no guarantee that applying SELinux fcontext does not pick up other unrelated prior changes. requirements: - libselinux-python - policycoreutils-python diff --git a/plugins/modules/selinux_permissive.py b/plugins/modules/selinux_permissive.py index b5c0ee4a61..c6107309ac 100644 --- a/plugins/modules/selinux_permissive.py +++ b/plugins/modules/selinux_permissive.py @@ -24,7 +24,7 @@ attributes: options: domain: description: - - The domain that will be added or removed from the list of permissive domains. + - The domain that is added or removed from the list of permissive domains. type: str required: true aliases: [name] diff --git a/plugins/modules/selogin.py b/plugins/modules/selogin.py index 8f1b20c230..408d9221da 100644 --- a/plugins/modules/selogin.py +++ b/plugins/modules/selogin.py @@ -34,7 +34,8 @@ options: type: str aliases: [serange] description: - - MLS/MCS Security Range (MLS/MCS Systems only) SELinux Range for SELinux login mapping defaults to the SELinux user record range. + - MLS/MCS Security Range (MLS/MCS Systems only). The SELinux range for the SELinux login mapping defaults to the SELinux user + record range. default: s0 state: type: str diff --git a/plugins/modules/sendgrid.py b/plugins/modules/sendgrid.py index e7418af1da..c0e4b239bc 100644 --- a/plugins/modules/sendgrid.py +++ b/plugins/modules/sendgrid.py @@ -15,9 +15,12 @@ short_description: Sends an email with the SendGrid API description: - Sends an email with a SendGrid account through their API, not through the SMTP service. notes: - - This module is non-idempotent because it sends an email through the external API. It is idempotent only in the case that the module fails. - - Like the other notification modules, this one requires an external dependency to work. In this case, you will need an active SendGrid account. - - In order to use O(api_key), O(cc), O(bcc), O(attachments), O(from_name), O(html_body), and O(headers) you must C(pip install sendgrid). + - This module is non-idempotent because it sends an email through the external API. It is idempotent only in the case that + the module fails. + - Like the other notification modules, this one requires an external dependency to work. In this case, you need an active + SendGrid account. + - In order to use O(api_key), O(cc), O(bcc), O(attachments), O(from_name), O(html_body), and O(headers) you must C(pip install + sendgrid). requirements: - sendgrid Python library 1.6.22 or lower (Sendgrid API V2 supported) extends_documentation_fragment: @@ -79,7 +82,7 @@ options: - The name you want to appear in the from field, for example V(John Doe). html_body: description: - - Whether the body is html content that should be rendered. + - Whether the body is HTML content that should be rendered.
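# Editor's note - a hedged companion to the sefcontext notes above: because the
# module does not relabel existing files, a common pattern is to add the mapping
# and then run restorecon yourself. Path and type values are placeholders.
- name: Map /srv/www to httpd content and relabel existing files (illustrative)
  block:
    - name: Add the file-context mapping
      community.general.sefcontext:
        target: '/srv/www(/.*)?'
        setype: httpd_sys_content_t
        state: present

    - name: Apply the new context to files that already exist
      ansible.builtin.command: restorecon -irv /srv/www
      register: restorecon_result
      changed_when: restorecon_result.stdout | length > 0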
type: bool default: false headers: @@ -211,19 +214,19 @@ def post_sendgrid_api(module, username, password, from_address, to_addresses, def main(): module = AnsibleModule( argument_spec=dict( - username=dict(required=False), - password=dict(required=False, no_log=True), - api_key=dict(required=False, no_log=True), - bcc=dict(required=False, type='list', elements='str'), - cc=dict(required=False, type='list', elements='str'), - headers=dict(required=False, type='dict'), + username=dict(), + password=dict(no_log=True), + api_key=dict(no_log=True), + bcc=dict(type='list', elements='str'), + cc=dict(type='list', elements='str'), + headers=dict(type='dict'), from_address=dict(required=True), - from_name=dict(required=False), + from_name=dict(), to_addresses=dict(required=True, type='list', elements='str'), subject=dict(required=True), body=dict(required=True), - html_body=dict(required=False, default=False, type='bool'), - attachments=dict(required=False, type='list', elements='path') + html_body=dict(default=False, type='bool'), + attachments=dict(type='list', elements='path') ), supports_check_mode=True, mutually_exclusive=[ diff --git a/plugins/modules/sensu_check.py b/plugins/modules/sensu_check.py index 42ffcf3737..a4b5771528 100644 --- a/plugins/modules/sensu_check.py +++ b/plugins/modules/sensu_check.py @@ -14,9 +14,9 @@ module: sensu_check short_description: Manage Sensu checks description: - Manage the checks that should be run on a machine by I(Sensu). - - Most options do not have a default and will not be added to the check definition unless specified. - - All defaults except O(path), O(state), O(backup) and O(metric) are not managed by this module, - they are simply specified for your convenience. + - Most options do not have a default and are not added to the check definition unless specified. + - All defaults except O(path), O(state), O(backup) and O(metric) are not managed by this module, they are simply specified + for your convenience. deprecated: removed_in: 13.0.0 why: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. @@ -45,13 +45,13 @@ options: type: str description: - Path to the JSON file of the check to be added/removed. - - Will be created if it does not exist (unless O(state=absent)). - - The parent folders need to exist when O(state=present), otherwise an error will be thrown. + - It is created if it does not exist (unless O(state=absent)). + - The parent folders need to exist when O(state=present), otherwise an error is thrown. default: /etc/sensu/conf.d/checks.json backup: description: - - Create a backup file (if yes), including the timestamp information so - you can get the original file back if you somehow clobbered it incorrectly. + - Create a backup file (if yes), including the timestamp information so you can get the original file back if you somehow + clobbered it incorrectly. type: bool default: false command: @@ -99,7 +99,8 @@ options: type: list elements: str description: - - Other checks this check depends on, if dependencies fail handling of this check will be disabled. + - Other checks this one depends on. + - If dependencies fail handling of this check is disabled. metric: description: - Whether the check is a metric. @@ -128,8 +129,7 @@ options: - Number of seconds handlers should wait before taking second action. aggregate: description: - - Classifies the check as an aggregate check, - making it available using the aggregate API. + - Classifies the check as an aggregate check, making it available using the aggregate API. 
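# Editor's note - a sketch assembled from the sendgrid argument spec above:
# with O(api_key) set, the extended options such as O(html_body) become usable.
# The key variable and addresses are placeholders.
- name: Send an HTML status mail through SendGrid (illustrative)
  community.general.sendgrid:
    api_key: "{{ sendgrid_api_key }}"
    from_address: ops@example.com
    to_addresses:
      - oncall@example.com
    subject: Deployment finished
    body: "<p>The rollout completed successfully.</p>"
    html_body: true
  delegate_to: localhost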
- Default is V(false). type: bool low_flap_threshold: diff --git a/plugins/modules/sensu_client.py b/plugins/modules/sensu_client.py index 50a5c0ae74..f87621bd6d 100644 --- a/plugins/modules/sensu_client.py +++ b/plugins/modules/sensu_client.py @@ -14,7 +14,7 @@ author: "David Moreau Simard (@dmsimard)" short_description: Manages Sensu client configuration description: - Manages Sensu client configuration. - - 'For more information, refer to the Sensu documentation: U(https://sensuapp.org/docs/latest/reference/clients.html)' + - For more information, refer to the L(Sensu documentation, https://sensuapp.org/docs/latest/reference/clients.html). deprecated: removed_in: 13.0.0 why: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. @@ -42,7 +42,8 @@ options: type: str description: - An address to help identify and reach the client. This is only informational, usually an IP address or hostname. - - If not specified it defaults to non-loopback IPv4 address as determined by Ruby Socket.ip_address_list (provided by Sensu). + - If not specified, it defaults to a non-loopback IPv4 address as determined by Ruby C(Socket.ip_address_list) (provided + by Sensu). subscriptions: type: list elements: str @@ -52,8 +53,8 @@ options: - The subscriptions array items must be strings. safe_mode: description: - - If safe mode is enabled for the client. Safe mode requires local check definitions in order to accept a check request and execute the - check. + - If safe mode is enabled for the client. Safe mode requires local check definitions in order to accept a check request + and execute the check. type: bool default: false redact: @@ -73,7 +74,8 @@ options: keepalive: type: dict description: - - The keepalive definition scope, used to configure Sensu client keepalives behavior (for example keepalive thresholds and so). + - The keepalive definition scope, used to configure Sensu client keepalives behavior (for example keepalive thresholds + and so on). registration: type: dict description: @@ -102,7 +104,8 @@ options: servicenow: type: dict description: - - The servicenow definition scope, used to configure the Sensu Enterprise ServiceNow integration (Sensu Enterprise users only). + - The servicenow definition scope, used to configure the Sensu Enterprise ServiceNow integration (Sensu Enterprise users + only). """ EXAMPLES = r""" @@ -155,7 +158,13 @@ config: description: Effective client configuration, when state is present. returned: success type: dict - sample: {'name': 'client', 'subscriptions': ['default']} + sample: + { + "name": "client", + "subscriptions": [ + "default" + ] + } file: description: Path to the client configuration file. returned: success diff --git a/plugins/modules/sensu_handler.py b/plugins/modules/sensu_handler.py index 995399cd39..5b5494bf1c 100644 --- a/plugins/modules/sensu_handler.py +++ b/plugins/modules/sensu_handler.py @@ -14,7 +14,7 @@ author: "David Moreau Simard (@dmsimard)" short_description: Manages Sensu handler configuration description: - Manages Sensu handler configuration. - - 'For more information, refer to the Sensu documentation: U(https://sensuapp.org/docs/latest/reference/handlers.html)' + - For more information, refer to the L(Sensu documentation, https://sensuapp.org/docs/latest/reference/handlers.html). deprecated: removed_in: 13.0.0 why: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. @@ -57,7 +57,7 @@ options: type: list elements: str description: - - An array of check result severities the handler will handle.
+ - An array of check result severities the handler handles. - 'NOTE: event resolution bypasses this filtering.' - "Example: [ 'warning', 'critical', 'unknown' ]." mutator: @@ -90,7 +90,8 @@ options: type: dict description: - The socket definition scope, used to configure the TCP/UDP handler socket. - - 'NOTE: the O(socket) attribute is only required for TCP/UDP handlers (that is, handlers configured with O(type=tcp) or O(type=udp)).' + - 'NOTE: the O(socket) attribute is only required for TCP/UDP handlers (that is, handlers configured with O(type=tcp) + or O(type=udp)).' pipe: type: dict description: @@ -154,7 +155,12 @@ config: description: Effective handler configuration, when state is present. returned: success type: dict - sample: {'name': 'irc', 'type': 'pipe', 'command': '/usr/local/bin/notify-irc.sh'} + sample: + { + "name": "irc", + "type": "pipe", + "command": "/usr/local/bin/notify-irc.sh" + } file: description: Path to the handler configuration file. returned: success diff --git a/plugins/modules/sensu_silence.py b/plugins/modules/sensu_silence.py index 1dfc8e1802..91e6f63496 100644 --- a/plugins/modules/sensu_silence.py +++ b/plugins/modules/sensu_silence.py @@ -38,15 +38,16 @@ options: expire: type: int description: - - If specified, the silence entry will be automatically cleared after this number of seconds. + - If specified, the silence entry is automatically cleared after this number of seconds. expire_on_resolve: description: - - If specified as true, the silence entry will be automatically cleared once the condition it is silencing is resolved. + - If specified as true, the silence entry is automatically cleared once the condition it is silencing is resolved. type: bool reason: type: str description: - - If specified, this free-form string is used to provide context or rationale for the reason this silence entry was created. + - If specified, this free-form string is used to provide context or rationale for the reason this silence entry was + created. state: type: str description: @@ -201,7 +202,7 @@ def create( expire_on_resolve, reason, subscription): (rc, out, changed) = query(module, url, check, subscription) for i in out: - if (i['subscription'] == subscription): + if i['subscription'] == subscription: if ( (check is None or check == i['check']) and ( @@ -264,14 +265,14 @@ def create( def main(): module = AnsibleModule( argument_spec=dict( - check=dict(required=False), - creator=dict(required=False), - expire=dict(type='int', required=False), - expire_on_resolve=dict(type='bool', required=False), - reason=dict(required=False), + check=dict(), + creator=dict(), + expire=dict(type='int'), + expire_on_resolve=dict(type='bool'), + reason=dict(), state=dict(default='present', choices=['present', 'absent']), subscription=dict(required=True), - url=dict(required=False, default='http://127.0.01:4567'), + url=dict(default='http://127.0.01:4567'), ), supports_check_mode=True ) diff --git a/plugins/modules/sensu_subscription.py b/plugins/modules/sensu_subscription.py index cddde2630b..da5c50d42f 100644 --- a/plugins/modules/sensu_subscription.py +++ b/plugins/modules/sensu_subscription.py @@ -46,8 +46,8 @@ options: default: /etc/sensu/conf.d/subscriptions.json backup: description: - - Create a backup file (if yes), including the timestamp information so you - can get the original file back if you somehow clobbered it incorrectly. 
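# Editor's note - a usage sketch matching the sensu_silence argument spec above:
# only O(subscription) is required, and O(url) falls back to the module default.
# Host and subscription values are placeholders.
- name: Silence a client subscription for one hour (illustrative)
  community.general.sensu_silence:
    url: http://sensu.example.org:4567
    subscription: client:web01
    expire: 3600
    reason: Planned maintenance
    state: present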
+ - Create a backup file (if yes), including the timestamp information so you can get the original file back if you somehow + clobbered it incorrectly. type: bool required: false default: false diff --git a/plugins/modules/serverless.py b/plugins/modules/serverless.py index 937f7dcdea..8bba307440 100644 --- a/plugins/modules/serverless.py +++ b/plugins/modules/serverless.py @@ -51,7 +51,7 @@ options: deploy: description: - Whether or not to deploy artifacts after building them. - - When this option is V(false) all the functions will be built, but no stack update will be run to send them out. + - When this option is V(false), all the functions are built, but no stack update is run to send them out. - This is mostly useful for generating artifacts to be stored/deployed elsewhere. type: bool default: true diff --git a/plugins/modules/shutdown.py b/plugins/modules/shutdown.py index 6f2dac14b1..d6bd7ecc6b 100644 --- a/plugins/modules/shutdown.py +++ b/plugins/modules/shutdown.py @@ -12,10 +12,10 @@ DOCUMENTATION = r""" module: shutdown short_description: Shut down a machine notes: - - E(PATH) is ignored on the remote node when searching for the C(shutdown) command. Use O(search_paths) to specify locations to search if the - default paths do not work. - - The O(msg) and O(delay) options are not supported when a shutdown command is not found in O(search_paths), instead the module will attempt - to shutdown the system by calling C(systemctl shutdown). + - E(PATH) is ignored on the remote node when searching for the C(shutdown) command. Use O(search_paths) to specify locations + to search if the default paths do not work. + - The O(msg) and O(delay) options are not supported when a shutdown command is not found in O(search_paths), instead the + module attempts to shut down the system by calling C(systemctl shutdown). description: - Shuts down a machine. version_added: "1.1.0" @@ -35,8 +35,8 @@ options: delay: description: - Seconds to wait before shutdown. Passed as a parameter to the shutdown command. - - On Linux, macOS and OpenBSD, this is converted to minutes and rounded down. If less than 60, it will be set to 0. - - On Solaris and FreeBSD, this will be seconds. + - On Linux, macOS and OpenBSD, this is converted to minutes and rounded down. If less than 60, it is set to 0. + - On Solaris and FreeBSD, this represents seconds. type: int default: 0 msg: @@ -47,8 +47,8 @@ options: search_paths: description: - Paths to search on the remote machine for the C(shutdown) command. - - I(Only) these paths will be searched for the C(shutdown) command. E(PATH) is ignored in the remote node when searching for the C(shutdown) - command. + - I(Only) these paths are searched for the C(shutdown) command. E(PATH) is ignored in the remote node when searching + for the C(shutdown) command. type: list elements: path default: ['/sbin', '/usr/sbin', '/usr/local/sbin'] diff --git a/plugins/modules/simpleinit_msb.py b/plugins/modules/simpleinit_msb.py index 2b1b865d2c..ca1371653c 100644 --- a/plugins/modules/simpleinit_msb.py +++ b/plugins/modules/simpleinit_msb.py @@ -17,8 +17,6 @@ short_description: Manage services on Source Mage GNU/Linux version_added: 7.5.0 description: - Controls services on remote hosts using C(simpleinit-msb). -notes: - - This module needs ansible-core 2.15.5 or newer. Older versions have a broken and insufficient daemonize functionality.
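# Editor's note - an illustration of the O(delay) rounding described above:
# 90 seconds becomes one whole minute on Linux, macOS and OpenBSD, but stays
# 90 seconds on Solaris and FreeBSD. The message is a placeholder.
- name: Shut the host down after a short grace period (illustrative)
  community.general.shutdown:
    delay: 90
    msg: Going down for maintenance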
author: "Vlad Glagolev (@vaygr)" extends_documentation_fragment: - community.general.attributes @@ -39,10 +37,10 @@ options: required: false choices: [running, started, stopped, restarted, reloaded] description: - - V(started)/V(stopped) are idempotent actions that will not run commands unless necessary. V(restarted) will always bounce the service. - V(reloaded) will always reload. + - V(started)/V(stopped) are idempotent actions that do not run commands unless necessary. V(restarted) always bounces + the service. V(reloaded) always reloads. - At least one of O(state) and O(enabled) are required. - - Note that V(reloaded) will start the service if it is not already started, even if your chosen init system would not normally. + - Note that V(reloaded) starts the service if it is not already started, even if your chosen init system would not normally. enabled: type: bool required: false diff --git a/plugins/modules/sl_vm.py b/plugins/modules/sl_vm.py index ea48340f1c..8b199f5698 100644 --- a/plugins/modules/sl_vm.py +++ b/plugins/modules/sl_vm.py @@ -24,7 +24,7 @@ attributes: options: instance_id: description: - - Instance Id of the virtual instance to perform action option. + - Instance ID of the virtual instance to perform action option. type: str hostname: description: @@ -146,7 +146,7 @@ options: type: str ssh_keys: description: - - List of ssh keys by their Id to be assigned to a virtual instance. + - List of ssh keys by their ID to be assigned to a virtual instance. type: list elements: str default: [] @@ -173,8 +173,16 @@ options: type: int requirements: - softlayer >= 4.1.1 +notes: + - If using Python 2.7, you must install C(softlayer-python<=5.7.2). + - If using Python 3.6, you must install C(softlayer-python<=6.0.0). + - The C(softlayer-python) library, at version 6.2.6 (from Jan 2025), only supports Python version 3.8, 3.9 and 3.10. author: - Matt Colton (@mcltn) +seealso: + - name: SoftLayer API Python Client + description: The SoftLayer API Python Client is required for this module. + link: https://github.com/SoftLayer/softlayer-python """ EXAMPLES = r""" @@ -267,7 +275,7 @@ EXAMPLES = r""" """ # TODO: Disabled RETURN as it is breaking the build for docs. Needs to be fixed. -RETURN = """# """ +RETURN = """#""" import json import time @@ -311,9 +319,9 @@ def create_virtual_instance(module): return False, None # Check if OS or Image Template is provided (Can't be both, defaults to OS) - if (module.params.get('os_code') is not None and module.params.get('os_code') != ''): + if module.params.get('os_code') is not None and module.params.get('os_code') != '': module.params['image_id'] = '' - elif (module.params.get('image_id') is not None and module.params.get('image_id') != ''): + elif module.params.get('image_id') is not None and module.params.get('image_id') != '': module.params['os_code'] = '' module.params['disks'] = [] # Blank out disks since it will use the template else: diff --git a/plugins/modules/slack.py b/plugins/modules/slack.py index 05d34e94b9..e009320d85 100644 --- a/plugins/modules/slack.py +++ b/plugins/modules/slack.py @@ -32,29 +32,35 @@ options: domain: type: str description: - - Slack (sub)domain for your environment without protocol. (For example V(example.slack.com).) In Ansible 1.8 and beyond, this is deprecated - and may be ignored. See token documentation for information. + - "When using new format 'Webhook token' and WebAPI tokens: this can be V(slack.com) or V(slack-gov.com) and is ignored + otherwise." 
+ - "When using old format 'Webhook token': Slack (sub)domain for your environment without protocol. (For example V(example.slack.com).) + in Ansible 1.8 and beyond, this is deprecated and may be ignored. See token documentation for information." token: type: str description: - - Slack integration token. This authenticates you to the slack service. Make sure to use the correct type of token, depending on what method - you use. - - 'Webhook token: Prior to Ansible 1.8, a token looked like V(3Ffe373sfhRE6y42Fg3rvf4GlK). In Ansible 1.8 and above, Ansible adapts to the - new slack API where tokens look like V(G922VJP24/D921DW937/3Ffe373sfhRE6y42Fg3rvf4GlK). If tokens are in the new format then slack will - ignore any value of domain. If the token is in the old format the domain is required. Ansible has no control of when slack will get rid - of the old API. When slack does that the old format will stop working. ** Please keep in mind the tokens are not the API tokens but are - the webhook tokens. In slack these are found in the webhook URL which are obtained under the apps and integrations. The incoming webhooks - can be added in that area. In some cases this may be locked by your Slack admin and you must request access. It is there that the incoming - webhooks can be added. The key is on the end of the URL given to you in that section.' - - "WebAPI token: Slack WebAPI requires a personal, bot or work application token. These tokens start with V(xoxp-), V(xoxb-) or V(xoxa-), - for example V(xoxb-1234-56789abcdefghijklmnop). WebAPI token is required if you intend to receive thread_id. See Slack's documentation - (U(https://api.slack.com/docs/token-types)) for more information." + - Slack integration token. This authenticates you to the Slack service. Make sure to use the correct type of token, + depending on what method you use. + - 'Webhook token: Prior to Ansible 1.8, a token looked like V(3Ffe373sfhRE6y42Fg3rvf4GlK). In Ansible 1.8 and above, + Ansible adapts to the new Slack API where tokens look like V(G922VJP24/D921DW937/3Ffe373sfhRE6y42Fg3rvf4GlK). If tokens + are in the new format then Slack ignores any value of domain except V(slack.com) or V(slack-gov.com). If the token + is in the old format the domain is required. Ansible has no control of when Slack is going to remove the old API. + When Slack does that the old format is going to cease working. B(Please keep in mind the tokens are not the API tokens + but are the webhook tokens.) In Slack these are found in the webhook URL which are obtained under the apps and integrations. + The incoming webhooks can be added in that area. In some cases this may be locked by your Slack admin and you must + request access. It is there that the incoming webhooks can be added. The key is on the end of the URL given to you + in that section.' + - "WebAPI token: Slack WebAPI requires a personal, bot or work application token. These tokens start with V(xoxp-), + V(xoxb-) or V(xoxa-), for example V(xoxb-1234-56789abcdefghijklmnop). WebAPI token is required if you intend to receive + thread_id. See Slack's documentation (U(https://api.slack.com/docs/token-types)) for more information." required: true msg: type: str description: - - Message to send. Note that the module does not handle escaping characters. Plain-text angle brackets and ampersands should be converted - to HTML entities (for example C(&) to C(&)) before sending. See Slack's documentation (U(https://api.slack.com/docs/message-formatting)) for more. + - Message to send. 
+        should be converted to HTML entities (for example C(&) to C(&amp;)) before sending. See Slack's documentation
+        (U(https://api.slack.com/docs/message-formatting))
+        for more.
   channel:
     type: str
     description:
@@ -66,8 +72,8 @@ options:
   message_id:
     description:
       - Optional. Message ID to edit, instead of posting a new message.
-      - If supplied O(channel) must be in form of C(C0xxxxxxx). use C({{ slack_response.channel }}) to get RV(ignore:channel) from previous task
-        run.
+      - If supplied, O(channel) must be in the form of C(C0xxxxxxx). Use C({{ slack_response.channel }}) to get RV(ignore:channel)
+        from previous task run.
       - The token needs history scope to get information on the message to edit (C(channels:history,groups:history,mpim:history,im:history)).
       - Corresponds to C(ts) in the Slack API (U(https://api.slack.com/messaging/modifying)).
     type: str
@@ -86,7 +92,7 @@ options:
     type: str
     description:
       - Emoji for the message sender. See Slack documentation for options.
-      - If O(icon_emoji) is set, O(icon_url) will not be used.
+      - If O(icon_emoji) is set, O(icon_url) is not used.
   link_names:
     type: int
     description:
@@ -104,14 +110,17 @@ options:
       - 'none'
   validate_certs:
     description:
-      - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates.
+      - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed
+        certificates.
     type: bool
     default: true
   color:
     type: str
     description:
-      - Allow text to use default colors - use the default of 'normal' to not send a custom color bar at the start of the message.
-      - Allowed values for color can be one of 'normal', 'good', 'warning', 'danger', any valid 3 digit or 6 digit hex color value.
+      - Allow text to use default colors - use the default of V(normal) to not send a custom color bar at the start of the
+        message.
+      - Allowed values for color can be one of V(normal), V(good), V(warning), V(danger), or any valid 3 digit or 6 digit hex
+        color value.
     default: 'normal'
   attachments:
     type: list
@@ -130,14 +139,14 @@ options:
     type: str
     description:
       - Setting for automatically prepending a V(#) symbol on the passed in O(channel).
-      - The V(auto) method prepends a V(#) unless O(channel) starts with one of V(#), V(@), V(C0), V(GF), V(G0), V(CP). These prefixes only cover
-        a small set of the prefixes that should not have a V(#) prepended. Since an exact condition which O(channel) values must not have the
-        V(#) prefix is not known, the value V(auto) for this option will be deprecated in the future. It is best to explicitly set O(prepend_hash=always)
-        or O(prepend_hash=never) to obtain the needed behavior.
-      - The B(current default) is V(auto), which has been B(deprecated) since community.general 10.2.0.
-        It will change to V(never) in community.general 12.0.0.
-        To prevent deprecation warnings you can explicitly set O(prepend_hash) to the value you want.
-        We suggest to only use V(always) or V(never), but not V(auto), when explicitly setting a value.
+      - The V(auto) method prepends a V(#) unless O(channel) starts with one of V(#), V(@), V(C0), V(GF), V(G0), V(CP). These
+        prefixes only cover a small set of the prefixes that should not have a V(#) prepended. Since an exact condition which
+        O(channel) values must not have the V(#) prefix is not known, the value V(auto) for this option is going to be deprecated in the
+        future.
It is best to explicitly set O(prepend_hash=always) or O(prepend_hash=never) to obtain the needed behavior. + - The B(current default) is V(auto), which has been B(deprecated) since community.general 10.2.0. It is going to change + to V(never) in community.general 12.0.0. To prevent deprecation warnings you can explicitly set O(prepend_hash) to + the value you want. We suggest to only use V(always) or V(never), but not V(auto), when explicitly setting a value. + # when the default changes in community.general 12.0.0, add deprecation for the `auto` value for 14.0.0 choices: - 'always' - 'never' @@ -164,7 +173,8 @@ EXAMPLES = r""" parse: 'none' delegate_to: localhost -- name: Insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in Slack +- name: Insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured + in Slack community.general.slack: token: thetoken/generatedby/slack msg: '{{ inventory_hostname }} is alive!' @@ -262,10 +272,10 @@ from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils.urls import fetch_url OLD_SLACK_INCOMING_WEBHOOK = 'https://%s/services/hooks/incoming-webhook?token=%s' -SLACK_INCOMING_WEBHOOK = 'https://hooks.slack.com/services/%s' -SLACK_POSTMESSAGE_WEBAPI = 'https://slack.com/api/chat.postMessage' -SLACK_UPDATEMESSAGE_WEBAPI = 'https://slack.com/api/chat.update' -SLACK_CONVERSATIONS_HISTORY_WEBAPI = 'https://slack.com/api/conversations.history' +SLACK_INCOMING_WEBHOOK = 'https://hooks.%s/services/%s' +SLACK_POSTMESSAGE_WEBAPI = 'https://%s/api/chat.postMessage' +SLACK_UPDATEMESSAGE_WEBAPI = 'https://%s/api/chat.update' +SLACK_CONVERSATIONS_HISTORY_WEBAPI = 'https://%s/api/conversations.history' # Escaping quotes and apostrophes to avoid ending string prematurely in ansible call. # We do not escape other characters used as Slack metacharacters (e.g. &, <, >). @@ -367,7 +377,11 @@ def build_payload_for_slack(text, channel, thread_id, username, icon_url, icon_e return payload -def get_slack_message(module, token, channel, ts): +def validate_slack_domain(domain): + return (domain if domain in ('slack.com', 'slack-gov.com') else 'slack.com') + + +def get_slack_message(module, domain, token, channel, ts): headers = { 'Content-Type': 'application/json; charset=UTF-8', 'Accept': 'application/json', @@ -379,7 +393,8 @@ def get_slack_message(module, token, channel, ts): 'limit': 1, 'inclusive': 'true', }) - url = SLACK_CONVERSATIONS_HISTORY_WEBAPI + '?' + qs + domain = validate_slack_domain(domain) + url = (SLACK_CONVERSATIONS_HISTORY_WEBAPI % domain) + '?' 
+ qs
     response, info = fetch_url(module=module, url=url, headers=headers, method='GET')
     if info['status'] != 200:
         module.fail_json(msg="failed to get slack message")
@@ -397,9 +412,11 @@ def do_notify_slack(module, domain, token, payload):
     use_webapi = False
     if token.count('/') >= 2:
         # New style webhook token
-        slack_uri = SLACK_INCOMING_WEBHOOK % token
+        domain = validate_slack_domain(domain)
+        slack_uri = SLACK_INCOMING_WEBHOOK % (domain, token)
     elif re.match(r'^xox[abp]-\S+$', token):
-        slack_uri = SLACK_UPDATEMESSAGE_WEBAPI if 'ts' in payload else SLACK_POSTMESSAGE_WEBAPI
+        domain = validate_slack_domain(domain)
+        slack_uri = (SLACK_UPDATEMESSAGE_WEBAPI if 'ts' in payload else SLACK_POSTMESSAGE_WEBAPI) % domain
         use_webapi = True
     else:
         if not domain:
@@ -421,7 +438,7 @@
         if use_webapi:
             obscured_incoming_webhook = slack_uri
         else:
-            obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % '[obscured]'
+            obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % (domain, '[obscured]')
         module.fail_json(msg=" failed to send %s to %s: %s" % (data, obscured_incoming_webhook, info['msg']))

     # each API requires different handling
@@ -489,7 +506,7 @@ def main():
     # if updating an existing message, we can check if there's anything to update
     if message_id is not None:
         changed = False
-        msg = get_slack_message(module, token, channel, message_id)
+        msg = get_slack_message(module, domain, token, channel, message_id)
         for key in ('icon_url', 'icon_emoji', 'link_names', 'color', 'attachments', 'blocks'):
             if msg.get(key) != module.params.get(key):
                 changed = True
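The domain handling introduced in the slack.py hunks above is easy to sanity-check: validate_slack_domain() accepts only the two known Slack domains and falls back to V(slack.com) for anything else before the value is substituted into the endpoint templates. A minimal sketch reusing the names from the diff (the token values are placeholders):

    SLACK_INCOMING_WEBHOOK = 'https://hooks.%s/services/%s'

    def validate_slack_domain(domain):
        # Anything other than the two known Slack domains falls back to slack.com.
        return domain if domain in ('slack.com', 'slack-gov.com') else 'slack.com'

    # GovSlack tenants resolve to the slack-gov.com endpoints...
    assert (SLACK_INCOMING_WEBHOOK % (validate_slack_domain('slack-gov.com'), 'T0/B0/token')
            == 'https://hooks.slack-gov.com/services/T0/B0/token')
    # ...while old-style (sub)domains such as example.slack.com are ignored.
    assert (SLACK_INCOMING_WEBHOOK % (validate_slack_domain('example.slack.com'), 'T0/B0/token')
            == 'https://hooks.slack.com/services/T0/B0/token')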
diff --git a/plugins/modules/smartos_image_info.py b/plugins/modules/smartos_image_info.py
index 96bf9b0575..89c00f5c26 100644
--- a/plugins/modules/smartos_image_info.py
+++ b/plugins/modules/smartos_image_info.py
@@ -25,7 +25,8 @@ attributes:
 options:
   filters:
     description:
-      - Criteria for selecting image. Can be any value from image manifest and V(published_date), V(published), V(source), V(clones), and V(size).
+      - Criteria for selecting image. Can be any value from image manifest and V(published_date), V(published), V(source),
+        V(clones), and V(size).
       - More information can be found at U(https://smartos.org/man/1m/imgadm) under C(imgadm list).
     type: str
 """

@@ -46,12 +47,20 @@ EXAMPLES = r"""
 - name: Print information
   ansible.builtin.debug:
-    msg: "{{ result.smartos_images[item]['name'] }}-{{ result.smartos_images[item]['version'] }} has {{ result.smartos_images[item]['clones'] }} VM(s)"
+    msg: >-
+      {{ result.smartos_images[item]['name'] }}-{{ result.smartos_images[item]['version'] }}
+      has {{ result.smartos_images[item]['clones'] }} VM(s)
   with_items: "{{ result.smartos_images.keys() | list }}"

 - name: Print information
   ansible.builtin.debug:
-    msg: "{{ smartos_images[item]['name'] }}-{{ smartos_images[item]['version'] }} has {{ smartos_images[item]['clones'] }} VM(s)"
+    msg: >-
+      {{ smartos_images[item]['name'] }}-{{ smartos_images[item]['version'] }}
+      has {{ smartos_images[item]['clones'] }} VM(s)
   with_items: "{{ smartos_images.keys() | list }}"
 """
diff --git a/plugins/modules/snap.py b/plugins/modules/snap.py
index 1de829801d..fd424e0dd9 100644
--- a/plugins/modules/snap.py
+++ b/plugins/modules/snap.py
@@ -37,8 +37,8 @@ options:
   state:
     description:
       - Desired state of the package.
-      - When O(state=present) the module will use C(snap install) if the snap is not installed, and C(snap refresh) if it is installed but from
-        a different channel.
+      - When O(state=present) the module uses C(snap install) if the snap is not installed, and C(snap refresh) if it is installed
+        but from a different channel.
     default: present
     choices: [absent, present, enabled, disabled]
     type: str
   classic:
     description:
       - Install a snap that has classic confinement.
       - This option corresponds to the C(--classic) argument of the C(snap install) command.
-      - This level of confinement is permissive, granting full system access, similar to that of traditionally packaged applications that do not
-        use sandboxing mechanisms. This option can only be specified when the task involves a single snap.
+      - This level of confinement is permissive, granting full system access, similar to that of traditionally packaged applications
+        that do not use sandboxing mechanisms. This option can only be specified when the task involves a single snap.
       - See U(https://snapcraft.io/docs/snap-confinement) for more details about classic confinement and confinement levels.
     type: bool
     required: false
     default: false
   channel:
     description:
-      - Define which release of a snap is installed and tracked for updates. This option can only be specified if there is a single snap in the
-        task.
-      - If not passed, the C(snap) command will default to V(stable).
-      - If the value passed does not contain the C(track), it will default to C(latest). For example, if V(edge) is passed, the module will assume
-        the channel to be V(latest/edge).
+      - Define which release of a snap is installed and tracked for updates. This option can only be specified if there is
+        a single snap in the task.
+      - If not passed, the C(snap) command defaults to V(stable).
+      - If the value passed does not contain the C(track), it defaults to C(latest). For example, if V(edge) is passed, the
+        module assumes the channel to be V(latest/edge).
       - See U(https://snapcraft.io/docs/channels) for more details about snap channels.
     type: str
     required: false
   options:
     description:
-      - Set options with pattern C(key=value) or C(snap:key=value). If a snap name is given, the option will be applied to that snap only. If
-        the snap name is omitted, the options will be applied to all snaps listed in O(name). Options will only be applied to active snaps.
-      - Options will only be applied when C(state) is set to V(present). This is done after the necessary installation or refresh (upgrade/downgrade)
-        of all the snaps listed in O(name).
+      - Set options with pattern C(key=value) or C(snap:key=value). If a snap name is given, the option is applied to that
+        snap only. If the snap name is omitted, the options are applied to all snaps listed in O(name). Options are only applied
+        to active snaps.
+      - Options are only applied when O(state) is set to V(present). This is done after the necessary installation or refresh
+        (upgrade/downgrade) of all the snaps listed in O(name).
       - See U(https://snapcraft.io/docs/configuration-in-snaps) for more details about snap configuration options.
     required: false
     type: list
@@ -83,8 +84,8 @@ options:
     default: false
     version_added: 7.2.0
 notes:
-  - Privileged operations, such as installing and configuring snaps, require root priviledges. This is only the case if the user has not logged
-    in to the Snap Store.
+  - Privileged operations, such as installing and configuring snaps, require root privileges. This is only the case if the
+    user has not logged in to the Snap Store.
author: - Victor Carceler (@vcarceler) - Stanislas Lange (@angristan) @@ -166,6 +167,11 @@ options_changed: type: list returned: When any options have been changed/set version_added: 4.4.0 +version: + description: Versions of snap components as reported by C(snap version). + type: dict + returned: always + version_added: 10.3.0 """ import re @@ -175,7 +181,7 @@ import numbers from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper -from ansible_collections.community.general.plugins.module_utils.snap import snap_runner +from ansible_collections.community.general.plugins.module_utils.snap import snap_runner, get_version class Snap(StateModuleHelper): @@ -197,7 +203,6 @@ class Snap(StateModuleHelper): }, supports_check_mode=True, ) - use_old_vardict = False @staticmethod def _first_non_zero(a): @@ -209,6 +214,7 @@ class Snap(StateModuleHelper): def __init_module__(self): self.runner = snap_runner(self.module) + self.vars.version = get_version(self.runner) # if state=present there might be file names passed in 'name', in # which case they must be converted to their actual snap names, which # is done using the names_from_snaps() method calling 'snap info'. diff --git a/plugins/modules/snap_alias.py b/plugins/modules/snap_alias.py index 81a968730d..3837f2b5a6 100644 --- a/plugins/modules/snap_alias.py +++ b/plugins/modules/snap_alias.py @@ -80,13 +80,18 @@ snap_aliases: type: list elements: str returned: always +version: + description: Versions of snap components as reported by C(snap version). + type: dict + returned: always + version_added: 10.3.0 """ import re from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper -from ansible_collections.community.general.plugins.module_utils.snap import snap_runner +from ansible_collections.community.general.plugins.module_utils.snap import snap_runner, get_version class SnapAlias(StateModuleHelper): @@ -104,7 +109,6 @@ class SnapAlias(StateModuleHelper): ], supports_check_mode=True, ) - use_old_vardict = False def _aliases(self): n = self.vars.name @@ -112,6 +116,7 @@ class SnapAlias(StateModuleHelper): def __init_module__(self): self.runner = snap_runner(self.module) + self.vars.version = get_version(self.runner) self.vars.set("snap_aliases", self._aliases(), change=True, diff=True) def __quit_module__(self): diff --git a/plugins/modules/snmp_facts.py b/plugins/modules/snmp_facts.py index af0abf9479..17c7bbd032 100644 --- a/plugins/modules/snmp_facts.py +++ b/plugins/modules/snmp_facts.py @@ -15,7 +15,7 @@ author: - Patrick Ogenstad (@ogenstad) short_description: Retrieve facts for a device using SNMP description: - - Retrieve facts for a device using SNMP, the facts will be inserted to the C(ansible_facts) key. + - Retrieve facts for a device using SNMP, the facts are inserted to the C(ansible_facts) key. requirements: - pysnmp extends_documentation_fragment: @@ -113,7 +113,7 @@ ansible_sysdescr: description: A textual description of the entity. returned: success type: str - sample: Linux ubuntu-user 4.4.0-93-generic #116-Ubuntu SMP Fri Aug 11 21:17:51 UTC 2017 x86_64 + sample: "Linux ubuntu-user 4.4.0-93-generic #116-Ubuntu SMP Fri Aug 11 21:17:51 UTC 2017 x86_64" ansible_sysobjectid: description: The vendor's authoritative identification of the network management subsystem contained in the entity. 
returned: success @@ -125,7 +125,8 @@ ansible_sysuptime: type: int sample: 42388 ansible_syscontact: - description: The textual identification of the contact person for this managed node, together with information on how to contact this person. + description: The textual identification of the contact person for this managed node, together with information on how to + contact this person. returned: success type: str sample: Me @@ -148,40 +149,41 @@ ansible_interfaces: description: Dictionary of each network interface and its metadata. returned: success type: dict - sample: { - "1": { - "adminstatus": "up", - "description": "", - "ifindex": "1", - "ipv4": [ - { - "address": "127.0.0.1", - "netmask": "255.0.0.0" - } - ], - "mac": "", - "mtu": "65536", - "name": "lo", - "operstatus": "up", - "speed": "65536" - }, - "2": { - "adminstatus": "up", - "description": "", - "ifindex": "2", - "ipv4": [ - { - "address": "192.168.213.128", - "netmask": "255.255.255.0" - } - ], - "mac": "000a305a52a1", - "mtu": "1500", - "name": "Intel Corporation 82545EM Gigabit Ethernet Controller (Copper)", - "operstatus": "up", - "speed": "1500" + sample: + { + "1": { + "adminstatus": "up", + "description": "", + "ifindex": "1", + "ipv4": [ + { + "address": "127.0.0.1", + "netmask": "255.0.0.0" + } + ], + "mac": "", + "mtu": "65536", + "name": "lo", + "operstatus": "up", + "speed": "65536" + }, + "2": { + "adminstatus": "up", + "description": "", + "ifindex": "2", + "ipv4": [ + { + "address": "192.168.213.128", + "netmask": "255.255.255.0" + } + ], + "mac": "000a305a52a1", + "mtu": "1500", + "name": "Intel Corporation 82545EM Gigabit Ethernet Controller (Copper)", + "operstatus": "up", + "speed": "1500" + } } - } """ import binascii diff --git a/plugins/modules/solaris_zone.py b/plugins/modules/solaris_zone.py index 6aa8608be4..431e0cb31d 100644 --- a/plugins/modules/solaris_zone.py +++ b/plugins/modules/solaris_zone.py @@ -51,7 +51,7 @@ options: required: true path: description: - - The path where the zone will be created. This is required when the zone is created, but not used otherwise. + - The path where the zone is created. This is required when the zone is created, but not used otherwise. type: str sparse: description: @@ -60,12 +60,13 @@ options: default: false root_password: description: - - The password hash for the root account. If not specified, the zone's root account will not have a password. + - The password hash for the root account. If not specified, the zone's root account does not have a password. type: str config: description: - - The C(zonecfg) configuration commands for this zone. See zonecfg(1M) for the valid options and syntax. Typically this is a list of options - separated by semi-colons or new lines, for example V(set auto-boot=true;add net;set physical=bge0;set address=10.1.1.1;end). + - The C(zonecfg) configuration commands for this zone. See zonecfg(1M) for the valid options and syntax. Typically this + is a list of options separated by semi-colons or new lines, for example V(set auto-boot=true;add net;set physical=bge0;set + address=10.1.1.1;end). type: str default: '' create_options: @@ -75,14 +76,14 @@ options: default: '' install_options: description: - - Extra options to the zoneadm(1M) install command. To automate Solaris 11 zone creation, use this to specify the profile XML file, for example - O(install_options=-c sc_profile.xml). + - Extra options to the zoneadm(1M) install command. 
To automate Solaris 11 zone creation, use this to specify the profile + XML file, for example O(install_options=-c sc_profile.xml). type: str default: '' attach_options: description: - - Extra options to the zoneadm attach command. For example, this can be used to specify whether a minimum or full update of packages is - required and if any packages need to be deleted. For valid values, see zoneadm(1M). + - Extra options to the zoneadm attach command. For example, this can be used to specify whether a minimum or full update + of packages is required and if any packages need to be deleted. For valid values, see zoneadm(1M). type: str default: '' timeout: @@ -245,24 +246,22 @@ class Zone(object): open('%s/root/noautoshutdown' % self.path, 'w').close() - node = open('%s/root/etc/nodename' % self.path, 'w') - node.write(self.name) - node.close() + with open('%s/root/etc/nodename' % self.path, 'w') as node: + node.write(self.name) - id = open('%s/root/etc/.sysIDtool.state' % self.path, 'w') - id.write('1 # System previously configured?\n') - id.write('1 # Bootparams succeeded?\n') - id.write('1 # System is on a network?\n') - id.write('1 # Extended network information gathered?\n') - id.write('0 # Autobinder succeeded?\n') - id.write('1 # Network has subnets?\n') - id.write('1 # root password prompted for?\n') - id.write('1 # locale and term prompted for?\n') - id.write('1 # security policy in place\n') - id.write('1 # NFSv4 domain configured\n') - id.write('0 # Auto Registration Configured\n') - id.write('vt100') - id.close() + with open('%s/root/etc/.sysIDtool.state' % self.path, 'w') as id: + id.write('1 # System previously configured?\n') + id.write('1 # Bootparams succeeded?\n') + id.write('1 # System is on a network?\n') + id.write('1 # Extended network information gathered?\n') + id.write('0 # Autobinder succeeded?\n') + id.write('1 # Network has subnets?\n') + id.write('1 # root password prompted for?\n') + id.write('1 # locale and term prompted for?\n') + id.write('1 # security policy in place\n') + id.write('1 # NFSv4 domain configured\n') + id.write('0 # Auto Registration Configured\n') + id.write('vt100') def configure_ssh_keys(self): rsa_key_file = '%s/root/etc/ssh/ssh_host_rsa_key' % self.path @@ -283,9 +282,8 @@ class Zone(object): def configure_password(self): shadow = '%s/root/etc/shadow' % self.path if self.root_password: - f = open(shadow, 'r') - lines = f.readlines() - f.close() + with open(shadow, 'r') as f: + lines = f.readlines() for i in range(0, len(lines)): fields = lines[i].split(':') @@ -293,10 +291,9 @@ class Zone(object): fields[1] = self.root_password lines[i] = ':'.join(fields) - f = open(shadow, 'w') - for line in lines: - f.write(line) - f.close() + with open(shadow, 'w') as f: + for line in lines: + f.write(line) def boot(self): if not self.module.check_mode: diff --git a/plugins/modules/sorcery.py b/plugins/modules/sorcery.py index 9ad3d30f3b..de50741185 100644 --- a/plugins/modules/sorcery.py +++ b/plugins/modules/sorcery.py @@ -17,7 +17,8 @@ description: - Manages "spells" on Source Mage GNU/Linux using I(sorcery) toolchain. author: "Vlad Glagolev (@vaygr)" notes: - - When all three components are selected, the update goes by the sequence -- Sorcery -> Grimoire(s) -> Spell(s); you cannot override it. + - When all three components are selected, the update goes by the sequence -- Sorcery -> Grimoire(s) -> Spell(s); you cannot + override it. - Grimoire handling is supported since community.general 7.3.0. 
requirements: - bash @@ -33,7 +34,7 @@ options: description: - Name of the spell or grimoire. - Multiple names can be given, separated by commas. - - Special value V(*) in conjunction with states V(latest) or V(rebuild) will update or rebuild the whole system respectively. + - Special value V(*) in conjunction with states V(latest) or V(rebuild) updates or rebuilds the whole system respectively. - The alias O(grimoire) was added in community.general 7.3.0. aliases: ["spell", "grimoire"] type: list @@ -43,7 +44,7 @@ options: description: - Repository location. - If specified, O(name) represents grimoire(s) instead of spell(s). - - Special value V(*) will pull grimoire from the official location. + - Special value V(*) pulls grimoire from the official location. - Only single item in O(name) in conjunction with V(*) can be used. - O(state=absent) must be used with a special value V(*). type: str @@ -60,8 +61,8 @@ options: depends: description: - - Comma-separated list of _optional_ dependencies to build a spell (or make sure it is built) with; use V(+)/V(-) in front of dependency - to turn it on/off (V(+) is optional though). + - Comma-separated list of _optional_ dependencies to build a spell (or make sure it is built) with; use V(+)/V(-) in + front of dependency to turn it on/off (V(+) is optional though). - This option is ignored if O(name) parameter is equal to V(*) or contains more than one spell. - Providers must be supplied in the form recognized by Sorcery, for example 'V(openssl(SSL\))'. type: str @@ -459,15 +460,11 @@ def match_depends(module): if depends_new: try: - try: - fl = open(sorcery_depends, 'a') - + with open(sorcery_depends, 'a') as fl: for k in depends_new: fl.write("%s:%s:%s:optional::\n" % (spell, k, depends[k])) - except IOError: - module.fail_json(msg="I/O error on the depends file") - finally: - fl.close() + except IOError: + module.fail_json(msg="I/O error on the depends file") depends_ok = False @@ -700,11 +697,11 @@ def manage_spells(module): def main(): module = AnsibleModule( argument_spec=dict( - name=dict(default=None, aliases=['spell', 'grimoire'], type='list', elements='str'), - repository=dict(default=None, type='str'), + name=dict(aliases=['spell', 'grimoire'], type='list', elements='str'), + repository=dict(type='str'), state=dict(default='present', choices=['present', 'latest', 'absent', 'cast', 'dispelled', 'rebuild']), - depends=dict(default=None), + depends=dict(), update=dict(default=False, type='bool'), update_cache=dict(default=False, aliases=['update_codex'], type='bool'), cache_valid_time=dict(default=0, type='int') diff --git a/plugins/modules/spectrum_device.py b/plugins/modules/spectrum_device.py index bb9761d37d..54cddbffb0 100644 --- a/plugins/modules/spectrum_device.py +++ b/plugins/modules/spectrum_device.py @@ -30,7 +30,7 @@ options: required: true description: - IP address of the device. - - If a hostname is given, it will be resolved to the IP address. + - If a hostname is given, it is resolved to the IP address. community: type: str description: @@ -69,12 +69,13 @@ options: - Oneclick user password. use_proxy: description: - - If V(false), it will not use a proxy, even if one is defined in an environment variable on the target hosts. + - If V(false), it does not use a proxy, even if one is defined in an environment variable on the target hosts. default: true type: bool validate_certs: description: - - If V(false), SSL certificates will not be validated. 
This should only be used on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed + certificates. default: true type: bool agentport: @@ -84,8 +85,8 @@ options: - UDP port used for SNMP discovery. default: 161 notes: - - The devices will be created inside the I(Universe) container of the specified landscape. - - All the operations will be performed only on the specified landscape. + - The devices are created inside the I(Universe) container of the specified landscape. + - All the operations are performed only on the specified landscape. """ EXAMPLES = r""" @@ -118,7 +119,12 @@ device: description: Device data when O(state=present). returned: success type: dict - sample: {'model_handle': '0x1007ab', 'landscape': '0x100000', 'address': '10.10.5.1'} + sample: + { + "model_handle": "0x1007ab", + "landscape": "0x100000", + "address": "10.10.5.1" + } """ from socket import gethostbyname, gaierror diff --git a/plugins/modules/spectrum_model_attrs.py b/plugins/modules/spectrum_model_attrs.py index 3057f04c15..53cae10b74 100644 --- a/plugins/modules/spectrum_model_attrs.py +++ b/plugins/modules/spectrum_model_attrs.py @@ -47,7 +47,7 @@ options: aliases: [password] use_proxy: description: - - If V(false), it will not use a proxy, even if one is defined in an environment variable on the target hosts. + - If V(false), it does not use a proxy, even if one is defined in an environment variable on the target hosts. default: true required: false type: bool @@ -63,8 +63,8 @@ options: required: true validate_certs: description: - - Validate SSL certificates. Only change this to V(false) if you can guarantee that you are talking to the correct endpoint and there is - no man-in-the-middle attack happening. + - Validate SSL certificates. Only change this to V(false) if you can guarantee that you are talking to the correct endpoint + and there is no man-in-the-middle attack happening. type: bool default: true required: false @@ -99,7 +99,7 @@ options: - C(sysName) (C(0x10b5b)); - C(Vendor_Name) (C(0x11570)); - C(Description) (C(0x230017)). - - Hex IDs are the direct identifiers in Spectrum and will always work. + - Hex IDs are the direct identifiers in Spectrum and always work. - 'To lookup hex IDs go to the UI: Locator -> Devices -> By Model Name -> -> Attributes tab.' type: str required: true @@ -123,7 +123,9 @@ EXAMPLES = r""" - name: "isManaged" value: "false" - name: "Notes" - value: "MM set on {{ ansible_date_time.iso8601 }} via CO {{ CO }} by {{ tower_user_name | default(ansible_user_id) }}" + value: >- + MM set on {{ ansible_date_time.iso8601 }} via CO {{ CO }} + by {{ tower_user_name | default(ansible_user_id) }} delegate_to: localhost register: spectrum_model_attrs_status """ diff --git a/plugins/modules/spotinst_aws_elastigroup.py b/plugins/modules/spotinst_aws_elastigroup.py index a55e7506ea..759a094626 100644 --- a/plugins/modules/spotinst_aws_elastigroup.py +++ b/plugins/modules/spotinst_aws_elastigroup.py @@ -10,9 +10,10 @@ module: spotinst_aws_elastigroup short_description: Create, update or delete Spotinst AWS Elastigroups author: Spotinst (@talzur) description: - - Can create, update, or delete Spotinst AWS Elastigroups Launch configuration is part of the elastigroup configuration, so no additional modules - are necessary for handling the launch configuration. You will have to have a credentials file in this location - C($HOME/.spotinst/credentials). 
-    The credentials file must contain a row that looks like this C(token = ).
+  - Can create, update, or delete Spotinst AWS Elastigroups. Launch configuration is part of the elastigroup configuration,
+    so no additional modules are necessary for handling the launch configuration. You must have a credentials file in this
+    location - C($HOME/.spotinst/credentials). The credentials file must contain a row that looks like this C(token = ).
   - Full documentation available at U(https://help.spotinst.com/hc/en-us/articles/115003530285-Ansible-).
 requirements:
   - spotinst_sdk >= 1.0.38
@@ -40,8 +41,8 @@ options:
   token:
     description:
       - A Personal API Access Token issued by Spotinst.
-      - 'When not specified, the module will try to obtain it, in that order, from: environment variable E(SPOTINST_TOKEN), or from the credentials
-        path.'
+      - When not specified, the module tries to obtain it, in that order, from the environment variable E(SPOTINST_TOKEN) or
+        from the credentials path.
     type: str

   availability_vs_cost:
@@ -53,24 +54,25 @@ options:

   availability_zones:
     description:
-      - A list of hash/dictionaries of Availability Zones that are configured in the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed
-        are name (String), subnet_id (String), placement_group_name (String),.
+      - A list of hash/dictionaries of Availability Zones that are configured in the elastigroup; '[{"key":"value", "key":"value"}]';
+        keys allowed are name (String), subnet_id (String), placement_group_name (String).
     required: true
     type: list
     elements: dict

   block_device_mappings:
     description:
-      - A list of hash/dictionaries of Block Device Mappings for elastigroup instances; You can specify virtual devices and EBS volumes.; '[{"key":"value",
-        "key":"value"}]'; keys allowed are device_name (List of Strings), virtual_name (String), no_device (String), ebs (Object, expects the
-        following keys- delete_on_termination(Boolean), encrypted(Boolean), iops (Integer), snapshot_id(Integer), volume_type(String), volume_size(Integer)).
+      - A list of hash/dictionaries of Block Device Mappings for elastigroup instances; You can specify virtual devices and
+        EBS volumes.; '[{"key":"value", "key":"value"}]'; keys allowed are device_name (List of Strings), virtual_name (String),
+        no_device (String), ebs (Object, expects the following keys- delete_on_termination(Boolean), encrypted(Boolean), iops
+        (Integer), snapshot_id(Integer), volume_type(String), volume_size(Integer)).
     type: list
     elements: dict

   chef:
     description:
-      - The Chef integration configuration.; Expects the following keys - chef_server (String), organization (String), user (String), pem_key
-        (String), chef_version (String).
+      - The Chef integration configuration.; Expects the following keys - chef_server (String), organization (String), user
+        (String), pem_key (String), chef_version (String).
     type: dict

   draining_timeout:
@@ -80,13 +82,13 @@ options:

   ebs_optimized:
     description:
-      - Enable EBS optimization for supported instances which are not enabled by default.; Note - additional charges will be applied.
+      - Enable EBS optimization for supported instances which are not enabled by default. Note - additional charges are applied.
     type: bool

   ebs_volume_pool:
     description:
-      - A list of hash/dictionaries of EBS devices to reattach to the elastigroup when available; '[{"key":"value", "key":"value"}]'; keys allowed
-        are - volume_ids (List of Strings), device_name (String).
+      - A list of hash/dictionaries of EBS devices to reattach to the elastigroup when available; '[{"key":"value", "key":"value"}]';
+        keys allowed are - volume_ids (List of Strings), device_name (String).
     type: list
     elements: dict
@@ -97,13 +99,13 @@ options:

   elastic_ips:
     description:
-      - List of ElasticIps Allocation Ids (example V(eipalloc-9d4e16f8)) to associate to the group instances.
+      - List of ElasticIps Allocation IDs (example V(eipalloc-9d4e16f8)) to associate to the group instances.
     type: list
     elements: str

   fallback_to_od:
     description:
-      - In case of no spots available, Elastigroup will launch an On-demand instance instead.
+      - In case of no spots available, Elastigroup launches an On-demand instance instead.
     type: bool

   health_check_grace_period:
@@ -137,13 +139,15 @@ options:

   id:
     description:
-      - The group id if it already exists and you want to update, or delete it. This will not work unless the uniqueness_by field is set to id.
-        When this is set, and the uniqueness_by field is set, the group will either be updated or deleted, but not created.
+      - The group ID if it already exists and you want to update or delete it. This does not work unless the O(uniqueness_by)
+        field is set to V(id). When this is set, and the O(uniqueness_by) field is set, the group is either updated or deleted,
+        but not created.
     type: str

   image_id:
     description:
-      - The image Id used to launch the instance.; In case of conflict between Instance type and image type, an error will be returned.
+      - The image ID used to launch the instance.; In case of conflict between Instance type and image type, an error is
+        returned.
     required: true
     type: str
@@ -198,22 +202,24 @@ options:

   network_interfaces:
     description:
-      - A list of hash/dictionaries of network interfaces to add to the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed are - description
-        (String), device_index (Integer), secondary_private_ip_address_count (Integer), associate_public_ip_address (Boolean), delete_on_termination
-        (Boolean), groups (List of Strings), network_interface_id (String), private_ip_address (String), subnet_id (String), associate_ipv6_address
-        (Boolean), private_ip_addresses (List of Objects, Keys are privateIpAddress (String, required) and primary (Boolean)).
+      - A list of hash/dictionaries of network interfaces to add to the elastigroup; '[{"key":"value", "key":"value"}]'; keys
+        allowed are - description (String), device_index (Integer), secondary_private_ip_address_count (Integer), associate_public_ip_address
+        (Boolean), delete_on_termination (Boolean), groups (List of Strings), network_interface_id (String), private_ip_address
+        (String), subnet_id (String), associate_ipv6_address (Boolean), private_ip_addresses (List of Objects, Keys are privateIpAddress
+        (String, required) and primary (Boolean)).
     type: list
     elements: dict

   on_demand_count:
     description:
       - Required if risk is not set.
-      - Number of on demand instances to launch. All other instances will be spot instances.; Either set this parameter or the risk parameter.
+      - Number of on demand instances to launch. All other instances are spot instances.; Either set this parameter or the
+        O(risk) parameter.
     type: int

   on_demand_instance_type:
     description:
-      - On-demand instance type that will be provisioned.
+      - On-demand instance type that is provisioned.
     type: str

   opsworks:
     description:
@@ -230,14 +236,15 @@ options:

   product:
     description:
       - Operation system type.
-      - 'Available choices are: V(Linux/UNIX), V(SUSE Linux), V(Windows), V(Linux/UNIX (Amazon VPC)), V(SUSE Linux (Amazon VPC)).'
+      - 'Available choices are: V(Linux/UNIX), V(SUSE Linux), V(Windows), V(Linux/UNIX (Amazon VPC)), V(SUSE Linux (Amazon
+        VPC)).'
     required: true
     type: str

   rancher:
     description:
-      - The Rancher integration configuration.; Expects the following keys - version (String), access_key (String), secret_key (String), master_host
-        (String).
+      - The Rancher integration configuration.; Expects the following keys - version (String), access_key (String), secret_key
+        (String), master_host (String).
     type: dict

   right_scale:
@@ -254,22 +261,23 @@ options:
     description:
       - Roll configuration.
       - If you would like the group to roll after updating, please use this feature.
-      - Accepts the following keys - batch_size_percentage(Integer, Required), grace_period - (Integer, Required), health_check_type(String, Optional).
+      - Accepts the following keys - batch_size_percentage(Integer, Required), grace_period (Integer, Required), health_check_type(String,
+        Optional).
     type: dict

   scheduled_tasks:
     description:
       - A list of hash/dictionaries of scheduled tasks to configure in the elastigroup, as in V([{"key":"value", "key":"value"}]).
-      - 'Keys allowed are: adjustment (Integer), scale_target_capacity (Integer), scale_min_capacity (Integer), scale_max_capacity (Integer),
-        adjustment_percentage (Integer), batch_size_percentage (Integer), cron_expression (String), frequency (String), grace_period (Integer),
-        task_type (String, required), is_enabled (Boolean).'
+      - 'Keys allowed are: adjustment (Integer), scale_target_capacity (Integer), scale_min_capacity (Integer), scale_max_capacity
+        (Integer), adjustment_percentage (Integer), batch_size_percentage (Integer), cron_expression (String), frequency (String),
+        grace_period (Integer), task_type (String, required), is_enabled (Boolean).'
     type: list
     elements: dict

   security_group_ids:
     description:
       - One or more security group IDs.
-      - In case of update it will override the existing Security Group with the new given array.
+      - In case of update it overrides the existing Security Group with the new given array.
     required: true
     type: list
     elements: str
@@ -281,7 +289,8 @@ options:

   signals:
     description:
-      - A list of hash/dictionaries of signals to configure in the elastigroup; keys allowed are - name (String, required), timeout (Integer).
+      - A list of hash/dictionaries of signals to configure in the elastigroup; keys allowed are - name (String, required),
+        timeout (Integer).
     type: list
     elements: dict

@@ -292,7 +301,7 @@ options:

   spot_instance_types:
     description:
-      - Spot instance type that will be provisioned.
+      - Spot instance type that is provisioned.
     required: true
     type: list
     elements: str
@@ -343,29 +352,32 @@ options:

   up_scaling_policies:
     description:
-      - A list of hash/dictionaries of scaling policies to configure in the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed are
-        - policy_name (String, required), namespace (String, required), metric_name (String, required), dimensions (List of Objects, Keys allowed
-        are name (String, required) and value (String)), statistic (String, required) evaluation_periods (String, required), period (String, required),
-        threshold (String, required), cooldown (String, required), unit (String, required), operator (String, required), action_type (String,
-        required), adjustment (String), min_target_capacity (String), target (String), maximum (String), minimum (String).
+      - A list of hash/dictionaries of scaling policies to configure in the elastigroup; '[{"key":"value", "key":"value"}]';
+        keys allowed are - policy_name (String, required), namespace (String, required), metric_name (String, required), dimensions
+        (List of Objects, Keys allowed are name (String, required) and value (String)), statistic (String, required), evaluation_periods
+        (String, required), period (String, required), threshold (String, required), cooldown (String, required), unit (String,
+        required), operator (String, required), action_type (String, required), adjustment (String), min_target_capacity (String),
+        target (String), maximum (String), minimum (String).
     type: list
     elements: dict

   down_scaling_policies:
     description:
-      - A list of hash/dictionaries of scaling policies to configure in the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed are
-        - policy_name (String, required), namespace (String, required), metric_name (String, required), dimensions ((List of Objects), Keys allowed
-        are name (String, required) and value (String)), statistic (String, required), evaluation_periods (String, required), period (String,
-        required), threshold (String, required), cooldown (String, required), unit (String, required), operator (String, required), action_type
-        (String, required), adjustment (String), max_target_capacity (String), target (String), maximum (String), minimum (String).
+      - A list of hash/dictionaries of scaling policies to configure in the elastigroup; '[{"key":"value", "key":"value"}]';
+        keys allowed are - policy_name (String, required), namespace (String, required), metric_name (String, required), dimensions
+        ((List of Objects), Keys allowed are name (String, required) and value (String)), statistic (String, required), evaluation_periods
+        (String, required), period (String, required), threshold (String, required), cooldown (String, required), unit (String,
+        required), operator (String, required), action_type (String, required), adjustment (String), max_target_capacity (String),
+        target (String), maximum (String), minimum (String).
     type: list
     elements: dict

   target_tracking_policies:
     description:
-      - A list of hash/dictionaries of target tracking policies to configure in the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed
-        are - policy_name (String, required), namespace (String, required), source (String, required), metric_name (String, required), statistic
-        (String, required), unit (String, required), cooldown (String, required), target (String, required).
+      - A list of hash/dictionaries of target tracking policies to configure in the elastigroup; '[{"key":"value", "key":"value"}]';
+        keys allowed are - policy_name (String, required), namespace (String, required), source (String, required), metric_name
+        (String, required), statistic (String, required), unit (String, required), cooldown (String, required), target (String,
+        required).
     type: list
     elements: dict

@@ -374,8 +386,8 @@ options:
       - id
      - name
     description:
-      - If your group names are not unique, you may use this feature to update or delete a specific group. Whenever this property is set, you
-        must set a group_id in order to update or delete a group, otherwise a group will be created.
+      - If your group names are not unique, you may use this feature to update or delete a specific group. Whenever this property
+        is set, you must set a group_id in order to update or delete a group, otherwise a group is created.
default: name type: str @@ -386,7 +398,7 @@ options: utilize_reserved_instances: description: - - In case of any available Reserved Instances, Elastigroup will utilize your reservations before purchasing Spot instances. + - In case of any available Reserved Instances, Elastigroup utilizes your reservations before purchasing Spot instances. type: bool wait_for_instances: @@ -684,18 +696,15 @@ instances: description: List of active elastigroup instances and their details. returned: success type: dict - sample: [ - { - "spotInstanceRequestId": "sir-regs25zp", - "instanceId": "i-09640ad8678234c", - "instanceType": "m4.large", - "product": "Linux/UNIX", - "availabilityZone": "us-west-2b", - "privateIp": "180.0.2.244", - "createdAt": "2017-07-17T12:46:18.000Z", + sample: + - "spotInstanceRequestId": "sir-regs25zp" + "instanceId": "i-09640ad8678234c" + "instanceType": "m4.large" + "product": "Linux/UNIX" + "availabilityZone": "us-west-2b" + "privateIp": "180.0.2.244" + "createdAt": "2017-07-17T12:46:18.000Z" "status": "fulfilled" - } - ] group_id: description: Created / Updated group's ID. returned: success diff --git a/plugins/modules/ss_3par_cpg.py b/plugins/modules/ss_3par_cpg.py index c9c9b4bd90..0869d67d84 100644 --- a/plugins/modules/ss_3par_cpg.py +++ b/plugins/modules/ss_3par_cpg.py @@ -38,7 +38,7 @@ options: type: str domain: description: - - Specifies the name of the domain in which the object will reside. + - Specifies the name of the domain in which the object resides. type: str growth_increment: description: @@ -46,11 +46,12 @@ options: type: str growth_limit: description: - - Specifies that the autogrow operation is limited to the specified storage amount that sets the growth limit(in MiB, GiB or TiB). + - Specifies that the autogrow operation is limited to the specified storage amount that sets the growth limit (in MiB, + GiB or TiB). type: str growth_warning: description: - - Specifies that the threshold(in MiB, GiB or TiB) of used logical disk space when exceeded results in a warning alert. + - Specifies that the threshold (in MiB, GiB or TiB) of used logical disk space when exceeded results in a warning alert. type: str high_availability: choices: diff --git a/plugins/modules/ssh_config.py b/plugins/modules/ssh_config.py index 582d7c127e..6a83095f37 100644 --- a/plugins/modules/ssh_config.py +++ b/plugins/modules/ssh_config.py @@ -49,7 +49,7 @@ options: host: description: - The endpoint this configuration is valid for. - - Can be an actual address on the internet or an alias that will connect to the value of O(hostname). + - It can be an actual address on the internet or an alias that connects to the value of O(hostname). required: true type: str hostname: @@ -66,14 +66,14 @@ options: type: str identity_file: description: - - The path to an identity file (SSH private key) that will be used when connecting to this host. + - The path to an identity file (SSH private key) that is used when connecting to this host. - File need to exist and have mode V(0600) to be valid. type: path identities_only: description: - - Specifies that SSH should only use the configured authentication identity and certificate files (either the default files, or those explicitly - configured in the C(ssh_config) files or passed on the ssh command-line), even if ssh-agent or a PKCS11Provider or SecurityKeyProvider - offers more identities. 
+      - Specifies that SSH should only use the configured authentication identity and certificate files (either the default
+        files, or those explicitly configured in the C(ssh_config) files or passed on the ssh command-line), even if C(ssh-agent)
+        or a C(PKCS11Provider) or C(SecurityKeyProvider) offers more identities.
     type: bool
     version_added: 8.2.0
   user_known_hosts_file:
@@ -139,6 +139,13 @@ options:
       - Sets the C(DynamicForward) option.
     type: str
     version_added: 10.1.0
+  other_options:
+    description:
+      - Allows specifying arbitrary SSH config entry options using a dictionary.
+      - The key names must be lower case. Keys containing upper case characters are rejected.
+      - The values must be strings. Other values are rejected.
+    type: dict
+    version_added: 10.4.0
 requirements:
   - paramiko
 """
@@ -152,6 +159,17 @@ EXAMPLES = r"""
     identity_file: "/home/akasurde/.ssh/id_rsa"
     port: '2223'
     state: present
+    other_options:
+      serveraliveinterval: '30'
+
+- name: Add SSH config with key auto-added to agent
+  community.general.ssh_config:
+    user: devops
+    host: "example.com"
+    hostname: "staging.example.com"
+    identity_file: "/home/devops/.ssh/id_rsa"
+    add_keys_to_agent: true
+    state: present

 - name: Delete a host from the configuration
   community.general.ssh_config:
@@ -180,22 +198,27 @@
 hosts_change_diff:
   description: A list of host diff changes.
   returned: on change
   type: list
-  sample: [
-    {
-      "example.com": {
-        "new": {
-          "hostname": "github.com",
-          "identityfile": ["/tmp/test_ssh_config/fake_id_rsa"],
-          "port": "2224"
-        },
-        "old": {
-          "hostname": "github.com",
-          "identityfile": ["/tmp/test_ssh_config/fake_id_rsa"],
-          "port": "2224"
+  sample:
+    [
+      {
+        "example.com": {
+          "new": {
+            "hostname": "github.com",
+            "identityfile": [
+              "/tmp/test_ssh_config/fake_id_rsa"
+            ],
+            "port": "2224"
+          },
+          "old": {
+            "hostname": "github.com",
+            "identityfile": [
+              "/tmp/test_ssh_config/fake_id_rsa"
+            ],
+            "port": "2224"
+          }
         }
       }
-    }
-  ]
+    ]
 """

 import os
@@ -204,6 +227,7 @@ from copy import deepcopy

 from ansible.module_utils.basic import AnsibleModule, missing_required_lib
 from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.six import string_types

 from ansible_collections.community.general.plugins.module_utils._stormssh import ConfigParser, HAS_PARAMIKO, PARAMIKO_IMPORT_ERROR
 from ansible_collections.community.general.plugins.module_utils.ssh import determine_config_file
@@ -274,6 +298,17 @@ class SSHConfig(object):
             controlpersist=fix_bool_str(self.params.get('controlpersist')),
             dynamicforward=self.params.get('dynamicforward'),
         )
+        if self.params.get('other_options'):
+            for key, value in self.params.get('other_options').items():
+                if key.lower() != key:
+                    self.module.fail_json(msg="The other_options key {key!r} must be lower case".format(key=key))
+                if key not in args:
+                    if not isinstance(value, string_types):
+                        self.module.fail_json(msg="The other_options value provided for key {key!r} must be a string, got {type}".format(key=key,
+                                                                                                                                         type=type(value)))
+                    args[key] = value
+                else:
+                    self.module.fail_json(msg="Multiple values provided for key {key!r}".format(key=key))

         config_changed = False
         hosts_changed = []
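Spelled out, the O(other_options) validation in the hunk above enforces three rules before an arbitrary option is merged: key names must already be lower case, a key must not collide with one of the module's dedicated parameters, and values must be strings. A standalone sketch of the same checks (the function name and error messages are illustrative, not part of the module):

    def merge_other_options(args, other_options):
        # Mirrors the other_options rules above, outside of AnsibleModule.
        for key, value in other_options.items():
            if key.lower() != key:
                raise ValueError("key %r must be lower case" % key)
            if key in args:
                raise ValueError("key %r collides with a dedicated module option" % key)
            if not isinstance(value, str):
                raise ValueError("value for key %r must be a string" % key)
            args[key] = value
        return args

    # merge_other_options({'hostname': 'example.com'}, {'serveraliveinterval': '30'})
    # -> {'hostname': 'example.com', 'serveraliveinterval': '30'}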
@@ -355,31 +390,28 @@ def main():
     module = AnsibleModule(
         argument_spec=dict(
-            group=dict(default=None, type='str'),
+            group=dict(type='str'),
             host=dict(type='str', required=True),
             hostname=dict(type='str'),
             host_key_algorithms=dict(type='str', no_log=False),
             identity_file=dict(type='path'),
             identities_only=dict(type='bool'),
+            other_options=dict(type='dict'),
             port=dict(type='str'),
-            proxycommand=dict(type='str', default=None),
-            proxyjump=dict(type='str', default=None),
+            proxycommand=dict(type='str'),
+            proxyjump=dict(type='str'),
             forward_agent=dict(type='bool'),
             add_keys_to_agent=dict(type='bool'),
             remote_user=dict(type='str'),
-            ssh_config_file=dict(default=None, type='path'),
+            ssh_config_file=dict(type='path'),
             state=dict(type='str', default='present', choices=['present', 'absent']),
-            strict_host_key_checking=dict(
-                type='str',
-                default=None,
-                choices=['yes', 'no', 'ask', 'accept-new'],
-            ),
-            controlmaster=dict(type='str', default=None, choices=['yes', 'no', 'ask', 'auto', 'autoask']),
-            controlpath=dict(type='str', default=None),
-            controlpersist=dict(type='str', default=None),
+            strict_host_key_checking=dict(type='str', choices=['yes', 'no', 'ask', 'accept-new']),
+            controlmaster=dict(type='str', choices=['yes', 'no', 'ask', 'auto', 'autoask']),
+            controlpath=dict(type='str'),
+            controlpersist=dict(type='str'),
             dynamicforward=dict(type='str'),
-            user=dict(default=None, type='str'),
-            user_known_hosts_file=dict(type='str', default=None),
+            user=dict(type='str'),
+            user_known_hosts_file=dict(type='str'),
         ),
         supports_check_mode=True,
         mutually_exclusive=[
diff --git a/plugins/modules/stacki_host.py b/plugins/modules/stacki_host.py
index 4d2a999317..095e0b7256 100644
--- a/plugins/modules/stacki_host.py
+++ b/plugins/modules/stacki_host.py
@@ -29,12 +29,14 @@ options:
     type: str
   stacki_user:
     description:
-      - Username for authenticating with Stacki API, but if not specified, the environment variable E(stacki_user) is used instead.
+      - Username for authenticating with Stacki API; if not specified, the environment variable E(stacki_user) is used
+        instead.
     required: true
     type: str
   stacki_password:
     description:
-      - Password for authenticating with Stacki API, but if not specified, the environment variable E(stacki_password) is used instead.
+      - Password for authenticating with Stacki API; if not specified, the environment variable E(stacki_password) is
+        used instead.
     required: true
     type: str
   stacki_endpoint:
@@ -117,25 +119,6 @@ EXAMPLES = r"""
     state: absent
 """

-RETURN = r"""
-changed:
-  description: Response to whether or not the API call completed successfully.
-  returned: always
-  type: bool
-  sample: true
-
-stdout:
-  description: The set of responses from the commands.
-  returned: always
-  type: list
-  sample: ['...', '...']
-
-stdout_lines:
-  description: The value of stdout split into a list.
-  returned: always
-  type: list
-  sample: [['...', '...'], ['...'], ['...']]
-"""

 import json
diff --git a/plugins/modules/statusio_maintenance.py b/plugins/modules/statusio_maintenance.py
index 6f17523e25..c823a286c5 100644
--- a/plugins/modules/statusio_maintenance.py
+++ b/plugins/modules/statusio_maintenance.py
@@ -107,11 +107,11 @@ options:
   maintenance_id:
     type: str
     description:
-      - The maintenance id number when deleting a maintenance window.
+      - The maintenance ID number when deleting a maintenance window.
   minutes:
     type: int
     description:
-      - The length of time in UTC that the maintenance will run (starting from playbook runtime).
+      - The duration of the maintenance window, in minutes (starting from playbook runtime).
     default: 10
   start_date:
     type: str
@@ -344,30 +344,22 @@ def main():
             api_id=dict(required=True),
             api_key=dict(required=True, no_log=True),
             statuspage=dict(required=True),
-            state=dict(required=False, default='present',
-                       choices=['present', 'absent']),
-            url=dict(default='https://api.status.io', required=False),
-            components=dict(type='list', elements='str', required=False, default=None,
-                            aliases=['component']),
-            containers=dict(type='list', elements='str', required=False, default=None,
-                            aliases=['container']),
-            all_infrastructure_affected=dict(type='bool', default=False,
-                                             required=False),
-            automation=dict(type='bool', default=False, required=False),
-            title=dict(required=False, default='A new maintenance window'),
-            desc=dict(required=False, default='Created by Ansible'),
-            minutes=dict(type='int', required=False, default=10),
-            maintenance_notify_now=dict(type='bool', default=False,
-                                        required=False),
-            maintenance_notify_72_hr=dict(type='bool', default=False,
-                                          required=False),
-            maintenance_notify_24_hr=dict(type='bool', default=False,
-                                          required=False),
-            maintenance_notify_1_hr=dict(type='bool', default=False,
-                                         required=False),
-            maintenance_id=dict(required=False, default=None),
-            start_date=dict(default=None, required=False),
-            start_time=dict(default=None, required=False)
+            state=dict(default='present', choices=['present', 'absent']),
+            url=dict(default='https://api.status.io'),
+            components=dict(type='list', elements='str', aliases=['component']),
+            containers=dict(type='list', elements='str', aliases=['container']),
+            all_infrastructure_affected=dict(type='bool', default=False),
+            automation=dict(type='bool', default=False),
+            title=dict(default='A new maintenance window'),
+            desc=dict(default='Created by Ansible'),
+            minutes=dict(type='int', default=10),
+            maintenance_notify_now=dict(type='bool', default=False),
+            maintenance_notify_72_hr=dict(type='bool', default=False),
+            maintenance_notify_24_hr=dict(type='bool', default=False),
+            maintenance_notify_1_hr=dict(type='bool', default=False),
+            maintenance_id=dict(),
+            start_date=dict(),
+            start_time=dict()
         ),
         supports_check_mode=True,
     )
diff --git a/plugins/modules/sudoers.py b/plugins/modules/sudoers.py
index 54a52faad4..f353859a98 100644
--- a/plugins/modules/sudoers.py
+++ b/plugins/modules/sudoers.py
@@ -42,7 +42,7 @@ options:
     required: true
     description:
       - The name of the sudoers rule.
-      - This will be used for the filename for the sudoers file managed by this rule.
+      - This is used for the filename for the sudoers file managed by this rule.
     type: str
   noexec:
     description:
@@ -52,7 +52,7 @@ options:
     version_added: 8.4.0
   nopassword:
     description:
-      - Whether a password is required when command is run with sudo.
+      - Whether a password is not required when the command is run with sudo.
     default: true
     type: bool
   setenv:
@@ -69,12 +69,12 @@ options:
     version_added: 6.2.0
   runas:
     description:
-      - Specify the target user the command(s) will run as.
+      - Specify the target user the command(s) runs as.
     type: str
     version_added: 4.7.0
   sudoers_path:
     description:
-      - The path which sudoers config files will be managed in.
+      - The path in which sudoers config files are managed.
     default: /etc/sudoers.d
     type: str
   state:
@@ -92,9 +92,9 @@ options:
     type: str
   validation:
     description:
-      - If V(absent), the sudoers rule will be added without validation.
-      - If V(detect) and visudo is available, then the sudoers rule will be validated by visudo.
-      - If V(required), visudo must be available to validate the sudoers rule.
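The statusio_maintenance cleanup above is behavior-preserving: in an Ansible argument_spec, required=False and default=None are the implicit defaults. A minimal sketch of the equivalence (option names are hypothetical):

from ansible.module_utils.basic import AnsibleModule

# Minimal sketch: the two option specs below behave identically, because
# required=False and default=None are the implicit defaults.
module = AnsibleModule(
    argument_spec=dict(
        verbose_spec=dict(type='str', required=False, default=None),
        terse_spec=dict(),  # same behavior as the line above
    ),
    supports_check_mode=True,
)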
+      - If V(absent), the sudoers rule is added without validation.
+      - If V(detect) and C(visudo) is available, then the sudoers rule is validated by C(visudo).
+      - If V(required), C(visudo) must be available to validate the sudoers rule.
     type: str
     default: detect
     choices: [absent, detect, required]
@@ -246,7 +246,7 @@ class Sudoers(object):
         rc, stdout, stderr = self.module.run_command(check_command, data=self.content())
 
         if rc != 0:
-            raise Exception('Failed to validate sudoers rule:\n{stdout}'.format(stdout=stdout))
+            self.module.fail_json(msg='Failed to validate sudoers rule:\n{stdout}'.format(stdout=stdout or stderr), stdout=stdout, stderr=stderr)
 
     def run(self):
         if self.state == 'absent':
diff --git a/plugins/modules/supervisorctl.py b/plugins/modules/supervisorctl.py
index e94a2f770b..c2ceb1a52b 100644
--- a/plugins/modules/supervisorctl.py
+++ b/plugins/modules/supervisorctl.py
@@ -11,7 +11,7 @@ __metaclass__ = type
 
 DOCUMENTATION = r"""
 module: supervisorctl
-short_description: Manage the state of a program or group of programs managed by C(supervisord).
+short_description: Manage the state of a program or group of programs managed by C(supervisord)
 description:
   - Manage the state of a program or group of programs managed by C(supervisord).
 extends_documentation_fragment:
@@ -26,8 +26,8 @@ options:
     type: str
     description:
       - The name of the supervisord program or group to manage.
-      - The name will be taken as group name when it ends with a colon V(:).
-      - If O(name=all), all programs and program groups will be managed.
+      - The name is taken as a group name when it ends with a colon V(:).
+      - If O(name=all), all programs and program groups are managed.
     required: true
   config:
     type: path
@@ -67,10 +67,11 @@ options:
     description:
       - Path to C(supervisorctl) executable.
 notes:
-  - When O(state=present), the module will call C(supervisorctl reread) then C(supervisorctl add) if the program/group does not exist.
-  - When O(state=restarted), the module will call C(supervisorctl update) then call C(supervisorctl restart).
-  - When O(state=absent), the module will call C(supervisorctl reread) then C(supervisorctl remove) to remove the target program/group. If the
-    program/group is still running, the action will fail. If you want to stop the program/group before removing, use O(stop_before_removing=true).
+  - When O(state=present), the module calls C(supervisorctl reread) then C(supervisorctl add) if the program/group does not
+    exist.
+  - When O(state=restarted), the module calls C(supervisorctl update) then calls C(supervisorctl restart).
+  - When O(state=absent), the module calls C(supervisorctl reread) then C(supervisorctl remove) to remove the target program/group.
+    If the program/group is still running, the action fails. If you want to stop the program/group before removing, use O(stop_before_removing=true).
 requirements: ["supervisorctl"]
 author:
   - "Matt Wright (@mattupstate)"
diff --git a/plugins/modules/svc.py b/plugins/modules/svc.py
index 110b61856a..4a6e21ef5f 100644
--- a/plugins/modules/svc.py
+++ b/plugins/modules/svc.py
@@ -30,20 +30,21 @@ options:
     required: true
   state:
     description:
-      - V(started)/V(stopped) are idempotent actions that will not run commands unless necessary.
-      - V(restarted) will always bounce the svc (svc -t) and V(killed) will always bounce the svc (svc -k).
-      - V(reloaded) will send a sigusr1 (svc -1).
-      - V(once) will run a normally downed svc once (svc -o), not really an idempotent operation.
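The sudoers hunk above swaps a raised Exception for module.fail_json(), so a validation failure is reported as a structured module result rather than a Python traceback. A sketch of that pattern, assuming module is an AnsibleModule instance and check_command/content mirror the surrounding class:

# Sketch (assumes 'module' is an AnsibleModule; check_command and content come
# from the surrounding code): fail_json() returns msg plus stdout/stderr fields
# instead of raising an unhandled exception.
rc, stdout, stderr = module.run_command(check_command, data=content)
if rc != 0:
    module.fail_json(
        msg='Failed to validate sudoers rule:\n{0}'.format(stdout or stderr),
        stdout=stdout,
        stderr=stderr,
    )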
+      - V(started)/V(stopped) are idempotent actions that do not run commands unless necessary.
+      - V(restarted) always bounces the svc (svc -t) and V(killed) always bounces the svc (svc -k).
+      - V(reloaded) sends a SIGUSR1 (svc -1).
+      - V(once) runs a normally downed svc once (svc -o), not really an idempotent operation.
     type: str
     choices: [killed, once, reloaded, restarted, started, stopped]
   downed:
     description:
-      - Should a C(down) file exist or not, if it exists it disables auto startup. Defaults to V(false). Downed does not imply stopped.
+      - Whether a C(down) file should exist; if it exists, it disables auto startup. Defaults to V(false). Downed does not
+        imply stopped.
     type: bool
   enabled:
     description:
-      - Whether the service is enabled or not, if disabled it also implies O(state=stopped). Take note that a service can be enabled and downed (no auto
-        restart).
+      - Whether the service is enabled or not; if disabled, it also implies O(state=stopped). Take note that a service can
+        be enabled and downed (no auto restart).
     type: bool
   service_dir:
     description:
diff --git a/plugins/modules/svr4pkg.py b/plugins/modules/svr4pkg.py
index 926dbad448..76d65c8f43 100644
--- a/plugins/modules/svr4pkg.py
+++ b/plugins/modules/svr4pkg.py
@@ -16,7 +16,7 @@ short_description: Manage Solaris SVR4 packages
 description:
   - Manages SVR4 packages on Solaris 10 and 11.
   - These were the native packages on Solaris <= 10 and are available as a legacy feature in Solaris 11.
-  - Note that this is a very basic packaging system. It will not enforce dependencies on install or remove.
+  - Note that this is a very basic packaging system. It does not enforce dependencies on install or remove.
 author: "Boyd Adamson (@brontitall)"
 extends_documentation_fragment:
   - community.general.attributes
@@ -36,7 +36,8 @@ options:
     description:
       - Whether to install (V(present)), or remove (V(absent)) a package.
       - If the package is to be installed, then O(src) is required.
-      - The SVR4 package system does not provide an upgrade operation. You need to uninstall the old, then install the new package.
+      - The SVR4 package system does not provide an upgrade operation. You need to uninstall the old, then install the new
+        package.
     required: true
     choices: ["present", "absent"]
     type: str
@@ -44,9 +45,10 @@ options:
   src:
     description:
       - Specifies the location to install the package from. Required when O(state=present).
-      - "Can be any path acceptable to the C(pkgadd) command's C(-d) option. For example: V(somefile.pkg), V(/dir/with/pkgs), V(http://server/mypkgs.pkg)."
-      - If using a file or directory, they must already be accessible by the host. See the M(ansible.builtin.copy) module for a way to get them
-        there.
+      - "Can be any path acceptable to the C(pkgadd) command's C(-d) option. For example: V(somefile.pkg), V(/dir/with/pkgs),
+        V(http://server/mypkgs.pkg)."
+      - If using a file or directory, it must already be accessible by the host. See the M(ansible.builtin.copy) module
+        for a way to get it there.
     type: str
   proxy:
     description:
@@ -190,10 +192,10 @@ def main():
         argument_spec=dict(
             name=dict(required=True),
             state=dict(required=True, choices=['present', 'absent']),
-            src=dict(default=None),
-            proxy=dict(default=None),
-            response_file=dict(default=None),
-            zone=dict(required=False, default='all', choices=['current', 'all']),
+            src=dict(),
+            proxy=dict(),
+            response_file=dict(),
+            zone=dict(default='all', choices=['current', 'all']),
             category=dict(default=False, type='bool')
         ),
         supports_check_mode=True
diff --git a/plugins/modules/swdepot.py b/plugins/modules/swdepot.py
index 628c63f810..69ed726aa0 100644
--- a/plugins/modules/swdepot.py
+++ b/plugins/modules/swdepot.py
@@ -16,7 +16,7 @@ DOCUMENTATION = r"""
 module: swdepot
 short_description: Manage packages with swdepot package manager (HP-UX)
 description:
-  - Will install, upgrade and remove packages with swdepot package manager (HP-UX).
+  - Installs, upgrades, and removes packages with the C(swdepot) package manager (HP-UX).
 notes: []
 author: "Raul Melo (@melodous)"
 extends_documentation_fragment:
@@ -134,7 +134,7 @@ def main():
         argument_spec=dict(
             name=dict(aliases=['pkg'], required=True),
             state=dict(choices=['present', 'absent', 'latest'], required=True),
-            depot=dict(default=None, required=False)
+            depot=dict()
         ),
         supports_check_mode=True
     )
diff --git a/plugins/modules/swupd.py b/plugins/modules/swupd.py
index 14c381bc5a..c01904821c 100644
--- a/plugins/modules/swupd.py
+++ b/plugins/modules/swupd.py
@@ -35,8 +35,8 @@ options:
     type: str
   manifest:
     description:
-      - The manifest contains information about the bundles at certain version of the OS. Specify a Manifest version to verify against that version
-        or leave unspecified to verify against the current version.
+      - The manifest contains information about the bundles at a certain version of the OS. Specify a manifest version to
+        verify against that version or leave unspecified to verify against the current version.
     aliases: [release, version]
     type: int
   name:
@@ -46,7 +46,8 @@ options:
     type: str
   state:
     description:
-      - Indicates the desired (I)bundle state. V(present) ensures the bundle is installed while V(absent) ensures the (I)bundle is not installed.
+      - Indicates the desired I(bundle) state. V(present) ensures the bundle is installed while V(absent) ensures the I(bundle)
+        is not installed.
     default: present
     choices: [present, absent]
     type: str
@@ -95,16 +96,6 @@ EXAMPLES = r"""
     manifest: 12920
 """
 
-RETURN = r"""
-stdout:
-  description: C(stdout) of C(swupd).
-  returned: always
-  type: str
-stderr:
-  description: C(stderr) of C(swupd).
-  returned: always
-  type: str
-"""
 
 import os
 from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/syslogger.py b/plugins/modules/syslogger.py
index ca9aebfcfc..7f7dfabd38 100644
--- a/plugins/modules/syslogger.py
+++ b/plugins/modules/syslogger.py
@@ -35,8 +35,25 @@ options:
     type: str
     description:
       - Set the log facility.
- choices: ["kern", "user", "mail", "daemon", "auth", "lpr", "news", "uucp", "cron", "syslog", "local0", "local1", "local2", "local3", "local4", - "local5", "local6", "local7"] + choices: + - kern + - user + - mail + - daemon + - auth + - lpr + - news + - uucp + - cron + - syslog + - local0 + - local1 + - local2 + - local3 + - local4 + - local5 + - local6 + - local7 default: "daemon" log_pid: description: @@ -150,17 +167,17 @@ def main(): module_args = dict( ident=dict(type='str', default='ansible_syslogger'), msg=dict(type='str', required=True), - priority=dict(type='str', required=False, + priority=dict(type='str', choices=["emerg", "alert", "crit", "err", "warning", "notice", "info", "debug"], default='info'), - facility=dict(type='str', required=False, + facility=dict(type='str', choices=["kern", "user", "mail", "daemon", "auth", "lpr", "news", "uucp", "cron", "syslog", "local0", "local1", "local2", "local3", "local4", "local5", "local6", "local7"], default='daemon'), - log_pid=dict(type='bool', required=False, default=False) + log_pid=dict(type='bool', default=False) ) module = AnsibleModule( diff --git a/plugins/modules/syspatch.py b/plugins/modules/syspatch.py index 3cedc220f7..f46671fa74 100644 --- a/plugins/modules/syspatch.py +++ b/plugins/modules/syspatch.py @@ -59,20 +59,6 @@ EXAMPLES = r""" """ RETURN = r""" -rc: - description: The command return code (0 means success). - returned: always - type: int -stdout: - description: C(syspatch) standard output. - returned: always - type: str - sample: "001_rip6cksum" -stderr: - description: C(syspatch) standard error. - returned: always - type: str - sample: "syspatch: need root privileges" reboot_needed: description: Whether or not a reboot is required after an update. returned: always @@ -103,7 +89,6 @@ def syspatch_run(module): cmd = module.get_bin_path('syspatch', True) changed = False reboot_needed = False - warnings = [] # Set safe defaults for run_flag and check_flag run_flag = ['-c'] @@ -145,11 +130,11 @@ def syspatch_run(module): # Kernel update applied reboot_needed = True elif out.lower().find('syspatch updated itself') >= 0: - warnings.append('Syspatch was updated. Please run syspatch again.') + module.warn('Syspatch was updated. Please run syspatch again.') # If no stdout, then warn user if len(out) == 0: - warnings.append('syspatch had suggested changes, but stdout was empty.') + module.warn('syspatch had suggested changes, but stdout was empty.') changed = True else: @@ -161,7 +146,6 @@ def syspatch_run(module): rc=rc, stderr=err, stdout=out, - warnings=warnings ) diff --git a/plugins/modules/sysrc.py b/plugins/modules/sysrc.py index d93bccd620..a3e24830a0 100644 --- a/plugins/modules/sysrc.py +++ b/plugins/modules/sysrc.py @@ -7,6 +7,7 @@ # SPDX-License-Identifier: GPL-3.0-or-later from __future__ import absolute_import, division, print_function + __metaclass__ = type DOCUMENTATION = r""" @@ -94,163 +95,122 @@ EXAMPLES = r""" jail: testjail """ -RETURN = r""" -changed: - description: Return changed for sysrc actions. 
- returned: always - type: bool - sample: true -""" -from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper + +import os import re -class Sysrc(object): - def __init__(self, module, name, value, path, delim, jail): - self.module = module - self.name = name - self.changed = False - self.value = value - self.path = path - self.delim = delim - self.jail = jail - self.sysrc = module.get_bin_path('sysrc', True) - - def has_unknown_variable(self, out, err): - # newer versions of sysrc use stderr instead of stdout - return err.find("unknown variable") > 0 or out.find("unknown variable") > 0 - - def exists(self): - # sysrc doesn't really use exit codes - (rc, out, err) = self.run_sysrc(self.name) - if self.value is None: - regex = "%s: " % re.escape(self.name) - else: - regex = "%s: %s$" % (re.escape(self.name), re.escape(self.value)) - - return not self.has_unknown_variable(out, err) and re.match(regex, out) is not None - - def contains(self): - (rc, out, err) = self.run_sysrc('-n', self.name) - if self.has_unknown_variable(out, err): - return False - - return self.value in out.strip().split(self.delim) - - def present(self): - if self.exists(): - return - - if self.module.check_mode: - self.changed = True - return - - (rc, out, err) = self.run_sysrc("%s=%s" % (self.name, self.value)) - if out.find("%s:" % self.name) == 0 and re.search("-> %s$" % re.escape(self.value), out) is not None: - self.changed = True - - def absent(self): - if not self.exists(): - return - - # inversed since we still need to mark as changed - if not self.module.check_mode: - (rc, out, err) = self.run_sysrc('-x', self.name) - if self.has_unknown_variable(out, err): - return - - self.changed = True - - def value_present(self): - if self.contains(): - return - - if self.module.check_mode: - self.changed = True - return - - setstring = '%s+=%s%s' % (self.name, self.delim, self.value) - (rc, out, err) = self.run_sysrc(setstring) - if out.find("%s:" % self.name) == 0: - values = out.split(' -> ')[1].strip().split(self.delim) - if self.value in values: - self.changed = True - - def value_absent(self): - if not self.contains(): - return - - if self.module.check_mode: - self.changed = True - return - - setstring = '%s-=%s%s' % (self.name, self.delim, self.value) - (rc, out, err) = self.run_sysrc(setstring) - if out.find("%s:" % self.name) == 0: - values = out.split(' -> ')[1].strip().split(self.delim) - if self.value not in values: - self.changed = True - - def run_sysrc(self, *args): - cmd = [self.sysrc, '-f', self.path] - if self.jail: - cmd += ['-j', self.jail] - cmd.extend(args) - - (rc, out, err) = self.module.run_command(cmd) - - return (rc, out, err) - - -def main(): - module = AnsibleModule( +class Sysrc(StateModuleHelper): + module = dict( argument_spec=dict( name=dict(type='str', required=True), - value=dict(type='str', default=None), + value=dict(type='str'), state=dict(type='str', default='present', choices=['absent', 'present', 'value_present', 'value_absent']), path=dict(type='str', default='/etc/rc.conf'), delim=dict(type='str', default=' '), - jail=dict(type='str', default=None), + jail=dict(type='str') ), - supports_check_mode=True, + supports_check_mode=True ) + output_params = ('value',) + use_old_vardict = False - name = module.params.pop('name') - # OID style names are not supported - if not re.match('^[a-zA-Z0-9_]+$', name): - module.fail_json( - msg="Name may only contain alphanumeric and underscore 
characters" - ) + def __init_module__(self): + # OID style names are not supported + if not re.match(r'^\w+$', self.vars.name, re.ASCII): + self.module.fail_json(msg="Name may only contain alpha-numeric and underscore characters") - value = module.params.pop('value') - state = module.params.pop('state') - path = module.params.pop('path') - delim = module.params.pop('delim') - jail = module.params.pop('jail') - result = dict( - name=name, - state=state, - value=value, - path=path, - delim=delim, - jail=jail - ) + self.sysrc = self.module.get_bin_path('sysrc', True) - rc_value = Sysrc(module, name, value, path, delim, jail) + def _contains(self): + value = self._get() + if value is None: + return False, None - if state == 'present': - rc_value.present() - elif state == 'absent': - rc_value.absent() - elif state == 'value_present': - rc_value.value_present() - elif state == 'value_absent': - rc_value.value_absent() + value = value.split(self.vars.delim) - result['changed'] = rc_value.changed + return self.vars.value in value, value - module.exit_json(**result) + def _get(self): + if not os.path.exists(self.vars.path): + return None + + (rc, out, err) = self._sysrc('-v', '-n', self.vars.name) + if "unknown variable" in err or "unknown variable" in out: + # Prior to FreeBSD 11.1 sysrc would write "unknown variable" to stdout and not stderr + # https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=229806 + return None + + if out.startswith(self.vars.path): + return out.split(':', 1)[1].strip() + + return None + + def _modify(self, op, changed): + (rc, out, err) = self._sysrc("%s%s=%s%s" % (self.vars.name, op, self.vars.delim, self.vars.value)) + if out.startswith("%s:" % self.vars.name): + return changed(out.split(' -> ')[1].strip().split(self.vars.delim)) + + return False + + def _sysrc(self, *args): + cmd = [self.sysrc, '-f', self.vars.path] + if self.vars.jail: + cmd += ['-j', self.vars.jail] + cmd.extend(args) + + (rc, out, err) = self.module.run_command(cmd) + if "Permission denied" in err: + self.module.fail_json(msg="Permission denied for %s" % self.vars.path) + + return rc, out, err + + def state_absent(self): + if self._get() is None: + return + + if not self.check_mode: + self._sysrc('-x', self.vars.name) + + self.changed = True + + def state_present(self): + value = self._get() + if value == self.vars.value: + return + + if self.vars.value is None: + self.vars.set('value', value) + return + + if not self.check_mode: + self._sysrc("%s=%s" % (self.vars.name, self.vars.value)) + + self.changed = True + + def state_value_absent(self): + (contains, _unused) = self._contains() + if not contains: + return + + self.changed = self.check_mode or self._modify('-', lambda values: self.vars.value not in values) + + def state_value_present(self): + (contains, value) = self._contains() + if contains: + return + + if self.vars.value is None: + self.vars.set('value', value) + return + + self.changed = self.check_mode or self._modify('+', lambda values: self.vars.value in values) + + +def main(): + Sysrc.execute() if __name__ == '__main__': diff --git a/plugins/modules/systemd_creds_decrypt.py b/plugins/modules/systemd_creds_decrypt.py index a6c7126b2e..c896737a93 100644 --- a/plugins/modules/systemd_creds_decrypt.py +++ b/plugins/modules/systemd_creds_decrypt.py @@ -13,9 +13,9 @@ DOCUMENTATION = r""" module: systemd_creds_decrypt short_description: C(systemd)'s C(systemd-creds decrypt) plugin description: - - This module decrypts input using C(systemd)'s C(systemd-creds decrypt). 
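The sysrc rewrite above ports the module to StateModuleHelper, which dispatches to a state_<value> method chosen by the state parameter. A minimal sketch of that dispatch pattern (the Demo class is hypothetical; only the method-naming convention and execute() entry point are taken from the code above):

from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper

class Demo(StateModuleHelper):
    # 'module' holds the keyword arguments normally passed to AnsibleModule.
    module = dict(
        argument_spec=dict(
            state=dict(type='str', default='present', choices=['present', 'absent']),
        ),
        supports_check_mode=True,
    )
    use_old_vardict = False

    def state_present(self):
        # Runs when state=present; setting self.changed marks the result changed.
        self.changed = True

    def state_absent(self):
        # Runs when state=absent.
        pass

def main():
    Demo.execute()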
+  - This module decrypts input using C(systemd)'s C(systemd-creds decrypt).
 author:
-  - Thomas Sjögren (@konstruktoid)
+  - Thomas Sjögren (@konstruktoid)
 version_added: '10.2.0'
 extends_documentation_fragment:
   - community.general.attributes
@@ -29,50 +29,46 @@ attributes:
     details:
       - This action does not modify state.
 options:
-    name:
-      description:
-        - The credential name to validate the embedded credential name.
-      type: str
-      required: false
-    newline:
-      description:
-        - Whether to add a trailing newline character to the end of the output,
-          if not present.
-      type: bool
-      required: false
-      default: false
-    secret:
-      description:
-        - The secret to decrypt.
-      type: str
-      required: true
-    timestamp:
-      description:
-        - The timestamp to use to validate the V(not-after) timestamp that
-          was used during encryption.
-        - Takes a timestamp specification in the format described in
-          V(systemd.time(7\)).
-      type: str
-      required: false
-    transcode:
-      description:
-        - Whether to transcode the output before returning it.
-      type: str
-      choices: [ base64, unbase64, hex, unhex ]
-      required: false
-    user:
-      description:
-        - A user name or numeric UID when decrypting from a specific user context.
-        - If set to the special string V(self) it sets the user to the user
-          of the calling process.
-        - Requires C(systemd) 256 or later.
-      type: str
-      required: false
+  name:
+    description:
+      - The credential name to validate the embedded credential name.
+    type: str
+    required: false
+  newline:
+    description:
+      - Whether to add a trailing newline character to the end of the output, if not present.
+    type: bool
+    required: false
+    default: false
+  secret:
+    description:
+      - The secret to decrypt.
+    type: str
+    required: true
+  timestamp:
+    description:
+      - The timestamp to use to validate the V(not-after) timestamp that was used during encryption.
+      - Takes a timestamp specification in the format described in V(systemd.time(7\)).
+    type: str
+    required: false
+  transcode:
+    description:
+      - Whether to transcode the output before returning it.
+    type: str
+    choices: [base64, unbase64, hex, unhex]
+    required: false
+  user:
+    description:
+      - A user name or numeric UID when decrypting from a specific user context.
+      - If set to the special string V(self) it sets the user to the user of the calling process.
+      - Requires C(systemd) 256 or later.
+    type: str
+    required: false
 notes:
   - C(systemd-creds) requires C(systemd) 250 or later.
 """
 
-EXAMPLES = """
+EXAMPLES = r"""
 - name: Decrypt secret
   community.general.systemd_creds_decrypt:
     name: db
@@ -86,16 +82,14 @@ EXAMPLES = """
 
 RETURN = r"""
 value:
-  description:
-    - The decrypted secret.
-    - Note that Ansible only supports returning UTF-8 encoded strings.
-      If the decrypted secret is binary data, or a string encoded in another
-      way, use O(transcode=base64) or O(transcode=hex) to circument this
-      restriction. You then need to decode the data when using it, for
-      example using the P(ansible.builtin.b64decode#filter) filter.
-  type: str
-  returned: always
-  sample: "access_token"
+  description:
+    - The decrypted secret.
+    - Note that Ansible only supports returning UTF-8 encoded strings. If the decrypted secret is binary data, or a string
+      encoded in another way, use O(transcode=base64) or O(transcode=hex) to circumvent this restriction. You then need to
+      decode the data when using it, for example using the P(ansible.builtin.b64decode#filter) filter.
+ type: str + returned: always + sample: "access_token" """ @@ -106,16 +100,12 @@ def main(): """Decrypt secret using systemd-creds.""" module = AnsibleModule( argument_spec=dict( - name=dict(type="str", required=False), - newline=dict(type="bool", required=False, default=False), + name=dict(type="str"), + newline=dict(type="bool", default=False), secret=dict(type="str", required=True, no_log=True), - timestamp=dict(type="str", required=False), - transcode=dict( - type="str", - choices=["base64", "unbase64", "hex", "unhex"], - required=False, - ), - user=dict(type="str", required=False), + timestamp=dict(type="str"), + transcode=dict(type="str", choices=["base64", "unbase64", "hex", "unhex"]), + user=dict(type="str"), ), supports_check_mode=True, ) diff --git a/plugins/modules/systemd_creds_encrypt.py b/plugins/modules/systemd_creds_encrypt.py index 07b68f96f2..2c4912427e 100644 --- a/plugins/modules/systemd_creds_encrypt.py +++ b/plugins/modules/systemd_creds_encrypt.py @@ -13,9 +13,9 @@ DOCUMENTATION = r""" module: systemd_creds_encrypt short_description: C(systemd)'s C(systemd-creds encrypt) plugin description: - - This module encrypts input using C(systemd)'s C(systemd-creds encrypt). + - This module encrypts input using C(systemd)'s C(systemd-creds encrypt). author: - - Thomas Sjögren (@konstruktoid) + - Thomas Sjögren (@konstruktoid) version_added: '10.2.0' extends_documentation_fragment: - community.general.attributes @@ -29,50 +29,46 @@ attributes: details: - This action does not modify state. options: - name: - description: - - The credential name to embed in the encrypted credential data. - type: str - required: false - not_after: - description: - - The time when the credential shall not be used anymore. - - Takes a timestamp specification in the format described in - V(systemd.time(7\)). - type: str - required: false - pretty: - description: - - Pretty print the output so that it may be pasted directly into a - unit file. - type: bool - required: false - default: false - secret: - description: - - The secret to encrypt. - type: str - required: true - timestamp: - description: - - The timestamp to embed into the encrypted credential. - - Takes a timestamp specification in the format described in - V(systemd.time(7\)). - type: str - required: false - user: - description: - - A user name or numeric UID to encrypt the credential for. - - If set to the special string V(self) it sets the user to the user - of the calling process. - - Requires C(systemd) 256 or later. - type: str - required: false + name: + description: + - The credential name to embed in the encrypted credential data. + type: str + required: false + not_after: + description: + - The time when the credential shall not be used anymore. + - Takes a timestamp specification in the format described in V(systemd.time(7\)). + type: str + required: false + pretty: + description: + - Pretty print the output so that it may be pasted directly into a unit file. + type: bool + required: false + default: false + secret: + description: + - The secret to encrypt. + type: str + required: true + timestamp: + description: + - The timestamp to embed into the encrypted credential. + - Takes a timestamp specification in the format described in V(systemd.time(7\)). + type: str + required: false + user: + description: + - A user name or numeric UID to encrypt the credential for. + - If set to the special string V(self) it sets the user to the user of the calling process. + - Requires C(systemd) 256 or later. 
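The RETURN note in the decrypt hunk above is worth making concrete: module results must be UTF-8 text, so binary secrets are round-tripped through an encoding such as Base64. A standalone sketch of the consumer-side decode (values are illustrative):

import base64

# Why transcode=base64 matters: module results must be UTF-8 text, so binary
# secret material is returned Base64-encoded and decoded by the consumer.
binary_secret = b'\x00\x01\xffnot-valid-utf8'
returned_value = base64.b64encode(binary_secret).decode('ascii')  # safe to return
assert base64.b64decode(returned_value) == binary_secret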
+    type: str
+    required: false
 notes:
   - C(systemd-creds) requires C(systemd) 250 or later.
 """
 
-EXAMPLES = """
+EXAMPLES = r"""
 - name: Encrypt secret
   become: true
   community.general.systemd_creds_encrypt:
@@ -88,10 +84,10 @@ EXAMPLES = """
 
 RETURN = r"""
 value:
-  description: The Base64 encoded encrypted secret.
-  type: str
-  returned: always
-  sample: "WhQZht+JQJax1aZemmGLxmAAAA..."
+  description: The Base64 encoded encrypted secret.
+  type: str
+  returned: always
+  sample: "WhQZht+JQJax1aZemmGLxmAAAA..."
 """
 
 from ansible.module_utils.basic import AnsibleModule
@@ -101,12 +97,12 @@ def main():
     """Encrypt secret using systemd-creds."""
     module = AnsibleModule(
         argument_spec=dict(
-            name=dict(type="str", required=False),
-            not_after=dict(type="str", required=False),
+            name=dict(type="str"),
+            not_after=dict(type="str"),
             pretty=dict(type="bool", default=False),
             secret=dict(type="str", required=True, no_log=True),
-            timestamp=dict(type="str", required=False),
-            user=dict(type="str", required=False),
+            timestamp=dict(type="str"),
+            user=dict(type="str"),
         ),
         supports_check_mode=True,
     )
diff --git a/plugins/modules/systemd_info.py b/plugins/modules/systemd_info.py
new file mode 100644
index 0000000000..12f308849c
--- /dev/null
+++ b/plugins/modules/systemd_info.py
@@ -0,0 +1,419 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2025, Marco Noce
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+module: systemd_info
+short_description: Gather C(systemd) unit info
+description:
+  - This module gathers info about systemd units (services, targets, sockets, mounts, timers).
+  - Timer units are supported since community.general 10.5.0.
+  - It runs C(systemctl list-units) (or processes selected units) and collects properties for each unit using C(systemctl
+    show).
+  - In case a unit has multiple properties with the same name, only the value of the first one is collected.
+  - Even if a unit has a RV(units.loadstate) of V(not-found) or V(masked), it is returned, but only with the minimal properties
+    (RV(units.name), RV(units.loadstate), RV(units.activestate), RV(units.substate)).
+  - When O(unitname) and O(extra_properties) are used, the module first checks if the unit exists, then checks if the properties
+    exist. If not, the module fails.
+  - When O(unitname) is used with wildcard expressions, the module checks for units that match the indicated expressions;
+    if no units match any of the indicated expressions, the module fails.
+version_added: "10.4.0"
+options:
+  unitname:
+    description:
+      - List of unit names to process.
+      - It supports C(.service), C(.target), C(.socket), C(.mount) and C(.timer) unit types.
+      - C(.timer) units are supported since community.general 10.5.0.
+      - Each name must correspond to the full name of the C(systemd) unit or to a wildcard expression like V('ssh*') or V('*.service').
+      - Wildcard expressions in O(unitname) are supported since community.general 10.5.0.
+    type: list
+    elements: str
+    default: []
+  extra_properties:
+    description:
+      - Additional properties to retrieve (appended to the default ones).
+      - Note that all property names are converted to lower-case.
+    type: list
+    elements: str
+    default: []
+author:
+  - Marco Noce (@NomakCooper)
+extends_documentation_fragment:
+  - community.general.attributes
+  - community.general.attributes.info_module
+"""
+
+EXAMPLES = r"""
+---
+# Gather info for all systemd services, targets, sockets, mounts, and timers
+- name: Gather all systemd unit info
+  community.general.systemd_info:
+  register: results
+
+# Gather info for selected units with extra properties.
+- name: Gather info for selected unit(s)
+  community.general.systemd_info:
+    unitname:
+      - systemd-journald.service
+      - systemd-journald.socket
+      - sshd-keygen.target
+      - -.mount
+    extra_properties:
+      - Description
+  register: results
+
+# Gather info using wildcard expressions
+- name: Gather info of units that start with 'systemd-'
+  community.general.systemd_info:
+    unitname:
+      - 'systemd-*'
+  register: results
+
+# Gather info for systemd-tmpfiles-clean.timer with extra properties
+- name: Gather info of systemd-tmpfiles-clean.timer and extra AccuracyUSec
+  community.general.systemd_info:
+    unitname:
+      - systemd-tmpfiles-clean.timer
+    extra_properties:
+      - AccuracyUSec
+  register: results
+"""
+
+RETURN = r"""
+units:
+  description:
+    - Dictionary of systemd unit info keyed by unit name.
+    - Additional fields are returned depending on the value of O(extra_properties).
+  returned: success
+  type: dict
+  elements: dict
+  contains:
+    name:
+      description: Unit full name.
+      returned: always
+      type: str
+      sample: systemd-journald.service
+    loadstate:
+      description:
+        - The state of the unit's configuration load.
+        - The most common values are V(loaded), V(not-found), and V(masked), but other values are possible as well.
+      returned: always
+      type: str
+      sample: loaded
+    activestate:
+      description:
+        - The current active state of the unit.
+        - The most common values are V(active), V(inactive), and V(failed), but other values are possible as well.
+      returned: always
+      type: str
+      sample: active
+    substate:
+      description:
+        - The detailed sub state of the unit.
+        - The most common values are V(running), V(dead), V(exited), V(failed), V(listening), V(active), and V(mounted), but
+          other values are possible as well.
+      returned: always
+      type: str
+      sample: running
+    fragmentpath:
+      description: Path to the unit's fragment file.
+      returned: always except for C(.mount) units.
+      type: str
+      sample: /usr/lib/systemd/system/systemd-journald.service
+    unitfilepreset:
+      description:
+        - The preset configuration state for the unit file.
+        - The most common values are V(enabled), V(disabled), and V(static), but other values are possible as well.
+      returned: always except for C(.mount) units.
+      type: str
+      sample: disabled
+    unitfilestate:
+      description:
+        - The actual configuration state for the unit file.
+        - The most common values are V(enabled), V(disabled), and V(static), but other values are possible as well.
+      returned: always except for C(.mount) units.
+      type: str
+      sample: enabled
+    mainpid:
+      description: PID of the main process of the unit.
+      returned: only for C(.service) units.
+      type: str
+      sample: 798
+    execmainpid:
+      description: PID of the ExecStart process of the unit.
+      returned: only for C(.service) units.
+      type: str
+      sample: 799
+    options:
+      description: The mount options.
+      returned: only for C(.mount) units.
+      type: str
+      sample: rw,relatime,noquota
+    type:
+      description: The filesystem type of the mounted device.
+      returned: only for C(.mount) units.
+      type: str
+      sample: ext4
+    what:
+      description: The device that is mounted.
+ returned: only for C(.mount) units. + type: str + sample: /dev/sda1 + where: + description: The mount point where the device is mounted. + returned: only for C(.mount) units. + type: str + sample: / + sample: + { + "-.mount": { + "activestate": "active", + "description": "Root Mount", + "loadstate": "loaded", + "name": "-.mount", + "options": "rw,relatime,seclabel,attr2,inode64,logbufs=8,logbsize=32k,noquota", + "substate": "mounted", + "type": "xfs", + "what": "/dev/mapper/cs-root", + "where": "/" + }, + "sshd-keygen.target": { + "activestate": "active", + "description": "sshd-keygen.target", + "fragmentpath": "/usr/lib/systemd/system/sshd-keygen.target", + "loadstate": "loaded", + "name": "sshd-keygen.target", + "substate": "active", + "unitfilepreset": "disabled", + "unitfilestate": "static" + }, + "systemd-journald.service": { + "activestate": "active", + "description": "Journal Service", + "execmainpid": "613", + "fragmentpath": "/usr/lib/systemd/system/systemd-journald.service", + "loadstate": "loaded", + "mainpid": "613", + "name": "systemd-journald.service", + "substate": "running", + "unitfilepreset": "disabled", + "unitfilestate": "static" + }, + "systemd-journald.socket": { + "activestate": "active", + "description": "Journal Socket", + "fragmentpath": "/usr/lib/systemd/system/systemd-journald.socket", + "loadstate": "loaded", + "name": "systemd-journald.socket", + "substate": "running", + "unitfilepreset": "disabled", + "unitfilestate": "static" + } + } +""" + +import fnmatch +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.systemd import systemd_runner + + +def get_version(runner): + with runner("version") as ctx: + rc, stdout, stderr = ctx.run() + return stdout.strip() + + +def list_units(runner, types_value): + context = "list_units types all plain no_legend" + with runner(context) as ctx: + rc, stdout, stderr = ctx.run(types=types_value) + return stdout.strip() + + +def show_unit_properties(runner, prop_list, unit): + context = "show props dashdash unit" + with runner(context) as ctx: + rc, stdout, stderr = ctx.run(props=prop_list, unit=unit) + return stdout.strip() + + +def parse_show_output(output): + result = {} + for line in output.splitlines(): + if "=" in line: + key, val = line.split("=", 1) + key = key.lower() + if key not in result: + result[key] = val + return result + + +def get_unit_properties(runner, prop_list, unit): + output = show_unit_properties(runner, prop_list, unit) + return parse_show_output(output) + + +def determine_category(unit): + if unit.endswith('.service'): + return 'service' + elif unit.endswith('.target'): + return 'target' + elif unit.endswith('.socket'): + return 'socket' + elif unit.endswith('.mount'): + return 'mount' + elif unit.endswith('.timer'): + return 'timer' + else: + return None + + +def extract_unit_properties(unit_data, prop_list): + lowerprop = [x.lower() for x in prop_list] + return {prop: unit_data[prop] for prop in lowerprop if prop in unit_data} + + +def unit_exists(unit, units_info): + info = units_info.get(unit, {}) + loadstate = info.get("loadstate", "").lower() + return loadstate not in ("not-found", "masked") + + +def get_category_base_props(category): + base_props = { + 'service': ['FragmentPath', 'UnitFileState', 'UnitFilePreset', 'MainPID', 'ExecMainPID'], + 'target': ['FragmentPath', 'UnitFileState', 'UnitFilePreset'], + 'socket': ['FragmentPath', 'UnitFileState', 'UnitFilePreset'], + 'mount': ['Where', 'What', 'Options', 'Type'], + 'timer': 
['FragmentPath', 'UnitFileState', 'UnitFilePreset'], + } + return base_props.get(category, []) + + +def validate_unit_and_properties(runner, unit, extra_properties, units_info, property_cache): + if not unit_exists(unit, units_info): + module.fail_json(msg="Unit '{0}' does not exist or is inaccessible.".format(unit)) + + category = determine_category(unit) + + if not category: + module.fail_json(msg="Could not determine the category for unit '{0}'.".format(unit)) + + state_props = ['LoadState', 'ActiveState', 'SubState'] + props = get_category_base_props(category) + full_props = set(props + state_props + extra_properties) + + if unit not in property_cache: + unit_data = get_unit_properties(runner, full_props, unit) + property_cache[unit] = unit_data + else: + unit_data = property_cache[unit] + if extra_properties: + missing_props = [prop for prop in extra_properties if prop.lower() not in unit_data] + if missing_props: + module.fail_json(msg="The following properties do not exist for unit '{0}': {1}".format(unit, ", ".join(missing_props))) + + return True + + +def process_wildcards(selected_units, all_units, module): + resolved_units = {} + non_matching_patterns = [] + + for pattern in selected_units: + matches = fnmatch.filter(all_units, pattern) + if not matches: + non_matching_patterns.append(pattern) + else: + for match in matches: + resolved_units[match] = True + + if not resolved_units: + module.fail_json(msg="No units match any of the provided patterns: {}".format(", ".join(non_matching_patterns))) + + return resolved_units, non_matching_patterns + + +def process_unit(runner, unit, extra_properties, units_info, property_cache, state_props): + if not unit_exists(unit, units_info): + return units_info.get(unit, {"name": unit, "loadstate": "not-found"}) + + validate_unit_and_properties(runner, unit, extra_properties, units_info, property_cache) + category = determine_category(unit) + + if not category: + module.fail_json(msg="Could not determine the category for unit '{0}'.".format(unit)) + + props = get_category_base_props(category) + full_props = set(props + state_props + extra_properties) + unit_data = property_cache[unit] + fact = {"name": unit} + minimal_keys = ["LoadState", "ActiveState", "SubState"] + fact.update(extract_unit_properties(unit_data, minimal_keys)) + ls = unit_data.get("loadstate", "").lower() + + if ls not in ("not-found", "masked"): + fact.update(extract_unit_properties(unit_data, full_props)) + + return fact + + +def main(): + global module + module_args = dict( + unitname=dict(type='list', elements='str', default=[]), + extra_properties=dict(type='list', elements='str', default=[]) + ) + module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) + systemctl_bin = module.get_bin_path('systemctl', required=True) + + base_runner = systemd_runner(module, systemctl_bin) + + get_version(base_runner) + + state_props = ['LoadState', 'ActiveState', 'SubState'] + results = {} + + unit_types = ["service", "target", "socket", "mount", "timer"] + + list_output = list_units(base_runner, unit_types) + units_info = {} + for line in list_output.splitlines(): + tokens = line.split() + if len(tokens) < 4: + continue + unit_name = tokens[0] + loadstate = tokens[1] + activestate = tokens[2] + substate = tokens[3] + units_info[unit_name] = { + "name": unit_name, + "loadstate": loadstate, + "activestate": activestate, + "substate": substate, + } + + property_cache = {} + extra_properties = module.params['extra_properties'] + + if module.params['unitname']: + 
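A standalone sketch of what parse_show_output() above does with systemctl show output (sample text is illustrative): keys are lower-cased and, per the documented behavior, only the first occurrence of a duplicated property is kept.

# Mirrors parse_show_output() above on illustrative 'systemctl show' output.
sample = "LoadState=loaded\nActiveState=active\nSubState=running\nActiveState=ignored"
props = {}
for line in sample.splitlines():
    if "=" in line:
        key, val = line.split("=", 1)
        props.setdefault(key.lower(), val)  # first occurrence wins
assert props == {"loadstate": "loaded", "activestate": "active", "substate": "running"}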
selected_units = module.params['unitname'] + all_units = list(units_info) + resolved_units, non_matching = process_wildcards(selected_units, all_units, module) + units_to_process = sorted(resolved_units) + else: + units_to_process = list(units_info) + + for unit in units_to_process: + results[unit] = process_unit(base_runner, unit, extra_properties, units_info, property_cache, state_props) + module.exit_json(changed=False, units=results) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/sysupgrade.py b/plugins/modules/sysupgrade.py index 26232cd98d..d247e9d74c 100644 --- a/plugins/modules/sysupgrade.py +++ b/plugins/modules/sysupgrade.py @@ -25,7 +25,7 @@ options: snapshot: description: - Apply the latest snapshot. - - Otherwise release will be applied. + - Otherwise release is applied. default: false type: bool force: @@ -36,13 +36,13 @@ options: keep_files: description: - Keep the files under C(/home/_sysupgrade). - - By default, the files will be deleted after the upgrade. + - By default, the files are deleted after the upgrade. default: false type: bool fetch_only: description: - Fetch and verify files and create C(/bsd.upgrade) but do not reboot. - - Set to V(false) if you want C(sysupgrade) to reboot. This will cause Ansible to error, as it expects the module to exit gracefully. See the examples. + - Set to V(false) if you want C(sysupgrade) to reboot. This causes the module to fail. See the examples. default: true type: bool installurl: @@ -78,21 +78,6 @@ EXAMPLES = r""" ignore_errors: true """ -RETURN = r""" -rc: - description: The command return code (0 means success). - returned: always - type: int -stdout: - description: Sysupgrade standard output. - returned: always - type: str -stderr: - description: Sysupgrade standard error. - returned: always - type: str - sample: "sysupgrade: need root privileges" -""" from ansible.module_utils.basic import AnsibleModule @@ -101,7 +86,6 @@ def sysupgrade_run(module): sysupgrade_bin = module.get_bin_path('/usr/sbin/sysupgrade', required=True) cmd = [sysupgrade_bin] changed = False - warnings = [] # Setup command flags if module.params['snapshot']: @@ -137,7 +121,6 @@ def sysupgrade_run(module): rc=rc, stderr=err, stdout=out, - warnings=warnings ) diff --git a/plugins/modules/taiga_issue.py b/plugins/modules/taiga_issue.py index b66db29dba..d7f8824c95 100644 --- a/plugins/modules/taiga_issue.py +++ b/plugins/modules/taiga_issue.py @@ -88,8 +88,8 @@ options: author: Alejandro Guirao (@lekum) requirements: [python-taiga] notes: - - The authentication is achieved either by the environment variable E(TAIGA_TOKEN) or by the pair - of environment variables E(TAIGA_USERNAME) and E(TAIGA_PASSWORD). + - The authentication is achieved either by the environment variable E(TAIGA_TOKEN) or by the pair of environment variables + E(TAIGA_USERNAME) and E(TAIGA_PASSWORD). 
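The wildcard support in process_wildcards() from the systemd_info hunk above is plain fnmatch, so patterns are shell-style globs rather than regular expressions. A standalone illustration (the unit names are invented):

import fnmatch

# Shell-style glob matching as used by process_wildcards() above.
all_units = ['sshd.service', 'systemd-journald.service', 'systemd-journald.socket']
print(fnmatch.filter(all_units, 'systemd-*'))
# ['systemd-journald.service', 'systemd-journald.socket']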
""" EXAMPLES = r""" @@ -119,7 +119,7 @@ EXAMPLES = r""" state: absent """ -RETURN = """# """ +RETURN = """#""" import traceback from os import getenv @@ -255,18 +255,18 @@ def manage_issue(taiga_host, project_name, issue_subject, issue_priority, def main(): module = AnsibleModule( argument_spec=dict( - taiga_host=dict(type='str', required=False, default="https://api.taiga.io"), + taiga_host=dict(type='str', default="https://api.taiga.io"), project=dict(type='str', required=True), subject=dict(type='str', required=True), issue_type=dict(type='str', required=True), - priority=dict(type='str', required=False, default="Normal"), - status=dict(type='str', required=False, default="New"), - severity=dict(type='str', required=False, default="Normal"), - description=dict(type='str', required=False, default=""), - attachment=dict(type='path', required=False, default=None), - attachment_description=dict(type='str', required=False, default=""), - tags=dict(required=False, default=[], type='list', elements='str'), - state=dict(type='str', required=False, choices=['present', 'absent'], default='present'), + priority=dict(type='str', default="Normal"), + status=dict(type='str', default="New"), + severity=dict(type='str', default="Normal"), + description=dict(type='str', default=""), + attachment=dict(type='path'), + attachment_description=dict(type='str', default=""), + tags=dict(default=[], type='list', elements='str'), + state=dict(type='str', choices=['present', 'absent'], default='present'), ), supports_check_mode=True ) diff --git a/plugins/modules/telegram.py b/plugins/modules/telegram.py index cb2e6df2dd..abaa72c83e 100644 --- a/plugins/modules/telegram.py +++ b/plugins/modules/telegram.py @@ -21,7 +21,7 @@ description: - Send notifications using telegram bot, to a verified group or user. - Also, the user may try to use any other telegram bot API method, if you specify O(api_method) argument. notes: - - You will require a telegram account and create telegram bot to use this module. + - You need a telegram account and create telegram bot to use this module. extends_documentation_fragment: - community.general.attributes attributes: @@ -55,7 +55,7 @@ EXAMPLES = r""" community.general.telegram: token: '9999999:XXXXXXXXXXXXXXXXXXXXXXX' api_args: - chat_id: 000000 + chat_id: "000000" parse_mode: "markdown" text: "Your precious application has been deployed: https://example.com" disable_web_page_preview: true @@ -66,7 +66,7 @@ EXAMPLES = r""" token: '9999999:XXXXXXXXXXXXXXXXXXXXXXX' api_method: forwardMessage api_args: - chat_id: 000000 + chat_id: "000000" from_chat_id: 111111 disable_notification: true message_id: '{{ saved_msg_id }}' diff --git a/plugins/modules/terraform.py b/plugins/modules/terraform.py index 9db63df68d..a5adbcbe7e 100644 --- a/plugins/modules/terraform.py +++ b/plugins/modules/terraform.py @@ -44,65 +44,68 @@ options: - When set, the plugin discovery and auto-download behavior of Terraform is disabled. - The directory structure in the plugin path can be tricky. The Terraform docs U(https://learn.hashicorp.com/tutorials/terraform/automate-terraform#pre-installed-plugins) - show a simple directory of files, but actually, the directory structure has to follow the same structure you would see if Terraform auto-downloaded - the plugins. See the examples below for a tree output of an example plugin directory. + show a simple directory of files, but actually, the directory structure has to follow the same structure you would + see if Terraform auto-downloaded the plugins. 
See the examples below for a tree output of an example plugin directory. type: list elements: path version_added: 3.0.0 workspace: description: - - The terraform workspace to work with. This sets the E(TF_WORKSPACE) environmental variable that is used to override workspace selection. - For more information about workspaces have a look at U(https://developer.hashicorp.com/terraform/language/state/workspaces). + - The terraform workspace to work with. This sets the E(TF_WORKSPACE) environmental variable that is used to override + workspace selection. For more information about workspaces have a look at U(https://developer.hashicorp.com/terraform/language/state/workspaces). type: str default: default purge_workspace: description: - Only works with state = absent. - - If true, the workspace will be deleted after the "terraform destroy" action. - - The 'default' workspace will not be deleted. + - If V(true), the O(workspace) is deleted after the C(terraform destroy) action. + - If O(workspace=default) then it is not deleted. default: false type: bool plan_file: description: - - The path to an existing Terraform plan file to apply. If this is not specified, Ansible will build a new TF plan and execute it. Note - that this option is required if 'state' has the 'planned' value. + - The path to an existing Terraform plan file to apply. If this is not specified, Ansible builds a new TF plan and execute + it. Note that this option is required if O(state=planned). type: path state_file: description: - - The path to an existing Terraform state file to use when building plan. If this is not specified, the default C(terraform.tfstate) will - be used. + - The path to an existing Terraform state file to use when building plan. If this is not specified, the default C(terraform.tfstate) + is used. - This option is ignored when plan is specified. type: path variables_files: description: - - The path to a variables file for Terraform to fill into the TF configurations. This can accept a list of paths to multiple variables files. + - The path to a variables file for Terraform to fill into the TF configurations. This can accept a list of paths to + multiple variables files. type: list elements: path aliases: ['variables_file'] variables: description: - - A group of key-values pairs to override template variables or those in variables files. By default, only string and number values are - allowed, which are passed on unquoted. - - Support complex variable structures (lists, dictionaries, numbers, and booleans) to reflect terraform variable syntax when O(complex_vars=true). + - A group of key-values pairs to override template variables or those in variables files. By default, only string and + number values are allowed, which are passed on unquoted. + - Support complex variable structures (lists, dictionaries, numbers, and booleans) to reflect terraform variable syntax + when O(complex_vars=true). - Ansible integers or floats are mapped to terraform numbers. - Ansible strings are mapped to terraform strings. - Ansible dictionaries are mapped to terraform objects. - Ansible lists are mapped to terraform lists. - Ansible booleans are mapped to terraform booleans. - - B(Note) passwords passed as variables will be visible in the log output. Make sure to use C(no_log=true) in production!. + - B(Note) passwords passed as variables are visible in the log output. Make sure to use C(no_log=true) in production!. 
     type: dict
   complex_vars:
     description:
       - Enable/disable capability to handle complex variable structures for C(terraform).
-      - If V(true) the O(variables) also accepts dictionaries, lists, and booleans to be passed to C(terraform). Strings that are passed are correctly
-        quoted.
+      - If V(true), the O(variables) also accepts dictionaries, lists, and booleans to be passed to C(terraform). Strings that
+        are passed are correctly quoted.
       - When disabled, supports only simple variables (strings, integers, and floats), and passes them on unquoted.
     type: bool
     default: false
     version_added: 5.7.0
   targets:
     description:
-      - A list of specific resources to target in this plan/application. The resources selected here will also auto-include any dependencies.
+      - A list of specific resources to target in this plan/application. The resources selected here also auto-include
+        any dependencies.
     type: list
     elements: str
     default: []
@@ -117,8 +120,8 @@ options:
     type: int
   force_init:
     description:
-      - To avoid duplicating infra, if a state file cannot be found this will force a C(terraform init). Generally, this should be turned off unless
-        you intend to provision an entirely new Terraform deployment.
+      - To avoid duplicating infra, if a state file cannot be found, this forces a C(terraform init). Generally, this should
+        be turned off unless you intend to provision an entirely new Terraform deployment.
     default: false
     type: bool
   overwrite_init:
@@ -133,8 +136,8 @@ options:
     type: dict
   backend_config_files:
     description:
-      - The path to a configuration file to provide at init state to the -backend-config parameter. This can accept a list of paths to multiple
-        configuration files.
+      - The path to a configuration file to provide at init state to the -backend-config parameter. This can accept a list
+        of paths to multiple configuration files.
     type: list
     elements: path
     version_added: '0.2.0'
@@ -152,8 +155,8 @@ options:
     version_added: '1.3.0'
   check_destroy:
     description:
-      - Apply only when no resources are destroyed. Note that this only prevents "destroy" actions, but not "destroy and re-create" actions. This
-        option is ignored when O(state=absent).
+      - Apply only when no resources are destroyed. Note that this only prevents "destroy" actions, but not "destroy and re-create"
+        actions. This option is ignored when O(state=absent).
     type: bool
     default: false
     version_added: '3.3.0'
@@ -162,6 +165,13 @@ options:
       - Restrict concurrent operations when Terraform applies the plan.
     type: int
     version_added: '3.8.0'
+  no_color:
+    description:
+      - If V(true), suppresses color codes in the output from Terraform commands.
+      - If V(false), allows Terraform to use color codes in its output.
+    type: bool
+    default: true
+    version_added: 11.0.0
 notes:
   - To just run a C(terraform plan), use check mode.
 requirements: ["terraform"]
@@ -174,6 +184,12 @@ EXAMPLES = r"""
     project_path: '{{ project_dir }}'
     state: present
 
+- name: Deploy with color output enabled
+  community.general.terraform:
+    project_path: '{{ project_dir }}'
+    state: present
+    no_color: false
+
 - name: Define the backend configuration at init
   community.general.terraform:
     project_path: 'project/'
@@ -239,7 +255,8 @@ EXAMPLES = r"""
 RETURN = r"""
 outputs:
   type: complex
-  description: A dictionary of all the TF outputs by their assigned name. Use RV(ignore:outputs.MyOutputName.value) to access the value.
+  description: A dictionary of all the TF outputs by their assigned name. Use RV(ignore:outputs.MyOutputName.value) to access
+    the value.
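To make the variables type mapping documented above concrete, here is an illustrative dictionary for O(complex_vars=true); the keys and values are hypothetical, and the comments name the Terraform type each Python value is rendered as:

# Illustrative variables for complex_vars=true (hypothetical names/values).
variables = {
    "region": "us-east-1",       # str   -> terraform string
    "instance_count": 3,         # int   -> terraform number
    "enable_logging": True,      # bool  -> terraform bool
    "tags": {"env": "staging"},  # dict  -> terraform object
    "zones": ["a", "b"],         # list  -> terraform list
}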
returned: on success sample: '{"bukkit_arn": {"sensitive": false, "type": "string", "value": "arn:aws:s3:::tf-test-bukkit"}' contains: @@ -255,14 +272,10 @@ outputs: type: str returned: always description: The value of the output as interpolated by Terraform. -stdout: - type: str - description: Full C(terraform) command stdout, in case you want to display it or examine the event log. - returned: always - sample: '' command: type: str - description: Full C(terraform) command built by this module, in case you want to re-run the command outside the module or debug a problem. + description: Full C(terraform) command built by this module, in case you want to re-run the command outside the module or + debug a problem. returned: always sample: terraform apply ... """ @@ -286,17 +299,20 @@ def get_version(bin_path): return terraform_version -def preflight_validation(bin_path, project_path, version, variables_args=None, plan_file=None): +def preflight_validation(bin_path, project_path, version, variables_args=None, plan_file=None, no_color=True): if project_path is None or '/' not in project_path: module.fail_json(msg="Path for Terraform project can not be None or ''.") if not os.path.exists(bin_path): module.fail_json(msg="Path for Terraform binary '{0}' doesn't exist on this host - check the path and try again please.".format(bin_path)) if not os.path.isdir(project_path): module.fail_json(msg="Path for Terraform project '{0}' doesn't exist on this host - check the path and try again please.".format(project_path)) + cmd = [bin_path, 'validate'] + if no_color: + cmd.append('-no-color') if LooseVersion(version) < LooseVersion('0.15.0'): - module.run_command([bin_path, 'validate', '-no-color'] + variables_args, check_rc=True, cwd=project_path) + module.run_command(cmd + variables_args, check_rc=True, cwd=project_path) else: - module.run_command([bin_path, 'validate', '-no-color'], check_rc=True, cwd=project_path) + module.run_command(cmd, check_rc=True, cwd=project_path) def _state_args(state_file): @@ -307,8 +323,10 @@ def _state_args(state_file): return ['-state', state_file] -def init_plugins(bin_path, project_path, backend_config, backend_config_files, init_reconfigure, provider_upgrade, plugin_paths, workspace): - command = [bin_path, 'init', '-input=false', '-no-color'] +def init_plugins(bin_path, project_path, backend_config, backend_config_files, init_reconfigure, provider_upgrade, plugin_paths, workspace, no_color=True): + command = [bin_path, 'init', '-input=false'] + if no_color: + command.append('-no-color') if backend_config: for key, val in backend_config.items(): command.extend([ @@ -328,9 +346,12 @@ def init_plugins(bin_path, project_path, backend_config, backend_config_files, i rc, out, err = module.run_command(command, check_rc=True, cwd=project_path, environ_update={"TF_WORKSPACE": workspace}) -def get_workspace_context(bin_path, project_path): +def get_workspace_context(bin_path, project_path, no_color=True): workspace_ctx = {"current": "default", "all": []} - command = [bin_path, 'workspace', 'list', '-no-color'] + command = [bin_path, 'workspace', 'list'] + if no_color: + command.append('-no-color') + rc, out, err = module.run_command(command, cwd=project_path) if rc != 0: module.warn("Failed to list Terraform workspaces:\n{0}".format(err)) @@ -346,25 +367,27 @@ def get_workspace_context(bin_path, project_path): return workspace_ctx -def _workspace_cmd(bin_path, project_path, action, workspace): - command = [bin_path, 'workspace', action, workspace, '-no-color'] +def 
_workspace_cmd(bin_path, project_path, action, workspace, no_color=True): + command = [bin_path, 'workspace', action, workspace] + if no_color: + command.append('-no-color') rc, out, err = module.run_command(command, check_rc=True, cwd=project_path) return rc, out, err -def create_workspace(bin_path, project_path, workspace): - _workspace_cmd(bin_path, project_path, 'new', workspace) +def create_workspace(bin_path, project_path, workspace, no_color=True): + _workspace_cmd(bin_path, project_path, 'new', workspace, no_color) -def select_workspace(bin_path, project_path, workspace): - _workspace_cmd(bin_path, project_path, 'select', workspace) +def select_workspace(bin_path, project_path, workspace, no_color=True): + _workspace_cmd(bin_path, project_path, 'select', workspace, no_color) -def remove_workspace(bin_path, project_path, workspace): - _workspace_cmd(bin_path, project_path, 'delete', workspace) +def remove_workspace(bin_path, project_path, workspace, no_color=True): + _workspace_cmd(bin_path, project_path, 'delete', workspace, no_color) -def build_plan(command, project_path, variables_args, state_file, targets, state, args, plan_path=None): +def build_plan(command, project_path, variables_args, state_file, targets, state, args, plan_path=None, no_color=True): if plan_path is None: f, plan_path = tempfile.mkstemp(suffix='.tfplan') @@ -386,7 +409,10 @@ def build_plan(command, project_path, variables_args, state_file, targets, state for a in args: plan_command.append(a) - plan_command.extend(['-input=false', '-no-color', '-detailed-exitcode', '-out', plan_path]) + plan_options = ['-input=false', '-detailed-exitcode', '-out', plan_path] + if no_color: + plan_options.insert(0, '-no-color') + plan_command.extend(plan_options) for t in targets: plan_command.extend(['-target', t]) @@ -490,6 +516,7 @@ def main(): check_destroy=dict(type='bool', default=False), parallelism=dict(type='int'), provider_upgrade=dict(type='bool', default=False), + no_color=dict(type='bool', default=True), ), required_if=[('state', 'planned', ['plan_file'])], supports_check_mode=True, @@ -513,6 +540,7 @@ def main(): overwrite_init = module.params.get('overwrite_init') check_destroy = module.params.get('check_destroy') provider_upgrade = module.params.get('provider_upgrade') + no_color = module.params.get('no_color') if bin_path is not None: command = [bin_path] @@ -522,22 +550,30 @@ def main(): checked_version = get_version(command[0]) if LooseVersion(checked_version) < LooseVersion('0.15.0'): - DESTROY_ARGS = ('destroy', '-no-color', '-force') - APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve=true') + if no_color: + DESTROY_ARGS = ('destroy', '-no-color', '-force') + APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve=true') + else: + DESTROY_ARGS = ('destroy', '-force') + APPLY_ARGS = ('apply', '-input=false', '-auto-approve=true') else: - DESTROY_ARGS = ('destroy', '-no-color', '-auto-approve') - APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve') + if no_color: + DESTROY_ARGS = ('destroy', '-no-color', '-auto-approve') + APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve') + else: + DESTROY_ARGS = ('destroy', '-auto-approve') + APPLY_ARGS = ('apply', '-input=false', '-auto-approve') if force_init: if overwrite_init or not os.path.isfile(os.path.join(project_path, ".terraform", "terraform.tfstate")): - init_plugins(command[0], project_path, backend_config, backend_config_files, init_reconfigure, provider_upgrade, plugin_paths, workspace) + 
init_plugins(command[0], project_path, backend_config, backend_config_files, init_reconfigure, provider_upgrade, plugin_paths, workspace, no_color) - workspace_ctx = get_workspace_context(command[0], project_path) + workspace_ctx = get_workspace_context(command[0], project_path, no_color) if workspace_ctx["current"] != workspace: if workspace not in workspace_ctx["all"]: - create_workspace(command[0], project_path, workspace) + create_workspace(command[0], project_path, workspace, no_color) else: - select_workspace(command[0], project_path, workspace) + select_workspace(command[0], project_path, workspace, no_color) if state == 'present': command.extend(APPLY_ARGS) @@ -622,7 +658,7 @@ def main(): for f in variables_files: variables_args.extend(['-var-file', f]) - preflight_validation(command[0], project_path, checked_version, variables_args) + preflight_validation(command[0], project_path, checked_version, variables_args, plan_file, no_color) if module.params.get('lock') is not None: if module.params.get('lock'): @@ -649,7 +685,7 @@ def main(): module.fail_json(msg='Could not find plan_file "{0}", check the path and try again.'.format(plan_file)) else: plan_file, needs_application, out, err, command = build_plan(command, project_path, variables_args, state_file, - module.params.get('targets'), state, APPLY_ARGS, plan_file) + module.params.get('targets'), state, APPLY_ARGS, plan_file, no_color) if state == 'present' and check_destroy and '- destroy' in out: module.fail_json(msg="Aborting command because it would destroy some resources. " "Consider switching the 'check_destroy' to false to suppress this error") @@ -660,13 +696,13 @@ def main(): if state == 'absent': plan_absent_args = ['-destroy'] plan_file, needs_application, out, err, command = build_plan(command, project_path, variables_args, state_file, - module.params.get('targets'), state, plan_absent_args, plan_file) + module.params.get('targets'), state, plan_absent_args, plan_file, no_color) diff_command = [command[0], 'show', '-json', plan_file] rc, diff_output, err = module.run_command(diff_command, check_rc=False, cwd=project_path) changed, result_diff = get_diff(diff_output) if rc != 0: if workspace_ctx["current"] != workspace: - select_workspace(command[0], project_path, workspace_ctx["current"]) + select_workspace(command[0], project_path, workspace_ctx["current"], no_color) module.fail_json(msg=err.rstrip(), rc=rc, stdout=out, stdout_lines=out.splitlines(), stderr=err, stderr_lines=err.splitlines(), @@ -676,7 +712,7 @@ def main(): rc, out, err = module.run_command(command, check_rc=False, cwd=project_path) if rc != 0: if workspace_ctx["current"] != workspace: - select_workspace(command[0], project_path, workspace_ctx["current"]) + select_workspace(command[0], project_path, workspace_ctx["current"], no_color) module.fail_json(msg=err.rstrip(), rc=rc, stdout=out, stdout_lines=out.splitlines(), stderr=err, stderr_lines=err.splitlines(), @@ -685,7 +721,11 @@ def main(): if ' 0 added, 0 changed' not in out and not state == "absent" or ' 0 destroyed' not in out: changed = True - outputs_command = [command[0], 'output', '-no-color', '-json'] + _state_args(state_file) + if no_color: + outputs_command = [command[0], 'output', '-no-color', '-json'] + _state_args(state_file) + else: + outputs_command = [command[0], 'output', '-json'] + _state_args(state_file) + rc, outputs_text, outputs_err = module.run_command(outputs_command, cwd=project_path) outputs = {} if rc == 1: @@ -700,9 +740,9 @@ def main(): # Restore the Terraform workspace 
found when running the module
     if workspace_ctx["current"] != workspace:
-        select_workspace(command[0], project_path, workspace_ctx["current"])
+        select_workspace(command[0], project_path, workspace_ctx["current"], no_color)
     if state == 'absent' and workspace != 'default' and purge_workspace is True:
-        remove_workspace(command[0], project_path, workspace)
+        remove_workspace(command[0], project_path, workspace, no_color)

     result = {
         'state': state,
diff --git a/plugins/modules/timezone.py b/plugins/modules/timezone.py
index 12a0addd33..6e105c0bad 100644
--- a/plugins/modules/timezone.py
+++ b/plugins/modules/timezone.py
@@ -12,14 +12,14 @@ DOCUMENTATION = r"""
 module: timezone
 short_description: Configure timezone setting
 description:
-  - This module configures the timezone setting, both of the system clock and of the hardware clock. If you want to set up the NTP, use
-    M(ansible.builtin.service) module.
+  - This module configures the timezone setting, both of the system clock and of the hardware clock. If you want to set up
+    NTP, use the M(ansible.builtin.service) module.
   - It is recommended to restart C(crond) after changing the timezone, otherwise the jobs may run at the wrong time.
-  - Several different tools are used depending on the OS/Distribution involved. For Linux it can use C(timedatectl) or edit C(/etc/sysconfig/clock)
-    or C(/etc/timezone) and C(hwclock). On SmartOS, C(sm-set-timezone), for macOS, C(systemsetup), for BSD, C(/etc/localtime) is modified. On
-    AIX, C(chtz) is used.
-  - Make sure that the zoneinfo files are installed with the appropriate OS package, like C(tzdata) (usually always installed, when not using
-    a minimal installation like Alpine Linux).
+  - Several different tools are used depending on the OS/Distribution involved. For Linux it can use C(timedatectl) or edit
+    C(/etc/sysconfig/clock) or C(/etc/timezone) and C(hwclock). On SmartOS C(sm-set-timezone) is used; on macOS, C(systemsetup);
+    on BSD, C(/etc/localtime) is modified. On AIX, C(chtz) is used.
+  - Make sure that the zoneinfo files are installed with the appropriate OS package, like C(tzdata) (usually always installed,
+    when not using a minimal installation like Alpine Linux).
   - Windows and HPUX are not supported, please let us know if you find any other OS/distro in which this fails.
 extends_documentation_fragment:
   - community.general.attributes
@@ -39,7 +39,8 @@ options:
     description:
       - Whether the hardware clock is in UTC or in local timezone.
       - Default is to keep current setting.
-      - Note that this option is recommended not to change and may fail to configure, especially on virtual environments such as AWS.
+      - Note that it is recommended not to change this option, as it may fail to configure, especially on virtual environments
+        such as AWS.
       - B(At least one) of O(name) and O(hwclock) are required.
       - I(Only used on Linux).
     type: str
@@ -48,28 +49,14 @@ notes:
   - On Ubuntu 24.04 the C(util-linux-extra) package is required to provide the C(hwclock) command.
   - On SmartOS the C(sm-set-timezone) utility (part of the smtools package) is required to set the zone timezone.
-  - On AIX only Olson/tz database timezones are usable (POSIX is not supported). An OS reboot is also required on AIX for the new timezone setting
-    to take effect. Note that AIX 6.1+ is needed (OS level 61 or newer).
+  - On AIX only Olson/tz database timezones are usable (POSIX is not supported). An OS reboot is also required on AIX for
+    the new timezone setting to take effect. Note that AIX 6.1+ is needed (OS level 61 or newer).
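The C(/etc/sysconfig/clock) handling refactored further down in this diff hinges on one detail worth illustrating: SUSE keys the timezone as C(TIMEZONE=) while RHEL/CentOS use C(ZONE=). A small sketch of that detection; the regex shapes and sample file contents are illustrative, not the module's exact C(dist_regexps):

```python
import re

# Assumed sample contents of /etc/sysconfig/clock on a SUSE-style system.
sysconfig_clock = 'TIMEZONE="Europe/Berlin"\nHWCLOCK="-u"\n'

if re.search(r'^TIMEZONE\s*=', sysconfig_clock, re.MULTILINE):
    name_re = re.compile(r'^TIMEZONE\s*=\s*"?([^"\n]+)"?', re.MULTILINE)  # SUSE
else:
    name_re = re.compile(r'^ZONE\s*=\s*"?([^"\n]+)"?', re.MULTILINE)      # RHEL/CentOS

match = name_re.search(sysconfig_clock)
print(match.group(1) if match else None)  # -> Europe/Berlin
```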
author: - Shinichi TAMURA (@tmshn) - Jasper Lievisse Adriaanse (@jasperla) - Indrajit Raychaudhuri (@indrajitr) """ -RETURN = r""" -diff: - description: The differences about the given arguments. - returned: success - type: complex - contains: - before: - description: The values before change. - type: dict - after: - description: The values after change. - type: dict -""" - EXAMPLES = r""" - name: Set timezone to Asia/Tokyo become: true @@ -395,7 +382,8 @@ class NosystemdTimezone(Timezone): self.conf_files['name'] = '/etc/sysconfig/clock' self.conf_files['hwclock'] = '/etc/sysconfig/clock' try: - f = open(self.conf_files['name'], 'r') + with open(self.conf_files['name'], 'r') as f: + sysconfig_clock = f.read() except IOError as err: if self._allow_ioerror(err, 'name'): # If the config file doesn't exist detect the distribution and set regexps. @@ -413,8 +401,6 @@ class NosystemdTimezone(Timezone): # The key for timezone might be `ZONE` or `TIMEZONE` # (the former is used in RHEL/CentOS and the latter is used in SUSE linux). # So check the content of /etc/sysconfig/clock and decide which key to use. - sysconfig_clock = f.read() - f.close() if re.search(r'^TIMEZONE\s*=', sysconfig_clock, re.MULTILINE): # For SUSE self.regexps['name'] = self.dist_regexps['SuSE'] @@ -447,15 +433,13 @@ class NosystemdTimezone(Timezone): """ # Read the file try: - file = open(filename, 'r') + with open(filename, 'r') as file: + lines = file.readlines() except IOError as err: if self._allow_ioerror(err, key): lines = [] else: self.abort('tried to configure %s using a file "%s", but could not read it' % (key, filename)) - else: - lines = file.readlines() - file.close() # Find the all matched lines matched_indices = [] for i, line in enumerate(lines): @@ -472,18 +456,17 @@ class NosystemdTimezone(Timezone): lines.insert(insert_line, value) # Write the changes try: - file = open(filename, 'w') + with open(filename, 'w') as file: + file.writelines(lines) except IOError: self.abort('tried to configure %s using a file "%s", but could not write to it' % (key, filename)) - else: - file.writelines(lines) - file.close() self.msg.append('Added 1 line and deleted %s line(s) on %s' % (len(matched_indices), filename)) def _get_value_from_config(self, key, phase): filename = self.conf_files[key] try: - file = open(filename, mode='r') + with open(filename, mode='r') as file: + status = file.read() except IOError as err: if self._allow_ioerror(err, key): if key == 'hwclock': @@ -495,8 +478,6 @@ class NosystemdTimezone(Timezone): else: self.abort('tried to configure %s using a file "%s", but could not read it' % (key, filename)) else: - status = file.read() - file.close() try: value = self.regexps[key].search(status).group(1) except AttributeError: @@ -627,11 +608,11 @@ class SmartOSTimezone(Timezone): """ if key == 'name': try: - f = open('/etc/default/init', 'r') - for line in f: - m = re.match('^TZ=(.*)$', line.strip()) - if m: - return m.groups()[0] + with open('/etc/default/init', 'r') as f: + for line in f: + m = re.match('^TZ=(.*)$', line.strip()) + if m: + return m.groups()[0] except Exception: self.module.fail_json(msg='Failed to read /etc/default/init') else: @@ -810,9 +791,8 @@ class AIXTimezone(Timezone): def __get_timezone(self): """ Return the current value of TZ= in /etc/environment """ try: - f = open('/etc/environment', 'r') - etcenvironment = f.read() - f.close() + with open('/etc/environment', 'r') as f: + etcenvironment = f.read() except Exception: self.module.fail_json(msg='Issue reading contents of 
/etc/environment')
diff --git a/plugins/modules/twilio.py b/plugins/modules/twilio.py
index 21cc09a272..4d9dd6ac19 100644
--- a/plugins/modules/twilio.py
+++ b/plugins/modules/twilio.py
@@ -15,9 +15,10 @@ short_description: Sends a text message to a mobile phone through Twilio
 description:
   - Sends a text message to a phone number through the Twilio messaging API.
 notes:
-  - This module is non-idempotent because it sends an email through the external API. It is idempotent only in the case that the module fails.
-  - Like the other notification modules, this one requires an external dependency to work. In this case, you need a Twilio account with a purchased
-    or verified phone number to send the text message.
+  - This module is non-idempotent because it sends a text message through the external API. It is idempotent only in the case that
+    the module fails.
+  - Like the other notification modules, this one requires an external dependency to work. In this case, you need a Twilio
+    account with a purchased or verified phone number to send the text message.
 extends_documentation_fragment:
   - community.general.attributes
 attributes:
@@ -149,7 +150,7 @@ def main():
             msg=dict(required=True),
             from_number=dict(required=True),
             to_numbers=dict(required=True, aliases=['to_number'], type='list', elements='str'),
-            media_url=dict(default=None, required=False),
+            media_url=dict(),
         ),
         supports_check_mode=True
     )
diff --git a/plugins/modules/typetalk.py b/plugins/modules/typetalk.py
index 6364cdc45b..8728bfb21a 100644
--- a/plugins/modules/typetalk.py
+++ b/plugins/modules/typetalk.py
@@ -14,6 +14,10 @@ module: typetalk
 short_description: Send a message to typetalk
 description:
   - Send a message to typetalk using typetalk API.
+deprecated:
+  removed_in: 13.0.0
+  why: The typetalk service will be discontinued in December 2025. See U(https://nulab.com/blog/company-news/typetalk-sunsetting/).
+  alternative: There is none.
 extends_documentation_fragment:
   - community.general.attributes
 attributes:
@@ -35,7 +39,7 @@ options:
   topic:
     type: int
     description:
-      - Topic id to post message.
+      - Topic ID to post the message to.
     required: true
   msg:
     type: str
diff --git a/plugins/modules/udm_dns_record.py b/plugins/modules/udm_dns_record.py
index c5bab67b97..989757f10c 100644
--- a/plugins/modules/udm_dns_record.py
+++ b/plugins/modules/udm_dns_record.py
@@ -16,8 +16,8 @@ author:
   - Tobias Rüetschi (@keachi)
 short_description: Manage DNS entries on a univention corporate server
 description:
-  - This module allows to manage DNS records on a univention corporate server (UCS). It uses the Python API of the UCS to create a new object
-    or edit it.
+  - This module allows to manage DNS records on a univention corporate server (UCS). It uses the Python API of the UCS to
+    create a new object or edit it.
 requirements:
   - Univention
   - ipaddress (for O(type=ptr_record))
@@ -51,8 +51,8 @@ options:
     type: str
     required: true
     description:
-      - Define the record type. V(host_record) is a A or AAAA record, V(alias) is a CNAME, V(ptr_record) is a PTR record, V(srv_record) is a SRV
-        record and V(txt_record) is a TXT record.
+      - Define the record type. V(host_record) is an A or AAAA record, V(alias) is a CNAME, V(ptr_record) is a PTR record,
+        V(srv_record) is an SRV record, and V(txt_record) is a TXT record.
       - 'The available choices are: V(host_record), V(alias), V(ptr_record), V(srv_record), V(txt_record).'
data: type: dict diff --git a/plugins/modules/udm_dns_zone.py b/plugins/modules/udm_dns_zone.py index 7aa5711a9b..7075572e73 100644 --- a/plugins/modules/udm_dns_zone.py +++ b/plugins/modules/udm_dns_zone.py @@ -16,8 +16,8 @@ author: - Tobias Rüetschi (@keachi) short_description: Manage DNS zones on a univention corporate server description: - - This module allows to manage DNS zones on a univention corporate server (UCS). It uses the Python API of the UCS to create a new object or - edit it. + - This module allows to manage DNS zones on a univention corporate server (UCS). It uses the Python API of the UCS to create + a new object or edit it. extends_documentation_fragment: - community.general.attributes attributes: @@ -102,7 +102,7 @@ EXAMPLES = r""" """ -RETURN = """# """ +RETURN = """#""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.univention_umc import ( diff --git a/plugins/modules/udm_group.py b/plugins/modules/udm_group.py index 238b0182ed..b8cb70d4dd 100644 --- a/plugins/modules/udm_group.py +++ b/plugins/modules/udm_group.py @@ -16,8 +16,8 @@ author: - Tobias Rüetschi (@keachi) short_description: Manage of the posix group description: - - This module allows to manage user groups on a univention corporate server (UCS). It uses the Python API of the UCS to create a new object - or edit it. + - This module allows to manage user groups on a univention corporate server (UCS). It uses the Python API of the UCS to + create a new object or edit it. extends_documentation_fragment: - community.general.attributes attributes: @@ -85,7 +85,7 @@ EXAMPLES = r""" """ -RETURN = """# """ +RETURN = """#""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.univention_umc import ( diff --git a/plugins/modules/udm_share.py b/plugins/modules/udm_share.py index 689181d83e..d0554375db 100644 --- a/plugins/modules/udm_share.py +++ b/plugins/modules/udm_share.py @@ -16,8 +16,8 @@ author: - Tobias Rüetschi (@keachi) short_description: Manage samba shares on a univention corporate server description: - - This module allows to manage samba shares on a univention corporate server (UCS). It uses the Python API of the UCS to create a new object - or edit it. + - This module allows to manage samba shares on a univention corporate server (UCS). It uses the Python API of the UCS to + create a new object or edit it. extends_documentation_fragment: - community.general.attributes attributes: @@ -340,7 +340,7 @@ EXAMPLES = r""" """ -RETURN = """# """ +RETURN = """#""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.univention_umc import ( diff --git a/plugins/modules/udm_user.py b/plugins/modules/udm_user.py index bb431ca75f..46f6e696b2 100644 --- a/plugins/modules/udm_user.py +++ b/plugins/modules/udm_user.py @@ -16,11 +16,11 @@ author: - Tobias Rüetschi (@keachi) short_description: Manage posix users on a univention corporate server description: - - This module allows to manage posix users on a univention corporate server (UCS). It uses the Python API of the UCS to create a new object - or edit it. + - This module allows to manage posix users on a univention corporate server (UCS). It uses the Python API of the UCS to + create a new object or edit it. 
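The requirements note just below turns on a detail worth showing: the stdlib C(crypt) module is gone in Python 3.13, and C(legacycrypt) is meant as its stand-in. A minimal sketch of the fallback import, assuming legacycrypt mirrors the old stdlib API as its project page describes:

```python
# Fallback import: stdlib crypt exists up to Python 3.12, legacycrypt after.
try:
    import crypt
except ImportError:
    import legacycrypt as crypt  # assumed drop-in replacement for the old API

# Hash a password the way password-setting modules typically do (SHA-512 scheme).
hashed = crypt.crypt('s3cr3t', crypt.mksalt(crypt.METHOD_SHA512))
print(hashed)
```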
 notes:
-  - This module requires the deprecated L(crypt Python module, https://docs.python.org/3.12/library/crypt.html) library which was removed from
-    Python 3.13. For Python 3.13 or newer, you need to install L(legacycrypt, https://pypi.org/project/legacycrypt/).
+  - This module requires the deprecated L(crypt Python module, https://docs.python.org/3.12/library/crypt.html) library which
+    was removed from Python 3.13. For Python 3.13 or newer, you need to install L(legacycrypt, https://pypi.org/project/legacycrypt/).
 requirements:
   - legacycrypt (on Python 3.13 or newer)
 extends_documentation_fragment:
@@ -104,7 +104,7 @@ options:
   groups:
     default: []
     description:
-      - 'POSIX groups, the LDAP DNs of the groups will be found with the LDAP filter for each group as $GROUP: V((&(objectClass=posixGroup\)(cn=$GROUP\)\)).'
+      - 'POSIX groups, the LDAP DNs of the groups are found with the LDAP filter for each group as $GROUP: V((&(objectClass=posixGroup\)(cn=$GROUP\)\)).'
     type: list
     elements: str
   home_share:
@@ -273,7 +273,8 @@ options:
     default: always
     choices: [always, on_create]
     description:
-      - V(always) will update passwords if they differ. V(on_create) will only set the password for newly created users.
+      - V(always) updates passwords if they differ.
+      - V(on_create) only sets the password for newly created users.
     type: str
   ou:
     default: ''
@@ -316,7 +317,7 @@ EXAMPLES = r"""
 """

-RETURN = """# """
+RETURN = """#"""

 from datetime import date, timedelta
 import traceback
diff --git a/plugins/modules/ufw.py b/plugins/modules/ufw.py
index e0d765eeac..ca4e977f4f 100644
--- a/plugins/modules/ufw.py
+++ b/plugins/modules/ufw.py
@@ -22,9 +22,9 @@ author:
   - Ahti Kitsik (@ahtik)
 notes:
   - See C(man ufw) for more examples.
-  - "B(Warning:) Whilst the module itself can be run using concurrent strategies, C(ufw) does not support concurrency, as firewall rules are meant
-    to be ordered and parallel executions do not guarantee order. B(Do not use concurrency:) The results are unpredictable and the module may
-    fail silently if you do."
+  - B(Warning:) Whilst the module itself can be run using concurrent strategies, C(ufw) does not support concurrency, as firewall
+    rules are meant to be ordered and parallel executions do not guarantee order. B(Do not use concurrency:) The results are
+    unpredictable and the module may fail silently if you do.
 requirements:
   - C(ufw) package
 extends_documentation_fragment:
@@ -69,14 +69,14 @@ options:
     description:
       - Allows to interpret the index in O(insert) relative to a position.
       - V(zero) interprets the rule number as an absolute index (that is, 1 is the first rule).
-      - V(first-ipv4) interprets the rule number relative to the index of the first IPv4 rule, or relative to the position where the first IPv4
-        rule would be if there is currently none.
-      - V(last-ipv4) interprets the rule number relative to the index of the last IPv4 rule, or relative to the position where the last IPv4 rule
-        would be if there is currently none.
-      - V(first-ipv6) interprets the rule number relative to the index of the first IPv6 rule, or relative to the position where the first IPv6
-        rule would be if there is currently none.
-      - V(last-ipv6) interprets the rule number relative to the index of the last IPv6 rule, or relative to the position where the last IPv6 rule
-        would be if there is currently none.
+      - V(first-ipv4) interprets the rule number relative to the index of the first IPv4 rule, or relative to the position
+        where the first IPv4 rule would be if there is currently none.
+      - V(last-ipv4) interprets the rule number relative to the index of the last IPv4 rule, or relative to the position where
+        the last IPv4 rule would be if there is currently none.
+      - V(first-ipv6) interprets the rule number relative to the index of the first IPv6 rule, or relative to the position
+        where the first IPv6 rule would be if there is currently none.
+      - V(last-ipv6) interprets the rule number relative to the index of the last IPv6 rule, or relative to the position where
+        the last IPv6 rule would be if there is currently none.
     type: str
     choices: [first-ipv4, first-ipv6, last-ipv4, last-ipv6, zero]
     default: zero
@@ -114,8 +114,9 @@ options:
   proto:
     description:
       - TCP/IP protocol.
+      - The value V(vrrp) is supported since community.general 10.3.0.
     type: str
-    choices: [any, tcp, udp, ipv6, esp, ah, gre, igmp]
+    choices: [any, tcp, udp, ipv6, esp, ah, gre, igmp, vrrp]
     aliases: [protocol]
   name:
     description:
@@ -130,22 +131,22 @@ options:
     default: false
   interface:
     description:
-      - Specify interface for the rule. The direction (in or out) used for the interface depends on the value of O(direction). See O(interface_in)
-        and O(interface_out) for routed rules that needs to supply both an input and output interface. Mutually exclusive with O(interface_in)
-        and O(interface_out).
+      - Specify interface for the rule. The direction (in or out) used for the interface depends on the value of O(direction).
+        See O(interface_in) and O(interface_out) for routed rules that need to supply both an input and output interface.
+        Mutually exclusive with O(interface_in) and O(interface_out).
     type: str
     aliases: [if]
   interface_in:
     description:
-      - Specify input interface for the rule. This is mutually exclusive with O(direction) and O(interface). However, it is compatible with O(interface_out)
-        for routed rules.
+      - Specify input interface for the rule. This is mutually exclusive with O(direction) and O(interface). However, it is
+        compatible with O(interface_out) for routed rules.
     type: str
     aliases: [if_in]
     version_added: '0.2.0'
   interface_out:
     description:
-      - Specify output interface for the rule. This is mutually exclusive with O(direction) and O(interface). However, it is compatible with O(interface_in)
-        for routed rules.
+      - Specify output interface for the rule. This is mutually exclusive with O(direction) and O(interface). However, it
+        is compatible with O(interface_in) for routed rules.
     type: str
     aliases: [if_out]
     version_added: '0.2.0'
@@ -341,7 +342,7 @@ def main():
         from_port=dict(type='str'),
         to_ip=dict(type='str', default='any', aliases=['dest', 'to']),
         to_port=dict(type='str', aliases=['port']),
-        proto=dict(type='str', aliases=['protocol'], choices=['ah', 'any', 'esp', 'ipv6', 'tcp', 'udp', 'gre', 'igmp']),
+        proto=dict(type='str', aliases=['protocol'], choices=['ah', 'any', 'esp', 'ipv6', 'tcp', 'udp', 'gre', 'igmp', 'vrrp']),
         name=dict(type='str', aliases=['app']),
         comment=dict(type='str'),
     ),
diff --git a/plugins/modules/uptimerobot.py b/plugins/modules/uptimerobot.py
index ed6b6431f1..62f92ee0d0 100644
--- a/plugins/modules/uptimerobot.py
+++ b/plugins/modules/uptimerobot.py
@@ -12,7 +12,7 @@ DOCUMENTATION = r"""
 module: uptimerobot
 short_description: Pause and start Uptime Robot monitoring
 description:
-  - This module will let you start and pause Uptime Robot Monitoring.
+  - This module lets you start and pause Uptime Robot Monitoring.
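The new V(vrrp) protocol choice above maps onto a plain C(ufw) rule. A hedged sketch of the equivalent CLI call; the rule shape and the VRRP multicast address 224.0.0.18 are illustrative, and a ufw version that knows the C(vrrp) protocol name is assumed:

```python
import subprocess

# Allow VRRP keepalive traffic between cluster peers on eth0 (illustrative).
rule = ['ufw', 'allow', 'in', 'on', 'eth0',
        'from', 'any', 'to', '224.0.0.18', 'proto', 'vrrp']
subprocess.run(rule, check=True)
```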
author: "Nate Kingsley (@nate-kingsley)" requirements: - Valid Uptime Robot API Key diff --git a/plugins/modules/urpmi.py b/plugins/modules/urpmi.py index 9c08a22c21..454921eaf3 100644 --- a/plugins/modules/urpmi.py +++ b/plugins/modules/urpmi.py @@ -54,7 +54,8 @@ options: default: true root: description: - - Specifies an alternative install root, relative to which all packages will be installed. Corresponds to the C(--root) option for C(urpmi). + - Specifies an alternative install root, relative to which all packages are installed. Corresponds to the C(--root) + option for C(urpmi). aliases: [installroot] type: str author: @@ -183,7 +184,7 @@ def install_packages(module, pkgspec, root, force=True, no_recommends=True): def root_option(root): - if (root): + if root: return "--root=%s" % (root) else: return "" diff --git a/plugins/modules/utm_aaa_group.py b/plugins/modules/utm_aaa_group.py index b29f3d50af..d1444b5bd7 100644 --- a/plugins/modules/utm_aaa_group.py +++ b/plugins/modules/utm_aaa_group.py @@ -28,7 +28,7 @@ attributes: options: name: description: - - The name of the object. Will be used to identify the entry. + - The name of the object that identifies the entry. type: str required: true adirectory_groups: @@ -210,20 +210,20 @@ def main(): module = UTMModule( argument_spec=dict( name=dict(type='str', required=True), - adirectory_groups=dict(type='list', elements='str', required=False, default=[]), - adirectory_groups_sids=dict(type='dict', required=False, default={}), - backend_match=dict(type='str', required=False, default="none", + adirectory_groups=dict(type='list', elements='str', default=[]), + adirectory_groups_sids=dict(type='dict', default={}), + backend_match=dict(type='str', default="none", choices=["none", "adirectory", "edirectory", "radius", "tacacs", "ldap"]), - comment=dict(type='str', required=False, default=""), - dynamic=dict(type='str', required=False, default="none", choices=["none", "ipsec_dn", "directory_groups"]), - edirectory_groups=dict(type='list', elements='str', required=False, default=[]), - ipsec_dn=dict(type='str', required=False, default=""), - ldap_attribute=dict(type='str', required=False, default=""), - ldap_attribute_value=dict(type='str', required=False, default=""), - members=dict(type='list', elements='str', required=False, default=[]), - network=dict(type='str', required=False, default=""), - radius_groups=dict(type='list', elements='str', required=False, default=[]), - tacacs_groups=dict(type='list', elements='str', required=False, default=[]), + comment=dict(type='str', default=""), + dynamic=dict(type='str', default="none", choices=["none", "ipsec_dn", "directory_groups"]), + edirectory_groups=dict(type='list', elements='str', default=[]), + ipsec_dn=dict(type='str', default=""), + ldap_attribute=dict(type='str', default=""), + ldap_attribute_value=dict(type='str', default=""), + members=dict(type='list', elements='str', default=[]), + network=dict(type='str', default=""), + radius_groups=dict(type='list', elements='str', default=[]), + tacacs_groups=dict(type='list', elements='str', default=[]), ) ) try: diff --git a/plugins/modules/utm_aaa_group_info.py b/plugins/modules/utm_aaa_group_info.py index 4f073176f2..ee0d1c1234 100644 --- a/plugins/modules/utm_aaa_group_info.py +++ b/plugins/modules/utm_aaa_group_info.py @@ -29,7 +29,7 @@ options: name: type: str description: - - The name of the object. Will be used to identify the entry. + - The name of the object that identifies the entry. 
required: true extends_documentation_fragment: @@ -77,7 +77,7 @@ result: description: The comment string. type: str dynamic: - description: Whether the group match is ipsec_dn or directory_group. + description: Whether the group match is V(ipsec_dn) or V(directory_group). type: str edirectory_groups: description: List of eDirectory Groups. diff --git a/plugins/modules/utm_ca_host_key_cert.py b/plugins/modules/utm_ca_host_key_cert.py index b67531c061..1e6fa1c713 100644 --- a/plugins/modules/utm_ca_host_key_cert.py +++ b/plugins/modules/utm_ca_host_key_cert.py @@ -29,7 +29,7 @@ attributes: options: name: description: - - The name of the object. Will be used to identify the entry. + - The name of the object that identifies the entry. required: true type: str ca: @@ -148,9 +148,9 @@ def main(): ca=dict(type='str', required=True), meta=dict(type='str', required=True), certificate=dict(type='str', required=True), - comment=dict(type='str', required=False), - encrypted=dict(type='bool', required=False, default=False), - key=dict(type='str', required=False, no_log=True), + comment=dict(type='str'), + encrypted=dict(type='bool', default=False), + key=dict(type='str', no_log=True), ) ) try: diff --git a/plugins/modules/utm_ca_host_key_cert_info.py b/plugins/modules/utm_ca_host_key_cert_info.py index cab6657ab6..a0fcb97146 100644 --- a/plugins/modules/utm_ca_host_key_cert_info.py +++ b/plugins/modules/utm_ca_host_key_cert_info.py @@ -28,7 +28,7 @@ options: name: type: str description: - - The name of the object. Will be used to identify the entry. + - The name of the object that identifies the entry. required: true extends_documentation_fragment: diff --git a/plugins/modules/utm_dns_host.py b/plugins/modules/utm_dns_host.py index 3e0613a9cd..e1a63e1f73 100644 --- a/plugins/modules/utm_dns_host.py +++ b/plugins/modules/utm_dns_host.py @@ -29,7 +29,7 @@ options: name: type: str description: - - The name of the object. Will be used to identify the entry. + - The name of the object that identifies the entry. required: true address: type: str @@ -53,7 +53,7 @@ options: interface: type: str description: - - The reference name of the interface to use. If not provided the default interface will be used. + - The reference name of the interface to use. If not provided the default interface is used. default: '' resolved: description: @@ -130,7 +130,7 @@ result: description: Whether the ipv6 address is resolved or not. type: bool timeout: - description: The timeout until a new resolving will be attempted. + description: The timeout until a new resolving is attempted. 
type: int """ @@ -144,14 +144,14 @@ def main(): module = UTMModule( argument_spec=dict( name=dict(type='str', required=True), - address=dict(type='str', required=False, default='0.0.0.0'), - address6=dict(type='str', required=False, default='::'), - comment=dict(type='str', required=False, default=""), - hostname=dict(type='str', required=False), - interface=dict(type='str', required=False, default=""), - resolved=dict(type='bool', required=False, default=False), - resolved6=dict(type='bool', required=False, default=False), - timeout=dict(type='int', required=False, default=0), + address=dict(type='str', default='0.0.0.0'), + address6=dict(type='str', default='::'), + comment=dict(type='str', default=""), + hostname=dict(type='str'), + interface=dict(type='str', default=""), + resolved=dict(type='bool', default=False), + resolved6=dict(type='bool', default=False), + timeout=dict(type='int', default=0), ) ) try: diff --git a/plugins/modules/utm_network_interface_address.py b/plugins/modules/utm_network_interface_address.py index 1e3d2ee5c3..7212897655 100644 --- a/plugins/modules/utm_network_interface_address.py +++ b/plugins/modules/utm_network_interface_address.py @@ -29,7 +29,7 @@ options: name: type: str description: - - The name of the object. Will be used to identify the entry. + - The name of the object that identifies the entry. required: true address: type: str @@ -123,10 +123,10 @@ def main(): argument_spec=dict( name=dict(type='str', required=True), address=dict(type='str', required=True), - comment=dict(type='str', required=False, default=""), - address6=dict(type='str', required=False), - resolved=dict(type='bool', required=False), - resolved6=dict(type='bool', required=False), + comment=dict(type='str', default=""), + address6=dict(type='str'), + resolved=dict(type='bool'), + resolved6=dict(type='bool'), ) ) try: diff --git a/plugins/modules/utm_network_interface_address_info.py b/plugins/modules/utm_network_interface_address_info.py index b9c394c848..a5b3ff7b3b 100644 --- a/plugins/modules/utm_network_interface_address_info.py +++ b/plugins/modules/utm_network_interface_address_info.py @@ -27,7 +27,7 @@ options: name: type: str description: - - The name of the object. Will be used to identify the entry. + - The name of the object that identifies the entry. required: true extends_documentation_fragment: diff --git a/plugins/modules/utm_proxy_auth_profile.py b/plugins/modules/utm_proxy_auth_profile.py index 207c4ba156..96ae3aa869 100644 --- a/plugins/modules/utm_proxy_auth_profile.py +++ b/plugins/modules/utm_proxy_auth_profile.py @@ -30,7 +30,7 @@ options: name: type: str description: - - The name of the object. Will be used to identify the entry. + - The name of the object that identifies the entry. 
required: true aaa: type: list @@ -316,29 +316,29 @@ def main(): name=dict(type='str', required=True), aaa=dict(type='list', elements='str', required=True), basic_prompt=dict(type='str', required=True), - backend_mode=dict(type='str', required=False, default="None", choices=['Basic', 'None']), - backend_strip_basic_auth=dict(type='bool', required=False, default=True), - backend_user_prefix=dict(type='str', required=False, default=""), - backend_user_suffix=dict(type='str', required=False, default=""), - comment=dict(type='str', required=False, default=""), - frontend_cookie=dict(type='str', required=False), - frontend_cookie_secret=dict(type='str', required=False, no_log=True), - frontend_form=dict(type='str', required=False), - frontend_form_template=dict(type='str', required=False, default=""), - frontend_login=dict(type='str', required=False), - frontend_logout=dict(type='str', required=False), - frontend_mode=dict(type='str', required=False, default="Basic", choices=['Basic', 'Form']), - frontend_realm=dict(type='str', required=False), - frontend_session_allow_persistency=dict(type='bool', required=False, default=False), + backend_mode=dict(type='str', default="None", choices=['Basic', 'None']), + backend_strip_basic_auth=dict(type='bool', default=True), + backend_user_prefix=dict(type='str', default=""), + backend_user_suffix=dict(type='str', default=""), + comment=dict(type='str', default=""), + frontend_cookie=dict(type='str'), + frontend_cookie_secret=dict(type='str', no_log=True), + frontend_form=dict(type='str'), + frontend_form_template=dict(type='str', default=""), + frontend_login=dict(type='str'), + frontend_logout=dict(type='str'), + frontend_mode=dict(type='str', default="Basic", choices=['Basic', 'Form']), + frontend_realm=dict(type='str'), + frontend_session_allow_persistency=dict(type='bool', default=False), frontend_session_lifetime=dict(type='int', required=True), - frontend_session_lifetime_limited=dict(type='bool', required=False, default=True), - frontend_session_lifetime_scope=dict(type='str', required=False, default="hours", choices=['days', 'hours', 'minutes']), + frontend_session_lifetime_limited=dict(type='bool', default=True), + frontend_session_lifetime_scope=dict(type='str', default="hours", choices=['days', 'hours', 'minutes']), frontend_session_timeout=dict(type='int', required=True), - frontend_session_timeout_enabled=dict(type='bool', required=False, default=True), - frontend_session_timeout_scope=dict(type='str', required=False, default="minutes", choices=['days', 'hours', 'minutes']), - logout_delegation_urls=dict(type='list', elements='str', required=False, default=[]), - logout_mode=dict(type='str', required=False, default="None", choices=['None', 'Delegation']), - redirect_to_requested_url=dict(type='bool', required=False, default=False) + frontend_session_timeout_enabled=dict(type='bool', default=True), + frontend_session_timeout_scope=dict(type='str', default="minutes", choices=['days', 'hours', 'minutes']), + logout_delegation_urls=dict(type='list', elements='str', default=[]), + logout_mode=dict(type='str', default="None", choices=['None', 'Delegation']), + redirect_to_requested_url=dict(type='bool', default=False) ) ) try: diff --git a/plugins/modules/utm_proxy_exception.py b/plugins/modules/utm_proxy_exception.py index 96cb592e59..174156394c 100644 --- a/plugins/modules/utm_proxy_exception.py +++ b/plugins/modules/utm_proxy_exception.py @@ -29,7 +29,7 @@ attributes: options: name: description: - - The name of the object. 
Will be used to identify the entry. + - The name of the object that identifies the entry. required: true type: str op: @@ -220,20 +220,20 @@ def main(): module = UTMModule( argument_spec=dict( name=dict(type='str', required=True), - op=dict(type='str', required=False, default='AND', choices=['AND', 'OR']), - path=dict(type='list', elements='str', required=False, default=[]), - skip_custom_threats_filters=dict(type='list', elements='str', required=False, default=[]), - skip_threats_filter_categories=dict(type='list', elements='str', required=False, default=[]), - skipav=dict(type='bool', required=False, default=False), - skipbadclients=dict(type='bool', required=False, default=False), - skipcookie=dict(type='bool', required=False, default=False), - skipform=dict(type='bool', required=False, default=False), - skipform_missingtoken=dict(type='bool', required=False, default=False), - skiphtmlrewrite=dict(type='bool', required=False, default=False), - skiptft=dict(type='bool', required=False, default=False), - skipurl=dict(type='bool', required=False, default=False), - source=dict(type='list', elements='str', required=False, default=[]), - status=dict(type='bool', required=False, default=True), + op=dict(type='str', default='AND', choices=['AND', 'OR']), + path=dict(type='list', elements='str', default=[]), + skip_custom_threats_filters=dict(type='list', elements='str', default=[]), + skip_threats_filter_categories=dict(type='list', elements='str', default=[]), + skipav=dict(type='bool', default=False), + skipbadclients=dict(type='bool', default=False), + skipcookie=dict(type='bool', default=False), + skipform=dict(type='bool', default=False), + skipform_missingtoken=dict(type='bool', default=False), + skiphtmlrewrite=dict(type='bool', default=False), + skiptft=dict(type='bool', default=False), + skipurl=dict(type='bool', default=False), + source=dict(type='list', elements='str', default=[]), + status=dict(type='bool', default=True), ) ) try: diff --git a/plugins/modules/utm_proxy_frontend.py b/plugins/modules/utm_proxy_frontend.py index 1c3489f493..5330311516 100644 --- a/plugins/modules/utm_proxy_frontend.py +++ b/plugins/modules/utm_proxy_frontend.py @@ -30,7 +30,7 @@ options: name: type: str description: - - The name of the object. Will be used to identify the entry. + - The name of the object that identifies the entry. required: true add_content_type_header: description: @@ -76,12 +76,12 @@ options: default: [] htmlrewrite: description: - - Whether to enable html rewrite or not. + - Whether to enable HTML rewrite or not. type: bool default: false htmlrewrite_cookies: description: - - Whether to enable html rewrite cookie or not. + - Whether to enable HTML rewrite cookie or not. type: bool default: false implicitredirect: @@ -204,10 +204,10 @@ result: description: List of associated proxy exceptions. type: list htmlrewrite: - description: State of html rewrite. + description: State of HTML rewrite. type: bool htmlrewrite_cookies: - description: Whether the html rewrite cookie will be set. + description: Whether the HTML rewrite cookie is set. type: bool implicitredirect: description: Whether to use implicit redirection. 
@@ -251,26 +251,26 @@ def main(): module = UTMModule( argument_spec=dict( name=dict(type='str', required=True), - add_content_type_header=dict(type='bool', required=False, default=False), - address=dict(type='str', required=False, default="REF_DefaultInternalAddress"), - allowed_networks=dict(type='list', elements='str', required=False, default=["REF_NetworkAny"]), - certificate=dict(type='str', required=False, default=""), - comment=dict(type='str', required=False, default=""), - disable_compression=dict(type='bool', required=False, default=False), - domain=dict(type='list', elements='str', required=False), - exceptions=dict(type='list', elements='str', required=False, default=[]), - htmlrewrite=dict(type='bool', required=False, default=False), - htmlrewrite_cookies=dict(type='bool', required=False, default=False), - implicitredirect=dict(type='bool', required=False, default=False), - lbmethod=dict(type='str', required=False, default="bybusyness", + add_content_type_header=dict(type='bool', default=False), + address=dict(type='str', default="REF_DefaultInternalAddress"), + allowed_networks=dict(type='list', elements='str', default=["REF_NetworkAny"]), + certificate=dict(type='str', default=""), + comment=dict(type='str', default=""), + disable_compression=dict(type='bool', default=False), + domain=dict(type='list', elements='str'), + exceptions=dict(type='list', elements='str', default=[]), + htmlrewrite=dict(type='bool', default=False), + htmlrewrite_cookies=dict(type='bool', default=False), + implicitredirect=dict(type='bool', default=False), + lbmethod=dict(type='str', default="bybusyness", choices=['bybusyness', 'bytraffic', 'byrequests', '']), - locations=dict(type='list', elements='str', required=False, default=[]), - port=dict(type='int', required=False, default=80), - preservehost=dict(type='bool', required=False, default=False), - profile=dict(type='str', required=False, default=""), - status=dict(type='bool', required=False, default=True), - type=dict(type='str', required=False, default="http", choices=['http', 'https']), - xheaders=dict(type='bool', required=False, default=False), + locations=dict(type='list', elements='str', default=[]), + port=dict(type='int', default=80), + preservehost=dict(type='bool', default=False), + profile=dict(type='str', default=""), + status=dict(type='bool', default=True), + type=dict(type='str', default="http", choices=['http', 'https']), + xheaders=dict(type='bool', default=False), ) ) try: diff --git a/plugins/modules/utm_proxy_frontend_info.py b/plugins/modules/utm_proxy_frontend_info.py index 0709cad01e..859ee67de1 100644 --- a/plugins/modules/utm_proxy_frontend_info.py +++ b/plugins/modules/utm_proxy_frontend_info.py @@ -29,7 +29,7 @@ options: name: type: str description: - - The name of the object. Will be used to identify the entry. + - The name of the object that identifies the entry. required: true extends_documentation_fragment: @@ -90,10 +90,10 @@ result: description: List of associated proxy exceptions. type: list htmlrewrite: - description: State of html rewrite. + description: State of HTML rewrite. type: bool htmlrewrite_cookies: - description: Whether the html rewrite cookie will be set. + description: Whether the HTML rewrite cookie is set. type: bool implicitredirect: description: Whether to use implicit redirection. 
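A recurring change in these utm_* modules is dropping C(required=False) and C(default=None) from C(argument_spec) entries; both are the implicit defaults in AnsibleModule, so the trimmed specs are behaviorally identical. A condensed illustration (the option names are generic, not from any one module):

```python
from ansible.module_utils.basic import AnsibleModule

def build_module():
    # required=False and default=None are AnsibleModule's implicit defaults,
    # so this trimmed spec matches the verbose one it replaces:
    #   comment=dict(type='str', required=False, default="")  ->  keep default=""
    #   hostname=dict(type='str', required=False)             ->  drop required
    return AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            comment=dict(type='str', default=''),
            hostname=dict(type='str'),
        ),
        supports_check_mode=True,
    )
```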
diff --git a/plugins/modules/utm_proxy_location.py b/plugins/modules/utm_proxy_location.py index 944050bfb6..15b89bb1a2 100644 --- a/plugins/modules/utm_proxy_location.py +++ b/plugins/modules/utm_proxy_location.py @@ -30,7 +30,7 @@ options: name: type: str description: - - The name of the object. Will be used to identify the entry. + - The name of the object that identifies the entry. required: true access_control: description: @@ -92,7 +92,7 @@ options: stickysession_id: type: str description: - - The stickysession id. + - The stickysession ID. default: ROUTEID stickysession_status: description: @@ -182,7 +182,7 @@ result: description: Whether to use stickysession or not. type: bool websocket_passthrough: - description: Whether websocket passthrough will be used or not. + description: Whether websocket passthrough is used or not. type: bool """ @@ -198,19 +198,19 @@ def main(): module = UTMModule( argument_spec=dict( name=dict(type='str', required=True), - access_control=dict(type='str', required=False, default="0", choices=['0', '1']), - allowed_networks=dict(type='list', elements='str', required=False, default=['REF_NetworkAny']), - auth_profile=dict(type='str', required=False, default=""), - backend=dict(type='list', elements='str', required=False, default=[]), - be_path=dict(type='str', required=False, default=""), - comment=dict(type='str', required=False, default=""), - denied_networks=dict(type='list', elements='str', required=False, default=[]), - hot_standby=dict(type='bool', required=False, default=False), - path=dict(type='str', required=False, default="/"), - status=dict(type='bool', required=False, default=True), - stickysession_id=dict(type='str', required=False, default='ROUTEID'), - stickysession_status=dict(type='bool', required=False, default=False), - websocket_passthrough=dict(type='bool', required=False, default=False), + access_control=dict(type='str', default="0", choices=['0', '1']), + allowed_networks=dict(type='list', elements='str', default=['REF_NetworkAny']), + auth_profile=dict(type='str', default=""), + backend=dict(type='list', elements='str', default=[]), + be_path=dict(type='str', default=""), + comment=dict(type='str', default=""), + denied_networks=dict(type='list', elements='str', default=[]), + hot_standby=dict(type='bool', default=False), + path=dict(type='str', default="/"), + status=dict(type='bool', default=True), + stickysession_id=dict(type='str', default='ROUTEID'), + stickysession_status=dict(type='bool', default=False), + websocket_passthrough=dict(type='bool', default=False), ) ) try: diff --git a/plugins/modules/utm_proxy_location_info.py b/plugins/modules/utm_proxy_location_info.py index a7ea37ea79..7a8db919c2 100644 --- a/plugins/modules/utm_proxy_location_info.py +++ b/plugins/modules/utm_proxy_location_info.py @@ -29,7 +29,7 @@ options: name: type: str description: - - The name of the object. Will be used to identify the entry. + - The name of the object that identifies the entry. required: true extends_documentation_fragment: @@ -101,7 +101,7 @@ result: description: Whether to use stickysession or not. type: bool websocket_passthrough: - description: Whether websocket passthrough will be used or not. + description: Whether websocket passthrough is used or not. 
type: bool """ diff --git a/plugins/modules/vdo.py b/plugins/modules/vdo.py index 55706da56a..dbfa44f5b6 100644 --- a/plugins/modules/vdo.py +++ b/plugins/modules/vdo.py @@ -19,8 +19,8 @@ short_description: Module to control VDO description: - This module controls the VDO dedupe and compression device. - - VDO, or Virtual Data Optimizer, is a device-mapper target that provides inline block-level deduplication, compression, and thin provisioning - capabilities to primary storage. + - VDO, or Virtual Data Optimizer, is a device-mapper target that provides inline block-level deduplication, compression, + and thin provisioning capabilities to primary storage. extends_documentation_fragment: - community.general.attributes @@ -38,19 +38,21 @@ options: required: true state: description: - - Whether this VDO volume should be V(present) or V(absent). If a V(present) VDO volume does not exist, it will be created. If a V(present) - VDO volume already exists, it will be modified, by updating the configuration, which will take effect when the VDO volume is restarted. - Not all parameters of an existing VDO volume can be modified; the C(statusparamkeys) list in the code contains the parameters that can - be modified after creation. If an V(absent) VDO volume does not exist, it will not be removed. + - Whether this VDO volume should be V(present) or V(absent). If a V(present) VDO volume does not exist, it is created. + If a V(present) VDO volume already exists, it is modified by updating the configuration, which takes effect when the + VDO volume is restarted. Not all parameters of an existing VDO volume can be modified; the C(statusparamkeys) list + in the code contains the parameters that can be modified after creation. If an V(absent) VDO volume does not exist, + it is not removed. type: str choices: [absent, present] default: present activated: description: - - The C(activate) status for a VDO volume. If this is set to V(false), the VDO volume cannot be started, and it will not start on system - startup. However, on initial creation, a VDO volume with "activated" set to "off" will be running, until stopped. This is the default - behavior of the C(vdo create) command; it provides the user an opportunity to write a base amount of metadata (filesystem, LVM headers, - and so on) to the VDO volume prior to stopping the volume, and leaving it deactivated until ready to use. + - The C(activate) status for a VDO volume. If this is set to V(false), the VDO volume cannot be started, and it does + not start on system startup. However, on initial creation, a VDO volume with O(activated=false) is set to be running + until stopped. This is the default behavior of the C(vdo create) command; it provides the user an opportunity to write + a base amount of metadata (filesystem, LVM headers, and so on) to the VDO volume prior to stopping the volume, and + leaving it deactivated until ready to use. type: bool running: description: @@ -64,131 +66,135 @@ options: type: str logicalsize: description: - - The logical size of the VDO volume (in megabytes, or LVM suffix format). If not specified for a new volume, this defaults to the same - size as the underlying storage device, which is specified in the O(device) parameter. Existing volumes will maintain their size if the - logicalsize parameter is not specified, or is smaller than or identical to the current size. If the specified size is larger than the - current size, a C(growlogical) operation will be performed. 
+ - The logical size of the VDO volume (in megabytes, or LVM suffix format). If not specified for a new volume, this defaults + to the same size as the underlying storage device, which is specified in the O(device) parameter. Existing volumes + maintain their size if the O(logicalsize) parameter is not specified, or is smaller than or identical to the current + size. If the specified size is larger than the current size, a C(growlogical) operation is performed. type: str deduplication: description: - - Configures whether deduplication is enabled. The default for a created volume is V(enabled). Existing volumes will maintain their previously - configured setting unless a different value is specified in the playbook. + - Configures whether deduplication is enabled. The default for a created volume is V(enabled). Existing volumes maintain + their previously configured setting unless a different value is specified in the playbook. type: str choices: [disabled, enabled] compression: description: - - Configures whether compression is enabled. The default for a created volume is V(enabled). Existing volumes will maintain their previously - configured setting unless a different value is specified in the playbook. + - Configures whether compression is enabled. The default for a created volume is V(enabled). Existing volumes maintain + their previously configured setting unless a different value is specified in the playbook. type: str choices: [disabled, enabled] blockmapcachesize: description: - - The amount of memory allocated for caching block map pages, in megabytes (or may be issued with an LVM-style suffix of K, M, G, or T). - The default (and minimum) value is V(128M). The value specifies the size of the cache; there is a 15% memory usage overhead. Each 1.25G - of block map covers 1T of logical blocks, therefore a small amount of block map cache memory can cache a significantly large amount of - block map data. - - Existing volumes will maintain their previously configured setting unless a different value is specified in the playbook. + - The amount of memory allocated for caching block map pages, in megabytes (or may be issued with an LVM-style suffix + of K, M, G, or T). The default (and minimum) value is V(128M). The value specifies the size of the cache; there is + a 15% memory usage overhead. Each 1.25G of block map covers 1T of logical blocks, therefore a small amount of block + map cache memory can cache a significantly large amount of block map data. + - Existing volumes maintain their previously configured setting unless a different value is specified in the playbook. type: str readcache: description: - - Enables or disables the read cache. The default is V(disabled). Choosing V(enabled) enables a read cache which may improve performance - for workloads of high deduplication, read workloads with a high level of compression, or on hard disk storage. Existing volumes will maintain - their previously configured setting unless a different value is specified in the playbook. + - Enables or disables the read cache. The default is V(disabled). Choosing V(enabled) enables a read cache which may + improve performance for workloads of high deduplication, read workloads with a high level of compression, or on hard + disk storage. Existing volumes maintain their previously configured setting unless a different value is specified + in the playbook. - The read cache feature is available in VDO 6.1 and older. 
type: str choices: [disabled, enabled] readcachesize: description: - - Specifies the extra VDO device read cache size in megabytes. This is in addition to a system-defined minimum. Using a value with a suffix - of K, M, G, or T is optional. The default value is V(0). 1.125 MB of memory per bio thread will be used per 1 MB of read cache specified - (for example, a VDO volume configured with 4 bio threads will have a read cache memory usage overhead of 4.5 MB per 1 MB of read cache - specified). Existing volumes will maintain their previously configured setting unless a different value is specified in the playbook. + - Specifies the extra VDO device read cache size in megabytes. This is in addition to a system-defined minimum. Using + a value with a suffix of K, M, G, or T is optional. The default value is V(0). 1.125 MB of memory per bio thread is + used per 1 MB of read cache specified (for example, a VDO volume configured with 4 bio threads has a read cache memory + usage overhead of 4.5 MB per 1 MB of read cache specified). Existing volumes maintain their previously configured + setting unless a different value is specified in the playbook. - The read cache feature is available in VDO 6.1 and older. type: str emulate512: description: - - Enables 512-byte emulation mode, allowing drivers or filesystems to access the VDO volume at 512-byte granularity, instead of the default - 4096-byte granularity. + - Enables 512-byte emulation mode, allowing drivers or filesystems to access the VDO volume at 512-byte granularity, + instead of the default 4096-byte granularity. - Only recommended when a driver or filesystem requires 512-byte sector level access to a device. - This option is only available when creating a new volume, and cannot be changed for an existing volume. type: bool default: false growphysical: description: - - Specifies whether to attempt to execute a C(growphysical) operation, if there is enough unused space on the device. A C(growphysical) - operation will be executed if there is at least 64 GB of free space, relative to the previous physical size of the affected VDO volume. + - Specifies whether to attempt to execute a C(growphysical) operation, if there is enough unused space on the device. + A C(growphysical) operation is executed if there is at least 64 GB of free space, relative to the previous physical + size of the affected VDO volume. type: bool default: false slabsize: description: - - The size of the increment by which the physical size of a VDO volume is grown, in megabytes (or may be issued with an LVM-style suffix - of K, M, G, or T). Must be a power of two between 128M and 32G. The default is V(2G), which supports volumes having a physical size up - to 16T. The maximum, V(32G), supports a physical size of up to 256T. This option is only available when creating a new volume, and cannot - be changed for an existing volume. + - The size of the increment by which the physical size of a VDO volume is grown, in megabytes (or may be issued with + an LVM-style suffix of K, M, G, or T). Must be a power of two between 128M and 32G. The default is V(2G), which supports + volumes having a physical size up to 16T. The maximum, V(32G), supports a physical size of up to 256T. This option + is only available when creating a new volume, and cannot be changed for an existing volume. type: str writepolicy: description: - Specifies the write policy of the VDO volume. - The V(sync) mode acknowledges writes only after data is on stable storage. 
- The V(async) mode acknowledges writes when data has been cached for writing to stable storage. - - The default (and highly recommended) V(auto) mode checks the storage device to determine whether it supports flushes. Devices that support - flushes will result in a VDO volume in V(async) mode, while devices that do not support flushes will run in V(sync) mode. - - Existing volumes will maintain their previously configured setting unless a different value is specified in the playbook. + - The default (and highly recommended) V(auto) mode checks the storage device to determine whether it supports flushes. + Devices that support flushes result in a VDO volume in V(async) mode, while devices that do not support flushes run + in V(sync) mode. + - Existing volumes maintain their previously configured setting unless a different value is specified in the playbook. type: str choices: [async, auto, sync] indexmem: description: - - Specifies the amount of index memory in gigabytes. The default is V(0.25). The special decimal values V(0.25), V(0.5), and V(0.75) can - be used, as can any positive integer. This option is only available when creating a new volume, and cannot be changed for an existing - volume. + - Specifies the amount of index memory in gigabytes. The default is V(0.25). The special decimal values V(0.25), V(0.5), + and V(0.75) can be used, as can any positive integer. This option is only available when creating a new volume, and + cannot be changed for an existing volume. type: str indexmode: description: - Specifies the index mode of the Albireo index. - - The default is V(dense), which has a deduplication window of 1 GB of index memory per 1 TB of incoming data, requiring 10 GB of index - data on persistent storage. - - The V(sparse) mode has a deduplication window of 1 GB of index memory per 10 TB of incoming data, but requires 100 GB of index data on - persistent storage. + - The default is V(dense), which has a deduplication window of 1 GB of index memory per 1 TB of incoming data, requiring + 10 GB of index data on persistent storage. + - The V(sparse) mode has a deduplication window of 1 GB of index memory per 10 TB of incoming data, but requires 100 + GB of index data on persistent storage. - This option is only available when creating a new volume, and cannot be changed for an existing volume. type: str choices: [dense, sparse] ackthreads: description: - - Specifies the number of threads to use for acknowledging completion of requested VDO I/O operations. Valid values are integer values from - V(1) to V(100) (lower numbers are preferable due to overhead). The default is V(1). Existing volumes will maintain their previously configured - setting unless a different value is specified in the playbook. + - Specifies the number of threads to use for acknowledging completion of requested VDO I/O operations. Valid values + are integer values from V(1) to V(100) (lower numbers are preferable due to overhead). The default is V(1). Existing + volumes maintain their previously configured setting unless a different value is specified in the playbook. type: str biothreads: description: - - Specifies the number of threads to use for submitting I/O operations to the storage device. Valid values are integer values from V(1) - to V(100) (lower numbers are preferable due to overhead). The default is V(4). Existing volumes will maintain their previously configured - setting unless a different value is specified in the playbook. 
+ - Specifies the number of threads to use for submitting I/O operations to the storage device. Valid values are integer + values from V(1) to V(100) (lower numbers are preferable due to overhead). The default is V(4). Existing volumes maintain + their previously configured setting unless a different value is specified in the playbook. type: str cputhreads: description: - - Specifies the number of threads to use for CPU-intensive work such as hashing or compression. Valid values are integer values from V(1) - to V(100) (lower numbers are preferable due to overhead). The default is V(2). Existing volumes will maintain their previously configured - setting unless a different value is specified in the playbook. + - Specifies the number of threads to use for CPU-intensive work such as hashing or compression. Valid values are integer + values from V(1) to V(100) (lower numbers are preferable due to overhead). The default is V(2). Existing volumes maintain + their previously configured setting unless a different value is specified in the playbook. type: str logicalthreads: description: - - Specifies the number of threads across which to subdivide parts of the VDO processing based on logical block addresses. Valid values are - integer values from V(1) to V(100) (lower numbers are preferable due to overhead). The default is V(1). Existing volumes will maintain - their previously configured setting unless a different value is specified in the playbook. + - Specifies the number of threads across which to subdivide parts of the VDO processing based on logical block addresses. + Valid values are integer values from V(1) to V(100) (lower numbers are preferable due to overhead). The default is + V(1). Existing volumes maintain their previously configured setting unless a different value is specified in the playbook. type: str physicalthreads: description: - - Specifies the number of threads across which to subdivide parts of the VDO processing based on physical block addresses. Valid values - are integer values from V(1) to V(16) (lower numbers are preferable due to overhead). The physical space used by the VDO volume must be - larger than (O(slabsize) * O(physicalthreads)). The default is V(1). Existing volumes will maintain their previously configured setting - unless a different value is specified in the playbook. + - Specifies the number of threads across which to subdivide parts of the VDO processing based on physical block addresses. + Valid values are integer values from V(1) to V(16) (lower numbers are preferable due to overhead). The physical space + used by the VDO volume must be larger than (O(slabsize) * O(physicalthreads)). The default is V(1). Existing volumes + maintain their previously configured setting unless a different value is specified in the playbook. type: str force: description: - - When creating a volume, ignores any existing file system or VDO signature already present in the storage device. When stopping or removing - a VDO volume, first unmounts the file system stored on the device if mounted. - - B(Warning:) Since this parameter removes all safety checks it is important to make sure that all parameters provided are accurate and - intentional. + - When creating a volume, ignores any existing file system or VDO signature already present in the storage device. When + stopping or removing a VDO volume, first unmounts the file system stored on the device if mounted. 
+ - B(Warning:) Since this parameter removes all safety checks, it is important to make sure that all parameters provided + are accurate and intentional. type: bool default: false version_added: 2.4.0 @@ -214,7 +220,7 @@ EXAMPLES = r""" state: absent """ -RETURN = r"""# """ +RETURN = r"""#""" from ansible.module_utils.basic import AnsibleModule, missing_required_lib import re diff --git a/plugins/modules/vertica_configuration.py b/plugins/modules/vertica_configuration.py index 9ce2e42d15..d97fbf5ed4 100644 --- a/plugins/modules/vertica_configuration.py +++ b/plugins/modules/vertica_configuration.py @@ -56,11 +56,11 @@ options: type: str notes: - The default authentication assumes that you are either logging in as or sudo'ing to the C(dbadmin) account on the host. - - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure that C(unixODBC) and C(pyodbc) is installed on the host and properly - configured. - - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) to be added to the C(Vertica) section of either - C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) to - be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure that C(unixODBC) and C(pyodbc) are installed + on the host and properly configured. + - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) to be added to the C(Vertica) + section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) and both C(ErrorMessagesPath = /opt/vertica/lib64) and + C(DriverManagerEncoding = UTF-16) to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). requirements: ['unixODBC', 'pyodbc'] author: "Dariusz Owczarek (@dareko)" """ @@ -139,12 +139,12 @@ def main(): module = AnsibleModule( argument_spec=dict( parameter=dict(required=True, aliases=['name']), - value=dict(default=None), - db=dict(default=None), + value=dict(), + db=dict(), cluster=dict(default='localhost'), port=dict(default='5433'), login_user=dict(default='dbadmin'), - login_password=dict(default=None, no_log=True), + login_password=dict(no_log=True), ), supports_check_mode=True) if not pyodbc_found: diff --git a/plugins/modules/vertica_info.py b/plugins/modules/vertica_info.py index bfb99552a0..340e782f31 100644 --- a/plugins/modules/vertica_info.py +++ b/plugins/modules/vertica_info.py @@ -42,11 +42,11 @@ options: type: str notes: - The default authentication assumes that you are either logging in as or sudo'ing to the C(dbadmin) account on the host. - - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure that C(unixODBC) and C(pyodbc) are installed on the host and properly - configured. - - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) to be added to the C(Vertica) section of either - C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) to - be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure that C(unixODBC) and C(pyodbc) are installed + on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) to be added to the C(Vertica) + section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) and both C(ErrorMessagesPath = /opt/vertica/lib64) and + C(DriverManagerEncoding = UTF-16) to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). requirements: ['unixODBC', 'pyodbc'] author: "Dariusz Owczarek (@dareko)" """ @@ -227,9 +227,9 @@ def main(): argument_spec=dict( cluster=dict(default='localhost'), port=dict(default='5433'), - db=dict(default=None), + db=dict(), login_user=dict(default='dbadmin'), - login_password=dict(default=None, no_log=True), + login_password=dict(no_log=True), ), supports_check_mode=True) if not pyodbc_found: diff --git a/plugins/modules/vertica_role.py b/plugins/modules/vertica_role.py index c3e15b4b95..550c612b8d 100644 --- a/plugins/modules/vertica_role.py +++ b/plugins/modules/vertica_role.py @@ -64,11 +64,11 @@ options: type: str notes: - The default authentication assumes that you are either logging in as or sudo'ing to the C(dbadmin) account on the host. - - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure that C(unixODBC) and C(pyodbc) is installed on the host and properly - configured. - - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) to be added to the C(Vertica) section of either - C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) to - be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure that C(unixODBC) and C(pyodbc) are installed + on the host and properly configured. + - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) to be added to the C(Vertica) + section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) and both C(ErrorMessagesPath = /opt/vertica/lib64) and + C(DriverManagerEncoding = UTF-16) to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). requirements: ['unixODBC', 'pyodbc'] author: "Dariusz Owczarek (@dareko)" """ @@ -180,7 +180,7 @@ def main(): module = AnsibleModule( argument_spec=dict( role=dict(required=True, aliases=['name']), - assigned_roles=dict(default=None, aliases=['assigned_role']), + assigned_roles=dict(aliases=['assigned_role']), state=dict(default='present', choices=['absent', 'present']), db=dict(), cluster=dict(default='localhost'), diff --git a/plugins/modules/vertica_schema.py b/plugins/modules/vertica_schema.py index b9e243ec7b..11a7f9c77c 100644 --- a/plugins/modules/vertica_schema.py +++ b/plugins/modules/vertica_schema.py @@ -14,8 +14,9 @@ module: vertica_schema short_description: Adds or removes Vertica database schema and roles description: - Adds or removes Vertica database schema and, optionally, roles with schema access privileges. - - A schema will not be removed until all the objects have been dropped. - - In such a situation, if the module tries to remove the schema it will fail and only remove roles created for the schema if they have no dependencies. + - A schema is not removed until all the objects have been dropped. + - In such a situation, if the module tries to remove the schema, it fails and only removes roles created for the schema if + they have no dependencies.
extends_documentation_fragment: - community.general.attributes attributes: @@ -75,11 +76,11 @@ options: type: str notes: - The default authentication assumes that you are either logging in as or sudo'ing to the C(dbadmin) account on the host. - - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure that C(unixODBC) and C(pyodbc) is installed on the host and properly - configured. - - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) to be added to the C(Vertica) section of either - C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) to - be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure that C(unixODBC) and C(pyodbc) are installed + on the host and properly configured. + - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) to be added to the C(Vertica) + section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) and both C(ErrorMessagesPath = /opt/vertica/lib64) and + C(DriverManagerEncoding = UTF-16) to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). requirements: ['unixODBC', 'pyodbc'] author: "Dariusz Owczarek (@dareko)" """ @@ -92,7 +93,8 @@ EXAMPLES = r""" community.general.vertica_schema: name=schema_name owner=dbowner db=db_name state=present - name: Creating a new schema with roles - community.general.vertica_schema: name=schema_name create_roles=schema_name_all usage_roles=schema_name_ro,schema_name_rw db=db_name state=present + community.general.vertica_schema: name=schema_name create_roles=schema_name_all usage_roles=schema_name_ro,schema_name_rw + db=db_name state=present """ import traceback diff --git a/plugins/modules/vertica_user.py b/plugins/modules/vertica_user.py index 74ac784c1b..e3a4292aec 100644 --- a/plugins/modules/vertica_user.py +++ b/plugins/modules/vertica_user.py @@ -13,8 +13,8 @@ module: vertica_user short_description: Adds or removes Vertica database users and assigns roles description: - Adds or removes Vertica database user and, optionally, assigns roles. - - A user will not be removed until all the dependencies have been dropped. - - In such a situation, if the module tries to remove the user it will fail and only remove roles granted to the user. + - A user is not removed until all the dependencies have been dropped. + - In such a situation, if the module tries to remove the user, it fails and only removes roles granted to the user. extends_documentation_fragment: - community.general.attributes attributes: @@ -40,8 +40,8 @@ options: password: description: - The user's password encrypted by the MD5 algorithm. - - The password must be generated with the format C("md5" + md5[password + username]), resulting in a total of 35 characters. An easy way - to do this is by querying the Vertica database with select V('md5'||md5(''\)). + - The password must be generated with the format C("md5" + md5[password + username]), resulting in a total of 35 characters. + An easy way to do this is by querying the Vertica database with select V('md5'||md5(''\)). type: str expired: description: @@ -50,7 +50,7 @@ options: ldap: description: - Set to V(true) if users are authenticated using LDAP. - - The user will be created with password expired and set to V($ldap$).
+ - The user is created with password expired and set to V($ldap$). type: bool roles: description: @@ -88,11 +88,11 @@ options: type: str notes: - The default authentication assumes that you are either logging in as or sudo'ing to the C(dbadmin) account on the host. - - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure that C(unixODBC) and C(pyodbc) is installed on the host and properly - configured. - - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) to be added to the C(Vertica) section of either - C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) to - be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure that C(unixODBC) and C(pyodbc) are installed + on the host and properly configured. + - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) to be added to the C(Vertica) + section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) and both C(ErrorMessagesPath = /opt/vertica/lib64) and + C(DriverManagerEncoding = UTF-16) to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). requirements: ['unixODBC', 'pyodbc'] author: "Dariusz Owczarek (@dareko)" """ diff --git a/plugins/modules/vexata_eg.py b/plugins/modules/vexata_eg.py index f7184d68b0..27bd9f129e 100644 --- a/plugins/modules/vexata_eg.py +++ b/plugins/modules/vexata_eg.py @@ -14,8 +14,8 @@ module: vexata_eg short_description: Manage export groups on Vexata VX100 storage arrays description: - Create or delete export groups on a Vexata VX100 array. - - An export group is a tuple of a volume group, initiator group and port group that allows a set of volumes to be exposed to one or more hosts - through specific array ports. + - An export group is a tuple of a volume group, initiator group and port group that allows a set of volumes to be exposed + to one or more hosts through specific array ports. author: - Sandeep Kasargod (@vexata) attributes: diff --git a/plugins/modules/vmadm.py b/plugins/modules/vmadm.py index 4b21ead77e..fc7504fba5 100644 --- a/plugins/modules/vmadm.py +++ b/plugins/modules/vmadm.py @@ -26,7 +26,7 @@ options: archive_on_delete: required: false description: - - When enabled, the zone dataset will be mounted on C(/zones/archive) upon removal. + - When enabled, the zone dataset is mounted on C(/zones/archive) upon removal. type: bool autoboot: required: false @@ -52,7 +52,8 @@ options: cpu_shares: required: false description: - - Sets a limit on the number of fair share scheduler (FSS) CPU shares for a VM. This limit is relative to all other VMs on the system. + - Sets a limit on the number of fair share scheduler (FSS) CPU shares for a VM. This limit is relative to all other + VMs on the system. type: int cpu_type: required: false @@ -103,6 +104,13 @@ options: description: - Enables the firewall, allowing fwadm(1M) rules to be applied. type: bool + flexible_disk_size: + required: false + description: + - This sets an upper bound for the amount of space that a bhyve instance may use for its disks and snapshots of those + disks (in MiBs).
+ type: int + version_added: 10.5.0 force: required: false description: @@ -141,7 +149,8 @@ options: internal_metadata_namespace: required: false description: - - List of namespaces to be set as C(internal_metadata-only); these namespaces will come from O(internal_metadata) rather than O(customer_metadata). + - List of namespaces to be set as C(internal_metadata-only); these namespaces come from O(internal_metadata) rather + than O(customer_metadata). type: str kernel_version: required: false @@ -156,7 +165,7 @@ options: maintain_resolvers: required: false description: - - Resolvers in C(/etc/resolv.conf) will be updated when updating the O(resolvers) property. + - Resolvers in C(/etc/resolv.conf) are updated when updating the O(resolvers) property. type: bool max_locked_memory: required: false @@ -205,10 +214,17 @@ options: description: - Consider the provisioning complete when the VM first starts, rather than when the VM has rebooted. type: bool + owner_uuid: + required: false + description: + - Define the UUID of the owner of the VM. + type: str + version_added: 10.5.0 qemu_opts: required: false description: - - Additional qemu arguments for KVM guests. This overwrites the default arguments provided by vmadm(1M) and should only be used for debugging. + - Additional qemu arguments for KVM guests. This overwrites the default arguments provided by vmadm(1M) and should only + be used for debugging. type: str qemu_extra_opts: required: false @@ -234,7 +250,7 @@ options: routes: required: false description: - - Dictionary that maps destinations to gateways, these will be set as static routes in the VM. + - Dictionary that maps destinations to gateways; these are set as static routes in the VM. type: dict spice_opts: required: false @@ -244,20 +260,22 @@ options: spice_password: required: false description: - - Password required to connect to SPICE. By default no password is set. Please note this can be read from the Global Zone. + - Password required to connect to SPICE. By default no password is set. Please note this can be read from the Global + Zone. type: str state: choices: [present, running, absent, deleted, stopped, created, restarted, rebooted] default: running description: - - States for the VM to be in. Please note that V(present), V(stopped) and V(restarted) operate on a VM that is currently provisioned. V(present) - means that the VM will be created if it was absent, and that it will be in a running state. V(absent) will shutdown the zone before removing - it. V(stopped) means the zone will be created if it does not exist already, before shutting it down. + - States for the VM to be in. Please note that V(present), V(stopped) and V(restarted) operate on a VM that is currently + provisioned. V(present) means that the VM is created if it was absent, and that it is in a running state. V(absent) + shuts down the zone before removing it. V(stopped) means the zone is created if it does not exist already, before shutting + it down. type: str tmpfs: required: false description: - - Amount of memory (in MiBs) that will be available in the VM for the C(/tmp) filesystem. + - Amount of memory (in MiBs) that is available in the VM for the C(/tmp) filesystem. type: int uuid: required: false @@ -332,7 +350,7 @@ options: zpool: required: false description: - - ZFS pool the VM's zone dataset will be created in. + - ZFS pool the VM's zone dataset is created in.
type: str """ @@ -628,9 +646,9 @@ def main(): 'str': [ 'boot', 'disk_driver', 'dns_domain', 'fs_allowed', 'hostname', 'image_uuid', 'internal_metadata_namespace', 'kernel_version', - 'limit_priv', 'nic_driver', 'qemu_opts', 'qemu_extra_opts', - 'spice_opts', 'uuid', 'vga', 'zfs_data_compression', - 'zfs_root_compression', 'zpool' + 'limit_priv', 'nic_driver', 'owner_uuid', 'qemu_opts', + 'qemu_extra_opts', 'spice_opts', 'uuid', 'vga', + 'zfs_data_compression', 'zfs_root_compression', 'zpool' ], 'bool': [ 'archive_on_delete', 'autoboot', 'delegate_dataset', @@ -638,12 +656,12 @@ def main(): 'indestructible_zoneroot', 'maintain_resolvers', 'nowait' ], 'int': [ - 'cpu_cap', 'cpu_shares', 'max_locked_memory', 'max_lwps', - 'max_physical_memory', 'max_swap', 'mdata_exec_timeout', - 'quota', 'ram', 'tmpfs', 'vcpus', 'virtio_txburst', - 'virtio_txtimer', 'vnc_port', 'zfs_data_recsize', - 'zfs_filesystem_limit', 'zfs_io_priority', 'zfs_root_recsize', - 'zfs_snapshot_limit' + 'cpu_cap', 'cpu_shares', 'flexible_disk_size', + 'max_locked_memory', 'max_lwps', 'max_physical_memory', + 'max_swap', 'mdata_exec_timeout', 'quota', 'ram', + 'tmpfs', 'vcpus', 'virtio_txburst', 'virtio_txtimer', + 'vnc_port', 'zfs_data_recsize', 'zfs_filesystem_limit', + 'zfs_io_priority', 'zfs_root_recsize', 'zfs_snapshot_limit' ], 'dict': ['customer_metadata', 'internal_metadata', 'routes'], } diff --git a/plugins/modules/wdc_redfish_command.py b/plugins/modules/wdc_redfish_command.py index 680bd4b3f9..20afa8add1 100644 --- a/plugins/modules/wdc_redfish_command.py +++ b/plugins/modules/wdc_redfish_command.py @@ -17,6 +17,7 @@ description: - Manages OOB controller firmware. For example, Firmware Activate, Update and Activate. extends_documentation_fragment: - community.general.attributes + - community.general.redfish attributes: check_mode: support: full @@ -87,6 +88,12 @@ options: description: - The password for retrieving the update image. type: str + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 + ciphers: + version_added: 10.6.0 notes: - In the inventory, you can specify baseuri or ioms. See the EXAMPLES section. - Ioms is a list of FQDNs for the enclosure's IOMs. 
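With the C(community.general.redfish) documentation fragment now pulled in above, wdc_redfish_command accepts the shared TLS options O(validate_certs), O(ca_path), and O(ciphers) (all version_added 10.6.0). A task exercising them could look like the following sketch; the C(FWActivate) command name and all variables here are illustrative assumptions, not values taken from this patch:

- name: Activate IOM firmware over a verified TLS connection
  community.general.wdc_redfish_command:
    category: Update
    command: FWActivate
    ioms: "{{ wdc_ioms }}"
    username: "{{ wdc_username }}"
    password: "{{ wdc_password }}"
    validate_certs: true
    ca_path: /etc/ssl/certs/enclosure-ca.pem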
@@ -195,6 +202,7 @@ msg: """ from ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils import WdcRedfishUtils +from ansible_collections.community.general.plugins.module_utils.redfish_utils import REDFISH_COMMON_ARGUMENT_SPEC from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native @@ -213,26 +221,28 @@ CATEGORY_COMMANDS_ALL = { def main(): - module = AnsibleModule( - argument_spec=dict( - category=dict(required=True), - command=dict(required=True, type='list', elements='str'), - ioms=dict(type='list', elements='str'), - baseuri=dict(), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - update_creds=dict( - type='dict', - options=dict( - username=dict(), - password=dict(no_log=True) - ) - ), - resource_id=dict(), - update_image_uri=dict(), - timeout=dict(type='int', default=10) + argument_spec = dict( + category=dict(required=True), + command=dict(required=True, type='list', elements='str'), + ioms=dict(type='list', elements='str'), + baseuri=dict(), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + update_creds=dict( + type='dict', + options=dict( + username=dict(), + password=dict(no_log=True) + ) ), + resource_id=dict(), + update_image_uri=dict(), + timeout=dict(type='int', default=10) + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) + module = AnsibleModule( + argument_spec, required_together=[ ('username', 'password'), ], diff --git a/plugins/modules/wdc_redfish_info.py b/plugins/modules/wdc_redfish_info.py index caaa9c7fd9..19a513c00d 100644 --- a/plugins/modules/wdc_redfish_info.py +++ b/plugins/modules/wdc_redfish_info.py @@ -17,6 +17,7 @@ description: extends_documentation_fragment: - community.general.attributes - community.general.attributes.info_module + - community.general.redfish options: category: required: true @@ -55,6 +56,12 @@ options: - Timeout in seconds for URL requests to OOB controller. default: 10 type: int + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 + ciphers: + version_added: 10.6.0 notes: - In the inventory, you can specify baseuri or ioms. See the EXAMPLES section. 
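The info module picks up the same shared TLS options. C(SimpleUpdateStatus) is the one C(Update) command listed in CATEGORY_COMMANDS_ALL below, so a minimal sketch (host variables are illustrative) might be:

- name: Query firmware update status while validating the controller certificate
  community.general.wdc_redfish_info:
    category: Update
    command: SimpleUpdateStatus
    ioms: "{{ wdc_ioms }}"
    username: "{{ wdc_username }}"
    password: "{{ wdc_password }}"
    validate_certs: true
    ca_path: /etc/ssl/certs/enclosure-ca.pem
  register: update_status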
@@ -118,6 +125,7 @@ StatusCode: from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils import WdcRedfishUtils +from ansible_collections.community.general.plugins.module_utils.redfish_utils import REDFISH_COMMON_ARGUMENT_SPEC CATEGORY_COMMANDS_ALL = { "Update": ["SimpleUpdateStatus"] @@ -126,17 +134,19 @@ CATEGORY_COMMANDS_ALL = { def main(): result = {} + argument_spec = dict( + category=dict(required=True), + command=dict(required=True, type='list', elements='str'), + ioms=dict(type='list', elements='str'), + baseuri=dict(), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + timeout=dict(type='int', default=10) + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) module = AnsibleModule( - argument_spec=dict( - category=dict(required=True), - command=dict(required=True, type='list', elements='str'), - ioms=dict(type='list', elements='str'), - baseuri=dict(), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - timeout=dict(type='int', default=10) - ), + argument_spec, required_together=[ ('username', 'password'), ], diff --git a/plugins/modules/xattr.py b/plugins/modules/xattr.py index 11b036ff66..cbd9dcc05c 100644 --- a/plugins/modules/xattr.py +++ b/plugins/modules/xattr.py @@ -13,7 +13,8 @@ module: xattr short_description: Manage user defined extended attributes description: - Manages filesystem user defined extended attributes. - - Requires that extended attributes are enabled on the target filesystem and that the C(setfattr)/C(getfattr) utilities are present. + - Requires that extended attributes are enabled on the target filesystem and that the C(setfattr)/C(getfattr) utilities + are present. extends_documentation_fragment: - community.general.attributes attributes: @@ -208,7 +209,7 @@ def main(): not (namespace == 'user' and key.startswith('user.'))): key = '%s.%s' % (namespace, key) - if (state == 'present' or value is not None): + if state == 'present' or value is not None: current = get_xattr(module, path, key, follow) if current is None or key not in current or value != current[key]: if not module.check_mode: diff --git a/plugins/modules/xbps.py b/plugins/modules/xbps.py index 9f6cb59d98..dc9d131bd7 100644 --- a/plugins/modules/xbps.py +++ b/plugins/modules/xbps.py @@ -40,13 +40,14 @@ options: type: str recurse: description: - - When removing a package, also remove its dependencies, provided that they are not required by other packages and were not explicitly installed - by a user. + - When removing a package, also remove its dependencies, provided that they are not required by other packages and were + not explicitly installed by a user. type: bool default: false update_cache: description: - - Whether or not to refresh the master package lists. This can be run as part of a package installation or as a separate step. + - Whether or not to refresh the master package lists. This can be run as part of a package installation or as a separate + step. type: bool default: true upgrade: @@ -56,8 +57,9 @@ options: default: false upgrade_xbps: description: - - Whether or not to upgrade the xbps package when necessary. Before installing new packages, xbps requires the user to update the xbps package - itself. Thus when this option is set to V(false), upgrades and installations will fail when xbps is not up to date. + - Whether or not to upgrade the C(xbps) package when necessary. 
Before installing new packages, C(xbps) requires the + user to update C(xbps) itself. Thus when this option is set to V(false), upgrades and installations fail when C(xbps) is not + up to date. type: bool default: true version_added: '0.2.0' @@ -68,8 +70,8 @@ options: version_added: '10.2.0' repositories: description: - - Repository URL(s) to prepend to the repository list for the package installation. The URL can be a URL to a repository for remote repositories - or a path for local repositories. + - Repository URL(s) to prepend to the repository list for the package installation. Each entry can be the URL of a remote + repository or the path of a local repository. type: list elements: str version_added: '10.2.0' @@ -361,7 +363,7 @@ def main(): module = AnsibleModule( argument_spec=dict( - name=dict(default=None, aliases=['pkg', 'package'], type='list', elements='str'), + name=dict(aliases=['pkg', 'package'], type='list', elements='str'), state=dict(default='present', choices=['present', 'installed', 'latest', 'absent', 'removed']), diff --git a/plugins/modules/xcc_redfish_command.py b/plugins/modules/xcc_redfish_command.py index d6d1857c4c..9dbbe8016f 100644 --- a/plugins/modules/xcc_redfish_command.py +++ b/plugins/modules/xcc_redfish_command.py @@ -13,14 +13,15 @@ module: xcc_redfish_command short_description: Manages Lenovo Out-Of-Band controllers using Redfish APIs version_added: 2.4.0 description: - - Builds Redfish URIs locally and sends them to remote OOB controllers to perform an action or get information back or update a configuration - attribute. + - Builds Redfish URIs locally and sends them to remote OOB controllers to perform an action or get information back or update + a configuration attribute. - Manages virtual media. - Supports getting information back using GET method. - Supports updating a configuration attribute using PATCH method. - Supports performing an action using POST method. extends_documentation_fragment: - community.general.attributes + - community.general.redfish attributes: check_mode: support: none @@ -110,13 +111,19 @@ options: resource_uri: required: false description: - - The resource uri to get or patch or post. + - The resource URI to get or patch or post. type: str request_body: required: false description: - The request body to patch or post. type: dict + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 + ciphers: + version_added: 10.6.0 author: "Yuyan Pan (@panyy3)" """ @@ -266,38 +273,39 @@ redfish_facts: description: Resource content.
returned: when command == GetResource or command == GetCollectionResource type: dict - sample: '{ - "redfish_facts": { - "data": { - "@odata.etag": "\"3179bf00d69f25a8b3c\"", - "@odata.id": "/redfish/v1/Managers/1/NetworkProtocol/Oem/Lenovo/DNS", - "@odata.type": "#LenovoDNS.v1_0_0.LenovoDNS", - "DDNS": [ - { - "DDNSEnable": true, - "DomainName": "", - "DomainNameSource": "DHCP" - } - ], - "DNSEnable": true, - "Description": "This resource is used to represent a DNS resource for a Redfish implementation.", - "IPv4Address1": "10.103.62.178", - "IPv4Address2": "0.0.0.0", - "IPv4Address3": "0.0.0.0", - "IPv6Address1": "::", - "IPv6Address2": "::", - "IPv6Address3": "::", - "Id": "LenovoDNS", - "PreferredAddresstype": "IPv4" - }, - "ret": true + sample: + { + "redfish_facts": { + "data": { + "@odata.etag": "\"3179bf00d69f25a8b3c\"", + "@odata.id": "/redfish/v1/Managers/1/NetworkProtocol/Oem/Lenovo/DNS", + "@odata.type": "#LenovoDNS.v1_0_0.LenovoDNS", + "DDNS": [ + { + "DDNSEnable": true, + "DomainName": "", + "DomainNameSource": "DHCP" + } + ], + "DNSEnable": true, + "Description": "This resource is used to represent a DNS resource for a Redfish implementation.", + "IPv4Address1": "10.103.62.178", + "IPv4Address2": "0.0.0.0", + "IPv4Address3": "0.0.0.0", + "IPv6Address1": "::", + "IPv6Address2": "::", + "IPv6Address3": "::", + "Id": "LenovoDNS", + "PreferredAddresstype": "IPv4" + }, + "ret": true + } } - }' """ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils, REDFISH_COMMON_ARGUMENT_SPEC class XCCRedfishUtils(RedfishUtils): @@ -678,34 +686,36 @@ CATEGORY_COMMANDS_ALL = { def main(): result = {} - module = AnsibleModule( - argument_spec=dict( - category=dict(required=True), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - timeout=dict(type='int', default=10), - resource_id=dict(), - virtual_media=dict( - type='dict', - options=dict( - media_types=dict(type='list', elements='str', default=[]), - image_url=dict(), - inserted=dict(type='bool', default=True), - write_protected=dict(type='bool', default=True), - username=dict(), - password=dict(no_log=True), - transfer_protocol_type=dict(), - transfer_method=dict(), - ) - ), - resource_uri=dict(), - request_body=dict( - type='dict', - ), + argument_spec = dict( + category=dict(required=True), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + timeout=dict(type='int', default=10), + resource_id=dict(), + virtual_media=dict( + type='dict', + options=dict( + media_types=dict(type='list', elements='str', default=[]), + image_url=dict(), + inserted=dict(type='bool', default=True), + write_protected=dict(type='bool', default=True), + username=dict(), + password=dict(no_log=True), + transfer_protocol_type=dict(), + transfer_method=dict(), + ) ), + resource_uri=dict(), + request_body=dict( + type='dict', + ), + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) + module = AnsibleModule( + argument_spec, required_together=[ ('username', 'password'), ], diff --git a/plugins/modules/xdg_mime.py b/plugins/modules/xdg_mime.py new file mode 100644 
index 0000000000..cf297187a4 --- /dev/null +++ b/plugins/modules/xdg_mime.py @@ -0,0 +1,142 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2025, Marcos Alano +# Based on gio_mime module. Copyright (c) 2022, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# In memory: This code is dedicated to my late grandmother, Maria Marlene. 1936-2025. Rest in peace, grandma. +# -Marcos Alano- + +# TODO: Add support for diff mode + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r""" +module: xdg_mime +author: + - "Marcos Alano (@mhalano)" +short_description: Set default handler for MIME types, for applications using XDG tools +version_added: 10.7.0 +description: + - This module allows configuring the default handler for specific MIME types when you use applications that rely on XDG. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + mime_types: + description: + - One or more MIME types for which a default handler is set. + type: list + elements: str + required: true + handler: + description: + - Sets the default handler for the specified MIME types. + - The desktop file must be installed on the system. If the desktop file is not installed, the module does not fail, + but the handler is not set either. + - You must pass a handler in the form V(*.desktop), otherwise the module fails. + type: str + required: true +notes: + - This module is a thin wrapper around the C(xdg-mime) tool. + - See man xdg-mime(1) for more details. +seealso: + - name: C(xdg-mime) command manual page + description: Manual page for the command. + link: https://portland.freedesktop.org/doc/xdg-mime.html + - name: xdg-utils Documentation + description: Reference documentation for xdg-utils. + link: https://www.freedesktop.org/wiki/Software/xdg-utils/ +""" + +EXAMPLES = r""" +- name: Set Chrome as the default handler for HTTPS + community.general.xdg_mime: + mime_types: x-scheme-handler/https + handler: google-chrome.desktop + register: result + +- name: Set Chrome as the default handler for both HTTP and HTTPS + community.general.xdg_mime: + mime_types: + - x-scheme-handler/http + - x-scheme-handler/https + handler: google-chrome.desktop + register: result +""" + +RETURN = r""" +current_handlers: + description: + - Currently set handlers for the passed MIME types. + returned: success + type: list + elements: str + sample: + - google-chrome.desktop + - firefox.desktop +version: + description: Version of the C(xdg-mime) tool.
+ type: str + returned: always + sample: "1.2.1" +""" + +from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper +from ansible_collections.community.general.plugins.module_utils.xdg_mime import xdg_mime_runner, xdg_mime_get + + +class XdgMime(ModuleHelper): + output_params = ['handler'] + + module = dict( + argument_spec=dict( + mime_types=dict(type='list', elements='str', required=True), + handler=dict(type='str', required=True), + ), + supports_check_mode=True, + ) + + def __init_module__(self): + self.runner = xdg_mime_runner(self.module, check_rc=True) + + with self.runner("version") as ctx: + rc, out, err = ctx.run() + self.vars.version = out.replace("xdg-mime ", "").strip() + + if not self.vars.handler.endswith(".desktop"): + self.do_raise(msg="Handler must be a .desktop file") + + self.vars.current_handlers = [] + for mime in self.vars.mime_types: + handler_value = xdg_mime_get(self.runner, mime) + if not handler_value: + handler_value = '' + self.vars.current_handlers.append(handler_value) + + def __run__(self): + check_mode_return = (0, 'Module executed in check mode', '') + + if any(h != self.vars.handler for h in self.vars.current_handlers): + self.changed = True + + if self.has_changed(): + with self.runner.context(args_order="default handler mime_types", check_mode_skip=True, check_mode_return=check_mode_return) as ctx: + rc, out, err = ctx.run() + self.vars.stdout = out + self.vars.stderr = err + self.vars.set("run_info", ctx.run_info, verbosity=1) + + +def main(): + XdgMime.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/xenserver_guest.py b/plugins/modules/xenserver_guest.py index 2f6a19be4d..44f9192b7e 100644 --- a/plugins/modules/xenserver_guest.py +++ b/plugins/modules/xenserver_guest.py @@ -12,34 +12,35 @@ DOCUMENTATION = r""" module: xenserver_guest short_description: Manages virtual machines running on Citrix Hypervisor/XenServer host or pool description: >- - This module can be used to create new virtual machines from templates or other virtual machines, modify various virtual machine components like - network and disk, rename a virtual machine and remove a virtual machine with associated components. + This module can be used to create new virtual machines from templates or other virtual machines, modify various virtual + machine components like network and disk, rename a virtual machine and remove a virtual machine with associated components. author: - Bojan Vitnik (@bvitnik) notes: - Minimal supported version of XenServer is 5.6. - Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0. - - 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside Citrix - Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the C(XenAPI.py) file from the SDK to your Python site-packages on your Ansible - Control Node to use it. Latest version of the library can also be acquired from GitHub: - U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py).' - - 'If no scheme is specified in O(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you - are accessing XenServer host in trusted environment or use C(https://) scheme explicitly.' 
- - 'To use C(https://) scheme for O(hostname) you have to either import host certificate to your OS certificate store or use O(validate_certs=false) - which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.' - - 'Network configuration inside a guest OS, by using parameters O(networks[].type), O(networks[].ip), O(networks[].gateway) and so on, is supported - on XenServer 7.0 or newer for Windows guests by using official XenServer Guest agent support for network configuration. The module will try - to detect if such support is available and utilize it, else it will use a custom method of configuration using xenstore. Since XenServer Guest - agent only support None and Static types of network configuration, where None means DHCP configured interface, O(networks[].type) and O(networks[].type6) - values V(none) and V(dhcp) have same effect. More info here: - U(https://www.citrix.com/community/citrix-developer/citrix-hypervisor-developer/citrix-hypervisor-developing-products/citrix-hypervisor-staticip.html).' - - 'On platforms without official support for network configuration inside a guest OS, network parameters will be written to xenstore - C(vm-data/networks/) key. Parameters can be inspected by using C(xenstore ls) and C(xenstore read) tools on \*nix guests or through WMI - interface on Windows guests. - They can also be found in VM facts C(instance.xenstore_data) key as returned by the module. It is up to the user to implement a boot time - scripts or custom agent that will read the parameters from xenstore and configure network with given parameters. Take note that for xenstore - data to become available inside a guest, a VM restart is needed hence module will require VM restart if any parameter is changed. This is - a limitation of XenAPI and xenstore. Considering these limitations, network configuration through xenstore is most useful for bootstrapping + - 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be + found inside Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the C(XenAPI.py) file from the SDK + to your Python site-packages on your Ansible Control Node to use it. Latest version of the library can also be acquired + from GitHub: U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py).' + - If no scheme is specified in O(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. + Make sure you are accessing XenServer host in trusted environment or use C(https://) scheme explicitly. + - To use C(https://) scheme for O(hostname) you have to either import host certificate to your OS certificate store or use + O(validate_certs=false) which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer. + - 'Network configuration inside a guest OS, by using parameters O(networks[].type), O(networks[].ip), O(networks[].gateway) + and so on, is supported on XenServer 7.0 or newer for Windows guests by using official XenServer Guest agent support for + network configuration. The module tries to detect if such support is available and utilize it; otherwise it uses a custom method + of configuration using xenstore. Since XenServer Guest agent only supports None and Static types of network configuration, + where None means DHCP configured interface, O(networks[].type) and O(networks[].type6) values V(none) and V(dhcp) have + same effect.
More info here: + U(https://web.archive.org/web/20180218110151/https://xenserver.org/blog/entry/set-windows-guest-vm-static-ip-address-in-xenserver.html).' + - 'On platforms without official support for network configuration inside a guest OS, network parameters are written to + xenstore C(vm-data/networks/) key. Parameters can be inspected by using C(xenstore ls) and C(xenstore read) + tools on \*nix guests or through WMI interface on Windows guests. They can also be found in VM facts C(instance.xenstore_data) + key as returned by the module. It is up to the user to implement boot time scripts or a custom agent that reads the parameters + from xenstore and configures the network with the given parameters. Take note that for xenstore data to become available inside + a guest, a VM restart is needed, hence the module requires a VM restart if any parameter is changed. This is a limitation of + XenAPI and xenstore. Considering these limitations, network configuration through xenstore is most useful for bootstrapping newly deployed VMs, much less for reconfiguring existing ones. More info here: U(https://support.citrix.com/article/CTX226713).' requirements: - XenAPI @@ -55,14 +56,16 @@ options: - If O(state) is set to V(present) and VM exists, ensure the VM configuration conforms to given parameters. - If O(state) is set to V(present) and VM does not exist, then VM is deployed with given parameters. - If O(state) is set to V(absent) and VM exists, then VM is removed with its associated components. - - If O(state) is set to V(poweredon) and VM does not exist, then VM is deployed with given parameters and powered on automatically. + - If O(state) is set to V(poweredon) and VM does not exist, then VM is deployed with given parameters and powered on + automatically. type: str default: present choices: [present, absent, poweredon] name: description: - Name of the VM to work with. - - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with same name are found. + - VMs running on XenServer do not necessarily have unique names. The module fails if multiple VMs with same name are + found. - In case of multiple VMs with same name, use O(uuid) to uniquely specify VM to manage. - This parameter is case sensitive. type: str @@ -75,15 +78,15 @@ options: description: - UUID of the VM to manage if known. This is XenServer's unique identifier. - It is required if name is not unique. - - Please note that a supplied UUID will be ignored on VM creation, as XenServer creates the UUID internally. + - Please note that a supplied UUID is ignored on VM creation, as XenServer creates the UUID internally. type: str template: description: - Name of a template, an existing VM (must be shut down) or a snapshot that should be used to create VM. - - Templates/VMs/snapshots on XenServer do not necessarily have unique names. The module will fail if multiple templates with same name are - found. + - Templates/VMs/snapshots on XenServer do not necessarily have unique names. The module fails if multiple templates + with same name are found. - In case of multiple templates/VMs/snapshots with same name, use O(template_uuid) to uniquely specify source template. - - If VM already exists, this setting will be ignored. + - If VM already exists, this setting is ignored. - This parameter is case sensitive. type: str aliases: [template_src] @@ -101,8 +104,7 @@ options: description: - Destination folder for VM. - This parameter is case sensitive.
-      - 'Example:'
-      - ' folder: /folder1/folder2'
+      - 'Example: O(folder=/folder1/folder2).'
     type: str
   hardware:
     description:
@@ -126,7 +128,8 @@
       - A list of disks to add to VM.
       - All parameters are case sensitive.
       - Removing or detaching existing disks of VM is not supported.
-      - New disks are required to have either a O(disks[].size) or one of O(ignore:disks[].size_[tb,gb,mb,kb,b]) parameters specified.
+      - New disks are required to have either a O(disks[].size) or one of O(ignore:disks[].size_[tb,gb,mb,kb,b]) parameters
+        specified.
       - VM needs to be shut down to reconfigure disk size.
     type: list
     elements: dict
@@ -134,7 +137,8 @@
     suboptions:
       size:
         description:
-          - 'Disk size with unit. Unit must be: V(b), V(kb), V(mb), V(gb), V(tb). VM needs to be shut down to reconfigure this parameter.'
+          - 'Disk size with unit. Unit must be: V(b), V(kb), V(mb), V(gb), V(tb). VM needs to be shut down to reconfigure
+            this parameter.'
          - If no unit is specified, size is assumed to be in bytes.
        type: str
      size_b:
        description:
@@ -168,7 +172,8 @@
        type: str
      sr:
        description:
-          - Storage Repository to create disk on. If not specified, will use default SR. Cannot be used for moving disk to other SR.
+          - Storage Repository to create disk on. If not specified, it uses default SR. Cannot be used for moving disk to
+            other SR.
        type: str
      sr_uuid:
        description:
@@ -182,12 +187,12 @@
    suboptions:
      type:
        description:
-          - The type of CD-ROM. With V(none) the CD-ROM device will be present but empty.
+          - The type of CD-ROM. When V(none) the CD-ROM device is present but empty.
        type: str
        choices: [none, iso]
      iso_name:
        description:
-          - 'The file name of an ISO image from one of the XenServer ISO Libraries (implies O(cdrom.type=iso)).'
+          - The file name of an ISO image from one of the XenServer ISO Libraries (implies O(cdrom.type=iso)).
          - Required if O(cdrom.type) is set to V(iso).
        type: str
  networks:
@@ -211,13 +216,14 @@
      type:
        description:
          - Type of IPv4 assignment. Value V(none) means whatever is default for OS.
-          - On some operating systems it could be DHCP configured (for example Windows) or unconfigured interface (for example Linux).
+          - On some operating systems it could be DHCP configured (for example Windows) or unconfigured interface (for example
+            Linux).
        type: str
        choices: [none, dhcp, static]
      ip:
        description:
-          - Static IPv4 address (implies O(networks[].type=static)). Can include prefix in format C(<IPv4 address>/<prefix>) instead of using
-            C(netmask).
+          - Static IPv4 address (implies O(networks[].type=static)). Can include prefix in format C(<IPv4 address>/<prefix>)
+            instead of using C(netmask).
        type: str
      netmask:
        description:
@@ -234,7 +240,7 @@
        choices: [none, dhcp, static]
      ip6:
        description:
-          - 'Static IPv6 address (implies O(networks[].type6=static)) with prefix in format C(<IPv6 address>/<prefix>).'
+          - Static IPv6 address (implies O(networks[].type6=static)) with prefix in format C(<IPv6 address>/<prefix>).
        type: str
      gateway6:
        description:
@@ -242,7 +248,7 @@
    type: str
  home_server:
    description:
-      - Name of a XenServer host that will be a Home Server for the VM.
+      - Name of a XenServer host that is a Home Server for the VM.
      - This parameter is case sensitive.
    type: str
  custom_params:
@@ -271,14 +277,15 @@
    default: false
  state_change_timeout:
    description:
-      - 'By default, module will wait indefinitely for VM to acquire an IP address if O(wait_for_ip_address=true).'
-      - If this parameter is set to positive value, the module will instead wait specified number of seconds for the state change.
- - In case of timeout, module will generate an error message. + - By default, the module waits indefinitely for VM to acquire an IP address if O(wait_for_ip_address=true). + - If this parameter is set to a positive value, the module instead waits the specified number of seconds for the state + change. + - In case of timeout, module generates an error message. type: int default: 0 linked_clone: description: - - Whether to create a Linked Clone from the template, existing VM or snapshot. If no, will create a full copy. + - Whether to create a Linked Clone from the template, existing VM or snapshot. If V(false), it creates a full copy. - This is equivalent to C(Use storage-level fast disk clone) option in XenCenter. type: bool default: false @@ -387,141 +394,143 @@ instance: description: Metadata about the VM. returned: always type: dict - sample: { - "cdrom": { - "type": "none" - }, - "customization_agent": "native", - "disks": [ - { - "name": "testvm_11-0", - "name_desc": "", - "os_device": "xvda", - "size": 42949672960, - "sr": "Local storage", - "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", - "vbd_userdevice": "0" + sample: + { + "cdrom": { + "type": "none" }, - { - "name": "testvm_11-1", - "name_desc": "", - "os_device": "xvdb", - "size": 42949672960, - "sr": "Local storage", - "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", - "vbd_userdevice": "1" + "customization_agent": "native", + "disks": [ + { + "name": "testvm_11-0", + "name_desc": "", + "os_device": "xvda", + "size": 42949672960, + "sr": "Local storage", + "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", + "vbd_userdevice": "0" + }, + { + "name": "testvm_11-1", + "name_desc": "", + "os_device": "xvdb", + "size": 42949672960, + "sr": "Local storage", + "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", + "vbd_userdevice": "1" + } + ], + "domid": "56", + "folder": "", + "hardware": { + "memory_mb": 8192, + "num_cpu_cores_per_socket": 2, + "num_cpus": 4 + }, + "home_server": "", + "is_template": false, + "name": "testvm_11", + "name_desc": "", + "networks": [ + { + "gateway": "192.168.0.254", + "gateway6": "fc00::fffe", + "ip": "192.168.0.200", + "ip6": [ + "fe80:0000:0000:0000:e9cb:625a:32c5:c291", + "fc00:0000:0000:0000:0000:0000:0000:0001" + ], + "mac": "ba:91:3a:48:20:76", + "mtu": "1500", + "name": "Pool-wide network associated with eth1", + "netmask": "255.255.255.128", + "prefix": "25", + "prefix6": "64", + "vif_device": "0" + } + ], + "other_config": { + "base_template_name": "Windows Server 2016 (64-bit)", + "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5", + "install-methods": "cdrom", + "instant": "true", + "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e" + }, + "platform": { + "acpi": "1", + "apic": "true", + "cores-per-socket": "2", + "device_id": "0002", + "hpet": "true", + "nx": "true", + "pae": "true", + "timeoffset": "-25200", + "vga": "std", + "videoram": "8", + "viridian": "true", + "viridian_reference_tsc": "true", + "viridian_time_ref_count": "true" + }, + "state": "poweredon", + "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda", + "xenstore_data": { + "vm-data": "" } - ], - "domid": "56", - "folder": "", - "hardware": { - "memory_mb": 8192, - "num_cpu_cores_per_socket": 2, - "num_cpus": 4 - }, - "home_server": "", - "is_template": false, - "name": "testvm_11", - "name_desc": "", - "networks": [ - { - "gateway": "192.168.0.254", - "gateway6": "fc00::fffe", - "ip": "192.168.0.200", - "ip6": [ - "fe80:0000:0000:0000:e9cb:625a:32c5:c291", - "fc00:0000:0000:0000:0000:0000:0000:0001" - ], - 
"mac": "ba:91:3a:48:20:76", - "mtu": "1500", - "name": "Pool-wide network associated with eth1", - "netmask": "255.255.255.128", - "prefix": "25", - "prefix6": "64", - "vif_device": "0" - } - ], - "other_config": { - "base_template_name": "Windows Server 2016 (64-bit)", - "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5", - "install-methods": "cdrom", - "instant": "true", - "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e" - }, - "platform": { - "acpi": "1", - "apic": "true", - "cores-per-socket": "2", - "device_id": "0002", - "hpet": "true", - "nx": "true", - "pae": "true", - "timeoffset": "-25200", - "vga": "std", - "videoram": "8", - "viridian": "true", - "viridian_reference_tsc": "true", - "viridian_time_ref_count": "true" - }, - "state": "poweredon", - "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda", - "xenstore_data": { - "vm-data": "" } - } changes: description: Detected or made changes to VM. returned: always type: list - sample: [ - { - "hardware": [ - "num_cpus" - ] - }, - { - "disks_changed": [ - [], - [ - "size" + sample: + [ + { + "hardware": [ + "num_cpus" ] - ] - }, - { - "disks_new": [ - { - "name": "new-disk", - "name_desc": "", - "position": 2, - "size_gb": "4", - "vbd_userdevice": "2" - } - ] - }, - { - "cdrom": [ - "type", - "iso_name" - ] - }, - { - "networks_changed": [ - [ - "mac" - ], - ] - }, - { - "networks_new": [ - { - "name": "Pool-wide network associated with eth2", - "position": 1, - "vif_device": "1" - } - ] - }, - "need_poweredoff" - ] + }, + { + "disks_changed": [ + [], + [ + "size" + ] + ] + }, + { + "disks_new": [ + { + "name": "new-disk", + "name_desc": "", + "position": 2, + "size_gb": "4", + "vbd_userdevice": "2" + } + ] + }, + { + "cdrom": [ + "type", + "iso_name" + ] + }, + { + "networks_changed": [ + [ + "mac" + ] + ] + }, + { + "networks_new": [ + { + "name": "Pool-wide network associated with eth2", + "position": 1, + "vif_device": "1" + } + ] + }, + "need_poweredoff" + ] """ import re diff --git a/plugins/modules/xenserver_guest_info.py b/plugins/modules/xenserver_guest_info.py index 10cd11839c..d11a69025b 100644 --- a/plugins/modules/xenserver_guest_info.py +++ b/plugins/modules/xenserver_guest_info.py @@ -17,21 +17,22 @@ author: notes: - Minimal supported version of XenServer is 5.6. - Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0. - - 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside Citrix - Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the C(XenAPI.py) file from the SDK to your Python site-packages on your Ansible - Control Node to use it. Latest version of the library can also be acquired from GitHub: - U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)' - - 'If no scheme is specified in C(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you - are accessing XenServer host in trusted environment or use C(https://) scheme explicitly.' - - 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use O(validate_certs=no) which - requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.' + - 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be + found inside Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). 
Copy the C(XenAPI.py) file from the SDK + to your Python site-packages on your Ansible Control Node to use it. Latest version of the library can also be acquired + from GitHub: U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py).' + - If no scheme is specified in C(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. + Make sure you are accessing XenServer host in trusted environment or use C(https://) scheme explicitly. + - To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use + O(validate_certs=no) which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer. requirements: - XenAPI options: name: description: - Name of the VM to gather facts from. - - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with same name are found. + - VMs running on XenServer do not necessarily have unique names. The module fails if multiple VMs with same name are + found. - In case of multiple VMs with same name, use O(uuid) to uniquely specify VM to manage. - This parameter is case sensitive. type: str @@ -60,90 +61,91 @@ EXAMPLES = r""" RETURN = r""" instance: - description: Metadata about the VM. - returned: always - type: dict - sample: { - "cdrom": { - "type": "none" + description: Metadata about the VM. + returned: always + type: dict + sample: + { + "cdrom": { + "type": "none" + }, + "customization_agent": "native", + "disks": [ + { + "name": "testvm_11-0", + "name_desc": "", + "os_device": "xvda", + "size": 42949672960, + "sr": "Local storage", + "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", + "vbd_userdevice": "0" }, - "customization_agent": "native", - "disks": [ - { - "name": "testvm_11-0", - "name_desc": "", - "os_device": "xvda", - "size": 42949672960, - "sr": "Local storage", - "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", - "vbd_userdevice": "0" - }, - { - "name": "testvm_11-1", - "name_desc": "", - "os_device": "xvdb", - "size": 42949672960, - "sr": "Local storage", - "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", - "vbd_userdevice": "1" - } - ], - "domid": "56", - "folder": "", - "hardware": { - "memory_mb": 8192, - "num_cpu_cores_per_socket": 2, - "num_cpus": 4 - }, - "home_server": "", - "is_template": false, - "name": "testvm_11", - "name_desc": "", - "networks": [ - { - "gateway": "192.168.0.254", - "gateway6": "fc00::fffe", - "ip": "192.168.0.200", - "ip6": [ - "fe80:0000:0000:0000:e9cb:625a:32c5:c291", - "fc00:0000:0000:0000:0000:0000:0000:0001" - ], - "mac": "ba:91:3a:48:20:76", - "mtu": "1500", - "name": "Pool-wide network associated with eth1", - "netmask": "255.255.255.128", - "prefix": "25", - "prefix6": "64", - "vif_device": "0" - } - ], - "other_config": { - "base_template_name": "Windows Server 2016 (64-bit)", - "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5", - "install-methods": "cdrom", - "instant": "true", - "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e" - }, - "platform": { - "acpi": "1", - "apic": "true", - "cores-per-socket": "2", - "device_id": "0002", - "hpet": "true", - "nx": "true", - "pae": "true", - "timeoffset": "-25200", - "vga": "std", - "videoram": "8", - "viridian": "true", - "viridian_reference_tsc": "true", - "viridian_time_ref_count": "true" - }, - "state": "poweredon", - "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda", - "xenstore_data": { - "vm-data": "" + { + "name": "testvm_11-1", + 
"name_desc": "", + "os_device": "xvdb", + "size": 42949672960, + "sr": "Local storage", + "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", + "vbd_userdevice": "1" } + ], + "domid": "56", + "folder": "", + "hardware": { + "memory_mb": 8192, + "num_cpu_cores_per_socket": 2, + "num_cpus": 4 + }, + "home_server": "", + "is_template": false, + "name": "testvm_11", + "name_desc": "", + "networks": [ + { + "gateway": "192.168.0.254", + "gateway6": "fc00::fffe", + "ip": "192.168.0.200", + "ip6": [ + "fe80:0000:0000:0000:e9cb:625a:32c5:c291", + "fc00:0000:0000:0000:0000:0000:0000:0001" + ], + "mac": "ba:91:3a:48:20:76", + "mtu": "1500", + "name": "Pool-wide network associated with eth1", + "netmask": "255.255.255.128", + "prefix": "25", + "prefix6": "64", + "vif_device": "0" + } + ], + "other_config": { + "base_template_name": "Windows Server 2016 (64-bit)", + "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5", + "install-methods": "cdrom", + "instant": "true", + "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e" + }, + "platform": { + "acpi": "1", + "apic": "true", + "cores-per-socket": "2", + "device_id": "0002", + "hpet": "true", + "nx": "true", + "pae": "true", + "timeoffset": "-25200", + "vga": "std", + "videoram": "8", + "viridian": "true", + "viridian_reference_tsc": "true", + "viridian_time_ref_count": "true" + }, + "state": "poweredon", + "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda", + "xenstore_data": { + "vm-data": "" + } } """ diff --git a/plugins/modules/xenserver_guest_powerstate.py b/plugins/modules/xenserver_guest_powerstate.py index 86a21b56dc..c3436300fe 100644 --- a/plugins/modules/xenserver_guest_powerstate.py +++ b/plugins/modules/xenserver_guest_powerstate.py @@ -11,20 +11,21 @@ __metaclass__ = type DOCUMENTATION = r""" module: xenserver_guest_powerstate short_description: Manages power states of virtual machines running on Citrix Hypervisor/XenServer host or pool -description: This module can be used to power on, power off, restart or suspend virtual machine and gracefully reboot or shutdown guest OS of virtual machine. +description: This module can be used to power on, power off, restart or suspend virtual machine and gracefully reboot or shutdown + guest OS of virtual machine. author: - Bojan Vitnik (@bvitnik) notes: - Minimal supported version of XenServer is 5.6. - Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0. - - 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside Citrix - Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the C(XenAPI.py) file from the SDK to your Python site-packages on your Ansible - Control Node to use it. Latest version of the library can also be acquired from GitHub: - U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py).' - - 'If no scheme is specified in C(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you - are accessing XenServer host in trusted environment or use C(https://) scheme explicitly.' - - 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use C(validate_certs: - no) which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.' + - 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. 
The library can also be + found inside Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the C(XenAPI.py) file from the SDK + to your Python site-packages on your Ansible Control Node to use it. Latest version of the library can also be acquired + from GitHub: U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py).' + - If no scheme is specified in C(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. + Make sure you are accessing XenServer host in trusted environment or use C(https://) scheme explicitly. + - 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or + use C(validate_certs: no) which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.' requirements: - XenAPI attributes: @@ -44,7 +45,8 @@ options: name: description: - Name of the VM to manage. - - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with same name are found. + - VMs running on XenServer do not necessarily have unique names. The module fails if multiple VMs with same name are + found. - In case of multiple VMs with same name, use O(uuid) to uniquely specify VM to manage. - This parameter is case sensitive. type: str @@ -62,9 +64,9 @@ options: default: false state_change_timeout: description: - - 'By default, module will wait indefinitely for VM to change state or acquire an IP address if O(wait_for_ip_address=true).' - - If this parameter is set to positive value, the module will instead wait specified number of seconds for the state change. - - In case of timeout, module will generate an error message. + - By default, module waits indefinitely for VM to change state or acquire an IP address if O(wait_for_ip_address=true). + - If this parameter is set to positive value, the module instead waits specified number of seconds for the state change. + - In case of timeout, module generates an error message. type: int default: 0 extends_documentation_fragment: @@ -86,90 +88,91 @@ EXAMPLES = r""" RETURN = r""" instance: - description: Metadata about the VM. - returned: always - type: dict - sample: { - "cdrom": { - "type": "none" + description: Metadata about the VM. 
+ returned: always + type: dict + sample: + { + "cdrom": { + "type": "none" + }, + "customization_agent": "native", + "disks": [ + { + "name": "windows-template-testing-0", + "name_desc": "", + "os_device": "xvda", + "size": 42949672960, + "sr": "Local storage", + "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", + "vbd_userdevice": "0" }, - "customization_agent": "native", - "disks": [ - { - "name": "windows-template-testing-0", - "name_desc": "", - "os_device": "xvda", - "size": 42949672960, - "sr": "Local storage", - "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", - "vbd_userdevice": "0" - }, - { - "name": "windows-template-testing-1", - "name_desc": "", - "os_device": "xvdb", - "size": 42949672960, - "sr": "Local storage", - "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", - "vbd_userdevice": "1" - } - ], - "domid": "56", - "folder": "", - "hardware": { - "memory_mb": 8192, - "num_cpu_cores_per_socket": 2, - "num_cpus": 4 - }, - "home_server": "", - "is_template": false, - "name": "windows-template-testing", - "name_desc": "", - "networks": [ - { - "gateway": "192.168.0.254", - "gateway6": "fc00::fffe", - "ip": "192.168.0.200", - "ip6": [ - "fe80:0000:0000:0000:e9cb:625a:32c5:c291", - "fc00:0000:0000:0000:0000:0000:0000:0001" - ], - "mac": "ba:91:3a:48:20:76", - "mtu": "1500", - "name": "Pool-wide network associated with eth1", - "netmask": "255.255.255.128", - "prefix": "25", - "prefix6": "64", - "vif_device": "0" - } - ], - "other_config": { - "base_template_name": "Windows Server 2016 (64-bit)", - "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5", - "install-methods": "cdrom", - "instant": "true", - "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e" - }, - "platform": { - "acpi": "1", - "apic": "true", - "cores-per-socket": "2", - "device_id": "0002", - "hpet": "true", - "nx": "true", - "pae": "true", - "timeoffset": "-25200", - "vga": "std", - "videoram": "8", - "viridian": "true", - "viridian_reference_tsc": "true", - "viridian_time_ref_count": "true" - }, - "state": "poweredon", - "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda", - "xenstore_data": { - "vm-data": "" + { + "name": "windows-template-testing-1", + "name_desc": "", + "os_device": "xvdb", + "size": 42949672960, + "sr": "Local storage", + "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", + "vbd_userdevice": "1" } + ], + "domid": "56", + "folder": "", + "hardware": { + "memory_mb": 8192, + "num_cpu_cores_per_socket": 2, + "num_cpus": 4 + }, + "home_server": "", + "is_template": false, + "name": "windows-template-testing", + "name_desc": "", + "networks": [ + { + "gateway": "192.168.0.254", + "gateway6": "fc00::fffe", + "ip": "192.168.0.200", + "ip6": [ + "fe80:0000:0000:0000:e9cb:625a:32c5:c291", + "fc00:0000:0000:0000:0000:0000:0000:0001" + ], + "mac": "ba:91:3a:48:20:76", + "mtu": "1500", + "name": "Pool-wide network associated with eth1", + "netmask": "255.255.255.128", + "prefix": "25", + "prefix6": "64", + "vif_device": "0" + } + ], + "other_config": { + "base_template_name": "Windows Server 2016 (64-bit)", + "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5", + "install-methods": "cdrom", + "instant": "true", + "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e" + }, + "platform": { + "acpi": "1", + "apic": "true", + "cores-per-socket": "2", + "device_id": "0002", + "hpet": "true", + "nx": "true", + "pae": "true", + "timeoffset": "-25200", + "vga": "std", + "videoram": "8", + "viridian": "true", + "viridian_reference_tsc": "true", + "viridian_time_ref_count": "true" + }, + "state": 
"poweredon", + "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda", + "xenstore_data": { + "vm-data": "" + } } """ diff --git a/plugins/modules/xfconf.py b/plugins/modules/xfconf.py index c13f7b7f45..1cae7fb12b 100644 --- a/plugins/modules/xfconf.py +++ b/plugins/modules/xfconf.py @@ -37,28 +37,30 @@ attributes: options: channel: description: - - A Xfconf preference channel is a top-level tree key, inside of the Xfconf repository that corresponds to the location for which all application - properties/keys are stored. See man xfconf-query(1). + - A Xfconf preference channel is a top-level tree key, inside of the Xfconf repository that corresponds to the location + for which all application properties/keys are stored. See man xfconf-query(1). required: true type: str property: description: - - A Xfce preference key is an element in the Xfconf repository that corresponds to an application preference. See man xfconf-query(1). + - A Xfce preference key is an element in the Xfconf repository that corresponds to an application preference. See man + xfconf-query(1). required: true type: str value: description: - - Preference properties typically have simple values such as strings, integers, or lists of strings and integers. See man xfconf-query(1). + - Preference properties typically have simple values such as strings, integers, or lists of strings and integers. See + man xfconf-query(1). type: list elements: raw value_type: description: - The type of value being set. - When providing more than one O(value_type), the length of the list must be equal to the length of O(value). - - If only one O(value_type) is provided, but O(value) contains more than on element, that O(value_type) will be applied to all elements - of O(value). - - If the O(property) being set is an array and it can possibly have only one element in the array, then O(force_array=true) must be used - to ensure that C(xfconf-query) will interpret the value as an array rather than a scalar. + - If only one O(value_type) is provided, but O(value) contains more than on element, that O(value_type) is applied to + all elements of O(value). + - If the O(property) being set is an array and it can possibly have only one element in the array, then O(force_array=true) + must be used to ensure that C(xfconf-query) interprets the value as an array rather than a scalar. - Support for V(uchar), V(char), V(uint64), and V(int64) has been added in community.general 4.8.0. type: list elements: str @@ -67,7 +69,8 @@ options: type: str description: - The action to take upon the property/value. - - The state V(get) has been removed in community.general 5.0.0. Please use the module M(community.general.xfconf_info) instead. + - The state V(get) has been removed in community.general 5.0.0. Please use the module M(community.general.xfconf_info) + instead. choices: [present, absent] default: "present" force_array: @@ -116,21 +119,24 @@ property: sample: "/Xft/DPI" value_type: description: - - The type of the value that was changed (V(none) for O(state=reset)). Either a single string value or a list of strings for array types. + - The type of the value that was changed (V(none) for O(state=reset)). Either a single string value or a list of strings + for array types. - This is a string or a list of strings. returned: success type: any sample: '"int" or ["str", "str", "str"]' value: description: - - The value of the preference key after executing the module. Either a single string value or a list of strings for array types. 
+ - The value of the preference key after executing the module. Either a single string value or a list of strings for array + types. - This is a string or a list of strings. returned: success type: any sample: "'192' or ['orange', 'yellow', 'violet']" previous_value: description: - - The value of the preference key before executing the module. Either a single string value or a list of strings for array types. + - The value of the preference key before executing the module. Either a single string value or a list of strings for array + types. - This is a string or a list of strings. returned: success type: any @@ -172,7 +178,7 @@ class XFConfProperty(StateModuleHelper): output_params = ('property', 'channel', 'value') module = dict( argument_spec=dict( - state=dict(type='str', choices=("present", "absent"), default="present"), + state=dict(type='str', choices=('present', 'absent'), default='present'), channel=dict(type='str', required=True), property=dict(type='str', required=True), value_type=dict(type='list', elements='str', @@ -184,9 +190,6 @@ class XFConfProperty(StateModuleHelper): required_together=[('value', 'value_type')], supports_check_mode=True, ) - use_old_vardict = False - - default_state = 'present' def __init_module__(self): self.runner = xfconf_runner(self.module) @@ -203,8 +206,8 @@ class XFConfProperty(StateModuleHelper): self.do_raise('xfconf-query failed with error (rc={0}): {1}'.format(rc, err)) result = out.rstrip() - if "Value is an array with" in result: - result = result.split("\n") + if 'Value is an array with' in result: + result = result.split('\n') result.pop(0) result.pop(0) @@ -220,7 +223,7 @@ class XFConfProperty(StateModuleHelper): self.vars.stdout = ctx.results_out self.vars.stderr = ctx.results_err self.vars.cmd = ctx.cmd - self.vars.set("run_info", ctx.run_info, verbosity=4) + self.vars.set('run_info', ctx.run_info, verbosity=4) self.vars.value = None def state_present(self): @@ -250,7 +253,7 @@ class XFConfProperty(StateModuleHelper): self.vars.stdout = ctx.results_out self.vars.stderr = ctx.results_err self.vars.cmd = ctx.cmd - self.vars.set("run_info", ctx.run_info, verbosity=4) + self.vars.set('run_info', ctx.run_info, verbosity=4) if not self.vars.is_array: self.vars.value = self.vars.value[0] diff --git a/plugins/modules/xfconf_info.py b/plugins/modules/xfconf_info.py index 36de7daecc..74bebf35cb 100644 --- a/plugins/modules/xfconf_info.py +++ b/plugins/modules/xfconf_info.py @@ -25,15 +25,15 @@ attributes: options: channel: description: - - "A Xfconf preference channel is a top-level tree key, inside of the Xfconf repository that corresponds to the location for which all application - properties/keys are stored." - - If not provided, the module will list all available channels. + - A Xfconf preference channel is a top-level tree key, inside of the Xfconf repository that corresponds to the location + for which all application properties/keys are stored. + - If not provided, the module lists all available channels. type: str property: description: - - "A Xfce preference key is an element in the Xfconf repository that corresponds to an application preference." + - A Xfce preference key is an element in the Xfconf repository that corresponds to an application preference. - If provided, then O(channel) is required. - - If not provided and a O(channel) is provided, then the module will list all available properties in that O(channel). + - If not provided and a O(channel) is provided, then the module lists all available properties in that O(channel). 
type: str notes: - See man xfconf-query(1) for more details. @@ -142,7 +142,6 @@ class XFConfInfo(ModuleHelper): ), supports_check_mode=True, ) - use_old_vardict = False def __init_module__(self): self.runner = xfconf_runner(self.module, check_rc=True) diff --git a/plugins/modules/xml.py b/plugins/modules/xml.py index a28e5dcefc..3a41cd8514 100644 --- a/plugins/modules/xml.py +++ b/plugins/modules/xml.py @@ -69,8 +69,8 @@ options: add_children: description: - Add additional child-element(s) to a selected element for a given O(xpath). - - Child elements must be given in a list and each item may be either a string (for example C(children=ansible) to add an empty C() - child element), or a hash where the key is an element name and the value is the element value. + - Child elements must be given in a list and each item may be either a string (for example C(children=ansible) to add + an empty C() child element), or a hash where the key is an element name and the value is the element value. - This parameter requires O(xpath) to be set. type: list elements: raw @@ -113,7 +113,8 @@ options: default: yaml backup: description: - - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly. + - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered + it incorrectly. type: bool default: false strip_cdata_tags: @@ -125,16 +126,16 @@ options: insertbefore: description: - Add additional child-element(s) before the first selected element for a given O(xpath). - - Child elements must be given in a list and each item may be either a string (for example C(children=ansible) to add an empty C() - child element), or a hash where the key is an element name and the value is the element value. + - Child elements must be given in a list and each item may be either a string (for example C(children=ansible) to add + an empty C() child element), or a hash where the key is an element name and the value is the element value. - This parameter requires O(xpath) to be set. type: bool default: false insertafter: description: - Add additional child-element(s) after the last selected element for a given O(xpath). - - Child elements must be given in a list and each item may be either a string (for example C(children=ansible) to add an empty C() - child element), or a hash where the key is an element name and the value is the element value. + - Child elements must be given in a list and each item may be either a string (for example C(children=ansible) to add + an empty C() child element), or a hash where the key is an element name and the value is the element value. - This parameter requires O(xpath) to be set. type: bool default: false @@ -145,7 +146,8 @@ notes: - The diff output is automatically pretty-printed, so may not reflect the actual file content, only the file structure. - This module does not handle complicated xpath expressions, so limit xpath selectors to simple expressions. - Beware that in case your XML elements are namespaced, you need to use the O(namespaces) parameter, see the examples. - - Namespaces prefix should be used for all children of an element where namespace is defined, unless another namespace is defined for them. + - Namespaces prefix should be used for all children of an element where namespace is defined, unless another namespace is + defined for them. 
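To make the namespace note above concrete, here is a tiny standalone sketch (plain lxml, not the module itself; the document content and prefix names are invented) of how a prefix declared in a C(namespaces)-style mapping must be used for every namespaced element and attribute in an XPath:

```python
from lxml import etree

doc = etree.fromstring(
    '<b:business xmlns:b="http://test.business" xmlns:f="http://test.flavor">'
    '<b:rating f:subjective="true">10</b:rating></b:business>'
)
# Same idea as the module's O(namespaces) option: map prefixes to URIs,
# then use the prefix for every child that lives in that namespace.
ns = {'b': 'http://test.business', 'f': 'http://test.flavor'}
print(doc.xpath('/b:business/b:rating/@f:subjective', namespaces=ns))  # ['true']
```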
 seealso:
   - name: XML module development community wiki (archived)
     description: More information related to the development of this xml module.
@@ -296,10 +298,12 @@ EXAMPLES = r"""
     xpath: /business
     add_children:
       - building:
           # Attributes
           name: Scumm bar
           location: Monkey island
+          # Value
+          +value: unreal
           # Subnodes
           _:
             - floor: Pirate hall
             - floor: Grog storage
@@ -329,12 +333,15 @@
 actions:
   description: A dictionary with the original xpath, namespaces and state.
   type: dict
   returned: success
-  sample: {xpath: xpath, namespaces: [namespace1, namespace2], state: present}
-backup_file:
-  description: The name of the backup file that was created.
-  type: str
-  returned: when O(backup=true)
-  sample: /path/to/file.xml.1942.2017-08-24@14:16:01~
+  sample:
+    {
+      "xpath": "xpath",
+      "namespaces": [
+        "namespace1",
+        "namespace2"
+      ],
+      "state": "present"
+    }
 count:
   description: The count of xpath matches.
   type: int
@@ -344,10 +351,6 @@
 matches:
   description: The xpath matches found.
   type: list
   returned: when parameter O(print_match) is set
-msg:
-  description: A message related to the performed action(s).
-  type: str
-  returned: always
 xmlstring:
   description: An XML string of the resulting output.
   type: str
@@ -631,7 +634,7 @@ def check_or_make_target(module, tree, xpath, namespaces):
             # module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True))
         elif eoa == "":
             for node in tree.xpath(inner_xpath, namespaces=namespaces):
-                if (node.text != eoa_value):
+                if node.text != eoa_value:
                     node.text = eoa_value
                     changed = True
@@ -754,6 +757,7 @@ def child_to_element(module, child, in_type):
         (key, value) = next(iteritems(child))
         if isinstance(value, MutableMapping):
             children = value.pop('_', None)
+            child_value = value.pop('+value', None)
 
             node = etree.Element(key, value)
@@ -763,6 +767,9 @@
                 subnodes = children_to_nodes(module, children)
                 node.extend(subnodes)
+
+                if child_value is not None:
+                    node.text = child_value
         else:
             node = etree.Element(key)
             node.text = value
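The C(child_to_element()) change above is the mechanics behind the new C(+value) key in the example hunk: a child given as a hash maps plain keys to attributes, the special C(_) key to subnodes, and now C(+value) to the element text. A minimal standalone re-creation of that mapping (illustration only, not the module's code; it skips the module's error handling and type checks):

```python
from lxml import etree

def build_child(spec):
    # spec is either a plain string (empty element) or a one-key dict such as
    # {'building': {'name': 'Scumm bar', '+value': 'unreal', '_': [...]}}
    if isinstance(spec, str):
        return etree.Element(spec)
    (tag, value), = spec.items()
    if isinstance(value, dict):
        children = value.pop('_', None)        # subnodes
        text = value.pop('+value', None)       # the new '+value' key from this diff
        node = etree.Element(tag, value)       # remaining keys become attributes
        if children:
            node.extend(build_child(c) for c in children)
        if text is not None:
            node.text = text
        return node
    node = etree.Element(tag)
    node.text = value
    return node

print(etree.tostring(build_child({'building': {'name': 'Scumm bar', '+value': 'unreal'}})))
# b'<building name="Scumm bar">unreal</building>'
```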
@@ -918,29 +925,34 @@ def main():
     elif LooseVersion('.'.join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('3.0.0'):
         module.warn('Using lxml version lower than 3.0.0 does not guarantee predictable element attribute order.')
 
-    # Check if the file exists
-    if xml_string:
-        infile = BytesIO(to_bytes(xml_string, errors='surrogate_or_strict'))
-    elif os.path.isfile(xml_file):
-        infile = open(xml_file, 'rb')
-    else:
-        module.fail_json(msg="The target XML source '%s' does not exist." % xml_file)
-
-    # Parse and evaluate xpath expression
-    if xpath is not None:
-        try:
-            etree.XPath(xpath)
-        except etree.XPathSyntaxError as e:
-            module.fail_json(msg="Syntax error in xpath expression: %s (%s)" % (xpath, e))
-        except etree.XPathEvalError as e:
-            module.fail_json(msg="Evaluation error in xpath expression: %s (%s)" % (xpath, e))
-
-    # Try to parse in the target XML file
+    infile = None
     try:
-        parser = etree.XMLParser(remove_blank_text=pretty_print, strip_cdata=strip_cdata_tags)
-        doc = etree.parse(infile, parser)
-    except etree.XMLSyntaxError as e:
-        module.fail_json(msg="Error while parsing document: %s (%s)" % (xml_file or 'xml_string', e))
+        # Check if the file exists
+        if xml_string:
+            infile = BytesIO(to_bytes(xml_string, errors='surrogate_or_strict'))
+        elif os.path.isfile(xml_file):
+            infile = open(xml_file, 'rb')
+        else:
+            module.fail_json(msg="The target XML source '%s' does not exist." % xml_file)
+
+        # Parse and evaluate xpath expression
+        if xpath is not None:
+            try:
+                etree.XPath(xpath)
+            except etree.XPathSyntaxError as e:
+                module.fail_json(msg="Syntax error in xpath expression: %s (%s)" % (xpath, e))
+            except etree.XPathEvalError as e:
+                module.fail_json(msg="Evaluation error in xpath expression: %s (%s)" % (xpath, e))
+
+        # Try to parse in the target XML file
+        try:
+            parser = etree.XMLParser(remove_blank_text=pretty_print, strip_cdata=strip_cdata_tags)
+            doc = etree.parse(infile, parser)
+        except etree.XMLSyntaxError as e:
+            module.fail_json(msg="Error while parsing document: %s (%s)" % (xml_file or 'xml_string', e))
+    finally:
+        if infile:
+            infile.close()
 
     # Ensure we have the original copy to compare
     global orig_doc
diff --git a/plugins/modules/yarn.py b/plugins/modules/yarn.py
index 7109145ce8..75b624e9d2 100644
--- a/plugins/modules/yarn.py
+++ b/plugins/modules/yarn.py
@@ -16,6 +16,7 @@ module: yarn
 short_description: Manage Node.js packages with Yarn
 description:
   - Manage Node.js packages with the Yarn package manager U(https://yarnpkg.com/).
+  - Note that at the moment, this module B(only works with Yarn Classic).
 author:
   - "David Gunter (@verkaufer)"
   - "Chris Hoffman (@chrishoffman), creator of NPM Ansible module)"
@@ -37,7 +38,7 @@ options:
   path:
     type: path
     description:
-      - The base path where Node.js libraries will be installed.
+      - The base path where Node.js libraries are installed.
       - This is where the C(node_modules) folder lives.
     required: false
   version:
@@ -66,7 +67,7 @@ options:
   production:
     description:
       - Install dependencies in production mode.
-      - Yarn will ignore any dependencies under devDependencies in C(package.json).
+      - C(yarn) ignores any dependencies under devDependencies in C(package.json).
     required: false
     type: bool
     default: false
@@ -84,7 +85,7 @@ options:
     default: present
     choices: ["present", "absent", "latest"]
 requirements:
-  - Yarn installed in bin path (typically C(/usr/local/bin))
+  - Yarn Classic installed in bin path (typically C(/usr/local/bin))
 """
 
 EXAMPLES = r"""
@@ -130,8 +131,8 @@ out:
   description: Output generated from Yarn.
   returned: always
   type: str
-  sample: "yarn add v0.16.1[1/4] Resolving packages...[2/4] Fetching packages...[3/4] Linking dependencies...[4/4] Building fresh packages...success
-    Saved lockfile.success Saved 1 new dependency..left-pad@1.1.3 Done in 0.59s."
+  sample: "yarn add v0.16.1[1/4] Resolving packages...[2/4] Fetching packages...[3/4] Linking dependencies...[4/4] Building
+    fresh packages...success Saved lockfile.success Saved 1 new dependency..left-pad@1.1.3 Done in 0.59s."
 """
 
 import os
@@ -282,12 +283,12 @@ class Yarn(object):
 
 def main():
     arg_spec = dict(
-        name=dict(default=None),
-        path=dict(default=None, type='path'),
-        version=dict(default=None),
+        name=dict(),
+        path=dict(type='path'),
+        version=dict(),
         production=dict(default=False, type='bool'),
-        executable=dict(default=None, type='path'),
-        registry=dict(default=None),
+        executable=dict(type='path'),
+        registry=dict(),
         state=dict(default='present', choices=['present', 'absent', 'latest']),
         ignore_scripts=dict(default=False, type='bool'),
     )
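The C(arg_spec) cleanup above is behavior-preserving. A quick sketch of why (a simplified stand-in for AnsibleModule's parameter handling, not the real implementation): an option spec without a C(default) key behaves exactly as if C(default=None) had been spelled out.

```python
def resolve(argument_spec, params):
    # Simplified stand-in for how AnsibleModule fills in unspecified
    # parameters: a missing 'default' key is treated as default=None.
    return {name: params.get(name, spec.get('default'))
            for name, spec in argument_spec.items()}

old_style = dict(name=dict(default=None), registry=dict(default=None))
new_style = dict(name=dict(), registry=dict())
assert resolve(old_style, {}) == resolve(new_style, {}) == {'name': None, 'registry': None}
```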
diff --git a/plugins/modules/yum_versionlock.py b/plugins/modules/yum_versionlock.py
index 4a618a9d17..183ffdc6fe 100644
--- a/plugins/modules/yum_versionlock.py
+++ b/plugins/modules/yum_versionlock.py
@@ -31,8 +31,8 @@ options:
     elements: str
   state:
     description:
-      - If state is V(present), package(s) will be added to yum versionlock list.
-      - If state is V(absent), package(s) will be removed from yum versionlock list.
+      - If state is V(present), the package(s) are added to the yum versionlock list.
+      - If state is V(absent), the package(s) are removed from the yum versionlock list.
     choices: ['absent', 'present']
     type: str
     default: present
@@ -80,7 +80,7 @@ packages:
   returned: success
   type: list
   elements: str
-  sample: ['httpd']
+  sample: ["httpd"]
 state:
   description: State of package(s).
   returned: success
diff --git a/plugins/modules/zfs.py b/plugins/modules/zfs.py
index 1b00010d8a..29910310b3 100644
--- a/plugins/modules/zfs.py
+++ b/plugins/modules/zfs.py
@@ -20,9 +20,9 @@ attributes:
   check_mode:
     support: partial
     details:
-      - In certain situations it may report a task as changed that will not be reported as changed when C(check_mode) is disabled.
-      - For example, this might occur when the zpool C(altroot) option is set or when a size is written using human-readable notation, such as
-        V(1M) or V(1024K), instead of as an unqualified byte count, such as V(1048576).
+      - In certain situations it may report a task as changed that is not reported as changed when C(check_mode) is disabled.
+      - For example, this might occur when the zpool C(altroot) option is set or when a size is written using human-readable
+        notation, such as V(1M) or V(1024K), instead of as an unqualified byte count, such as V(1048576).
   diff_mode:
     support: full
 options:
@@ -33,8 +33,8 @@ options:
     type: str
   state:
     description:
-      - Whether to create (V(present)), or remove (V(absent)) a file system, snapshot or volume. All parents/children will be created/destroyed
-        as needed to reach the desired state.
+      - Whether to create (V(present)), or remove (V(absent)) a file system, snapshot or volume. All parents/children are
+        created/destroyed as needed to reach the desired state.
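As context for the check_mode caveat above: C(zfs get -p) reports exact byte counts, so a human-readable request such as V(1M) only compares equal to the stored value after normalization. A rough sketch of that normalization (illustration only, assuming the powers-of-two suffixes zfs(8) uses; the module itself does not perform this conversion, which is why check mode can report a spurious change):

```python
def zfs_size_to_bytes(size):
    # Powers-of-two multipliers matching zfs(8) size suffixes.
    units = {'B': 1, 'K': 1 << 10, 'M': 1 << 20, 'G': 1 << 30, 'T': 1 << 40}
    size = size.strip().upper()
    if size and size[-1] in units:
        return int(float(size[:-1]) * units[size[-1]])
    return int(size)  # already an unqualified byte count

# "1M", "1024K" and "1048576" all describe the same volsize:
assert zfs_size_to_bytes('1M') == zfs_size_to_bytes('1024K') == zfs_size_to_bytes('1048576') == 1048576
```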
choices: [absent, present] required: true type: str @@ -98,10 +98,10 @@ from ansible.module_utils.basic import AnsibleModule class Zfs(object): - def __init__(self, module, name, properties): + def __init__(self, module, name, extra_zfs_properties): self.module = module self.name = name - self.properties = properties + self.extra_zfs_properties = extra_zfs_properties self.changed = False self.zfs_cmd = module.get_bin_path('zfs', True) self.zpool_cmd = module.get_bin_path('zpool', True) @@ -142,7 +142,7 @@ class Zfs(object): if self.module.check_mode: self.changed = True return - properties = self.properties + extra_zfs_properties = self.extra_zfs_properties origin = self.module.params.get('origin') cmd = [self.zfs_cmd] @@ -158,8 +158,8 @@ class Zfs(object): if action in ['create', 'clone']: cmd += ['-p'] - if properties: - for prop, value in properties.items(): + if extra_zfs_properties: + for prop, value in extra_zfs_properties.items(): if prop == 'volsize': cmd += ['-V', value] elif prop == 'volblocksize': @@ -189,45 +189,62 @@ class Zfs(object): def set_properties_if_changed(self): diff = {'before': {'extra_zfs_properties': {}}, 'after': {'extra_zfs_properties': {}}} - current_properties = self.get_current_properties() - for prop, value in self.properties.items(): - current_value = current_properties.get(prop, None) + current_properties = self.list_properties() + for prop, value in self.extra_zfs_properties.items(): + current_value = self.get_property(prop, current_properties) if current_value != value: self.set_property(prop, value) diff['before']['extra_zfs_properties'][prop] = current_value diff['after']['extra_zfs_properties'][prop] = value if self.module.check_mode: return diff - updated_properties = self.get_current_properties() - for prop in self.properties: - value = updated_properties.get(prop, None) + updated_properties = self.list_properties() + for prop in self.extra_zfs_properties: + value = self.get_property(prop, updated_properties) if value is None: self.module.fail_json(msg="zfsprop was not present after being successfully set: %s" % prop) - if current_properties.get(prop, None) != value: + if self.get_property(prop, current_properties) != value: self.changed = True if prop in diff['after']['extra_zfs_properties']: diff['after']['extra_zfs_properties'][prop] = value return diff - def get_current_properties(self): - cmd = [self.zfs_cmd, 'get', '-H', '-p', '-o', "property,value,source"] + def list_properties(self): + cmd = [self.zfs_cmd, 'get', '-H', '-p', '-o', "property,source"] if self.enhanced_sharing: cmd += ['-e'] cmd += ['all', self.name] rc, out, err = self.module.run_command(cmd) - properties = dict() + properties = [] for line in out.splitlines(): - prop, value, source = line.split('\t') + prop, source = line.split('\t') # include source '-' so that creation-only properties are not removed # to avoids errors when the dataset already exists and the property is not changed # this scenario is most likely when the same playbook is run more than once if source in ('local', 'received', '-'): - properties[prop] = value + properties.append(prop) + return properties + + def get_property(self, name, list_of_properties): # Add alias for enhanced sharing properties if self.enhanced_sharing: - properties['sharenfs'] = properties.get('share.nfs', None) - properties['sharesmb'] = properties.get('share.smb', None) - return properties + if name == 'sharenfs': + name = 'share.nfs' + elif name == 'sharesmb': + name = 'share.smb' + if name not in list_of_properties: + return None + 
cmd = [self.zfs_cmd, 'get', '-H', '-p', '-o', "value"] + if self.enhanced_sharing: + cmd += ['-e'] + cmd += [name, self.name] + rc, out, err = self.module.run_command(cmd) + if rc != 0: + return None + # + # Strip last newline + # + return out[:-1] def main(): @@ -282,7 +299,7 @@ def main(): result['diff']['before_header'] = name result['diff']['after_header'] = name - result.update(zfs.properties) + result.update(zfs.extra_zfs_properties) result['changed'] = zfs.changed module.exit_json(**result) diff --git a/plugins/modules/zfs_delegate_admin.py b/plugins/modules/zfs_delegate_admin.py index 796cbd4595..7158c7638f 100644 --- a/plugins/modules/zfs_delegate_admin.py +++ b/plugins/modules/zfs_delegate_admin.py @@ -12,13 +12,13 @@ DOCUMENTATION = r""" module: zfs_delegate_admin short_description: Manage ZFS delegated administration (user admin privileges) description: - - Manages ZFS file system delegated administration permissions, which allow unprivileged users to perform ZFS operations normally restricted - to the superuser. + - Manages ZFS file system delegated administration permissions, which allow unprivileged users to perform ZFS operations + normally restricted to the superuser. - See the C(zfs allow) section of V(zfs(1M\)) for detailed explanations of options. - This module attempts to adhere to the behavior of the command line tool as much as possible. requirements: - - "A ZFS/OpenZFS implementation that supports delegation with C(zfs allow), including: Solaris >= 10, illumos (all versions), FreeBSD >= 8.0R, - ZFS on Linux >= 0.7.0." + - "A ZFS/OpenZFS implementation that supports delegation with C(zfs allow), including: Solaris >= 10, illumos (all versions), + FreeBSD >= 8.0R, ZFS on Linux >= 0.7.0." extends_documentation_fragment: - community.general.attributes attributes: @@ -36,7 +36,8 @@ options: description: - Whether to allow (V(present)), or unallow (V(absent)) a permission. - When set to V(present), at least one "entity" param of O(users), O(groups), or O(everyone) are required. - - When set to V(absent), removes permissions from the specified entities, or removes all permissions if no entity params are specified. + - When set to V(absent), removes permissions from the specified entities, or removes all permissions if no entity params + are specified. choices: [absent, present] default: present type: str diff --git a/plugins/modules/zfs_facts.py b/plugins/modules/zfs_facts.py index 115e8e3e7a..1bbe73cb27 100644 --- a/plugins/modules/zfs_facts.py +++ b/plugins/modules/zfs_facts.py @@ -38,16 +38,18 @@ options: default: false properties: description: - - Specifies which dataset properties should be queried in comma-separated format. For more information about dataset properties, check zfs(1M) - man page. + - Specifies which dataset properties should be queried in comma-separated format. For more information about dataset + properties, check zfs(1M) man page. default: all type: str type: description: - - Specifies which datasets types to display. Multiple values have to be provided in comma-separated form. + - Specifies which datasets types to display. Multiple values have to be provided as a list or in comma-separated form. + - Value V(all) cannot be used together with other values. choices: ['all', 'filesystem', 'volume', 'snapshot', 'bookmark'] - default: all - type: str + default: [all] + type: list + elements: str depth: description: - Specifies recursion depth. @@ -91,21 +93,68 @@ zfs_datasets: description: ZFS dataset facts. 
returned: always type: str - sample: {"aclinherit": "restricted", "aclmode": "discard", "atime": "on", "available": "43.8G", "canmount": "on", "casesensitivity": "sensitive", - "checksum": "on", "compression": "off", "compressratio": "1.00x", "copies": "1", "creation": "Thu Jun 16 11:37 2016", "dedup": "off", "devices": "on", - "exec": "on", "filesystem_count": "none", "filesystem_limit": "none", "logbias": "latency", "logicalreferenced": "18.5K", "logicalused": "3.45G", - "mlslabel": "none", "mounted": "yes", "mountpoint": "/rpool", "name": "rpool", "nbmand": "off", "normalization": "none", "org.openindiana.caiman:install": - "ready", "primarycache": "all", "quota": "none", "readonly": "off", "recordsize": "128K", "redundant_metadata": "all", "refcompressratio": "1.00x", - "referenced": "29.5K", "refquota": "none", "refreservation": "none", "reservation": "none", "secondarycache": "all", "setuid": "on", "sharenfs": "off", - "sharesmb": "off", "snapdir": "hidden", "snapshot_count": "none", "snapshot_limit": "none", "sync": "standard", "type": "filesystem", "used": "4.41G", - "usedbychildren": "4.41G", "usedbydataset": "29.5K", "usedbyrefreservation": "0", "usedbysnapshots": "0", "utf8only": "off", "version": "5", - "vscan": "off", "written": "29.5K", "xattr": "on", "zoned": "off"} + sample: + "aclinherit": "restricted" + "aclmode": "discard" + "atime": "on" + "available": "43.8G" + "canmount": "on" + "casesensitivity": "sensitive" + "checksum": "on" + "compression": "off" + "compressratio": "1.00x" + "copies": "1" + "creation": "Thu Jun 16 11:37 2016" + "dedup": "off" + "devices": "on" + "exec": "on" + "filesystem_count": "none" + "filesystem_limit": "none" + "logbias": "latency" + "logicalreferenced": "18.5K" + "logicalused": "3.45G" + "mlslabel": "none" + "mounted": "yes" + "mountpoint": "/rpool" + "name": "rpool" + "nbmand": "off" + "normalization": "none" + "org.openindiana.caiman:install": "ready" + "primarycache": "all" + "quota": "none" + "readonly": "off" + "recordsize": "128K" + "redundant_metadata": "all" + "refcompressratio": "1.00x" + "referenced": "29.5K" + "refquota": "none" + "refreservation": "none" + "reservation": "none" + "secondarycache": "all" + "setuid": "on" + "sharenfs": "off" + "sharesmb": "off" + "snapdir": "hidden" + "snapshot_count": "none" + "snapshot_limit": "none" + "sync": "standard" + "type": "filesystem" + "used": "4.41G" + "usedbychildren": "4.41G" + "usedbydataset": "29.5K" + "usedbyrefreservation": "0" + "usedbysnapshots": "0" + "utf8only": "off" + "version": "5" + "vscan": "off" + "written": "29.5K" + "xattr": "on" + "zoned": "off" """ from collections import defaultdict from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six import iteritems SUPPORTED_TYPES = ['all', 'filesystem', 'volume', 'snapshot', 'bookmark'] @@ -131,10 +180,7 @@ class ZFSFacts(object): (rc, out, err) = self.module.run_command(cmd) - if rc == 0: - return True - else: - return False + return rc == 0 def get_facts(self): cmd = [self.module.get_bin_path('zfs'), 'get', '-H'] @@ -147,41 +193,44 @@ class ZFSFacts(object): cmd.append('%s' % self.depth) if self.type: cmd.append('-t') - cmd.append(self.type) + cmd.append(','.join(self.type)) cmd.extend(['-o', 'name,property,value', self.properties, self.name]) (rc, out, err) = self.module.run_command(cmd) - if rc == 0: - for line in out.splitlines(): - dataset, property, value = line.split('\t') - - self._datasets[dataset].update({property: value}) - - for k, v in iteritems(self._datasets): - v.update({'name': 
k}) - self.facts.append(v) - - return {'ansible_zfs_datasets': self.facts} - else: + if rc != 0: self.module.fail_json(msg='Error while trying to get facts about ZFS dataset: %s' % self.name, stderr=err, rc=rc) + for line in out.splitlines(): + dataset, property, value = line.split('\t') + + self._datasets[dataset].update({property: value}) + + for k, v in self._datasets.items(): + v.update({'name': k}) + self.facts.append(v) + + return {'ansible_zfs_datasets': self.facts} + def main(): module = AnsibleModule( argument_spec=dict( name=dict(required=True, aliases=['ds', 'dataset'], type='str'), - recurse=dict(required=False, default=False, type='bool'), - parsable=dict(required=False, default=False, type='bool'), - properties=dict(required=False, default='all', type='str'), - type=dict(required=False, default='all', type='str', choices=SUPPORTED_TYPES), - depth=dict(required=False, default=0, type='int') + recurse=dict(default=False, type='bool'), + parsable=dict(default=False, type='bool'), + properties=dict(default='all', type='str'), + type=dict(default='all', type='list', elements='str', choices=SUPPORTED_TYPES), + depth=dict(default=0, type='int') ), supports_check_mode=True ) + if 'all' in module.params['type'] and len(module.params['type']) > 1: + module.fail_json(msg="Value 'all' for parameter 'type' is mutually exclusive with other values") + zfs_facts = ZFSFacts(module) result = {} @@ -194,11 +243,11 @@ def main(): if zfs_facts.recurse: result['recurse'] = zfs_facts.recurse - if zfs_facts.dataset_exists(): - result['ansible_facts'] = zfs_facts.get_facts() - else: + if not zfs_facts.dataset_exists(): module.fail_json(msg='ZFS dataset %s does not exist!' % zfs_facts.name) + result['ansible_facts'] = zfs_facts.get_facts() + module.exit_json(**result) diff --git a/plugins/modules/znode.py b/plugins/modules/znode.py index ca59704d12..8e3da6de5e 100644 --- a/plugins/modules/znode.py +++ b/plugins/modules/znode.py @@ -57,7 +57,7 @@ options: default: false auth_scheme: description: - - 'Authentication scheme.' + - Authentication scheme. choices: [digest, sasl] type: str default: "digest" diff --git a/plugins/modules/zpool.py b/plugins/modules/zpool.py new file mode 100644 index 0000000000..3cce255415 --- /dev/null +++ b/plugins/modules/zpool.py @@ -0,0 +1,614 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2025, Tom Hesse +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r""" +module: zpool +short_description: Manage ZFS zpools +version_added: 11.0.0 +description: + - Create, destroy, and modify ZFS zpools and their vdev layouts, pool properties, and filesystem properties. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: partial + details: + - In check mode, any C(zpool) subcommand that supports the dry-run flag (C(-n)) will be run with C(-n) and its simulated + output is included in the module's diff results. + diff_mode: + support: full +author: + - Tom Hesse (@tomhesse) +options: + name: + description: + - Name of the zpool to manage. + required: true + type: str + state: + description: + - Whether the pool should exist. + choices: [present, absent] + default: present + type: str + disable_new_features: + description: + - If V(true), disable new ZFS feature flags when creating. 
+ type: bool + default: false + force: + description: + - If V(true), force operations (for example overwrite existing devices). + type: bool + default: false + pool_properties: + description: + - Dictionary of ZFS pool properties to set (for example V(autoexpand), V(cachefile)). + type: dict + default: {} + filesystem_properties: + description: + - Dictionary of ZFS filesystem properties to set on the root dataset (for example V(compression), V(dedup)). + type: dict + default: {} + mountpoint: + description: + - Filesystem mountpoint for the root dataset. + type: str + altroot: + description: + - Alternate root for mounting filesystems. + type: str + temp_name: + description: + - Temporary name used during pool creation. + type: str + vdevs: + description: + - List of vdev definitions for the pool. + type: list + elements: dict + suboptions: + role: + description: + - Special vdev role (for example V(log), V(cache), V(spare)). + type: str + choices: [log, cache, spare, dedup, special] + type: + description: + - Vdev topology (for example V(stripe), V(mirror), V(raidz)). + type: str + choices: [stripe, mirror, raidz, raidz1, raidz2, raidz3] + default: stripe + disks: + description: + - List of device paths to include in this vdev. + required: true + type: list + elements: path +""" + +EXAMPLES = r""" +- name: Create pool "tank" on /dev/sda + community.general.zpool: + name: tank + vdevs: + - disks: + - /dev/sda + +- name: Create mirrored pool "tank" + community.general.zpool: + name: tank + vdevs: + - type: mirror + disks: + - /dev/sda + - /dev/sdb + +- name: Add a cache device to tank + community.general.zpool: + name: tank + vdevs: + - disks: + - /dev/sda + - role: cache + disks: + - /dev/nvme0n1 + +- name: Set pool and filesystem properties + community.general.zpool: + name: tank + pool_properties: + ashift: 12 + filesystem_properties: + compression: lz4 + vdevs: + - disks: + - /dev/sda + +- name: Destroy pool "tank" + community.general.zpool: + name: tank + state: absent +""" + +import re +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +class Zpool(object): + + def __init__(self, module, name, disable_new_features, force, pool_properties, filesystem_properties, mountpoint, altroot, temp_name, vdevs): + self.module = module + self.name = name + self.disable_new_features = disable_new_features + self.force = force + self.pool_properties = pool_properties + self.filesystem_properties = filesystem_properties + self.mountpoint = mountpoint + self.altroot = altroot + self.temp_name = temp_name + self.vdevs = vdevs + self.zpool_cmd = module.get_bin_path('zpool', required=True) + self.zfs_cmd = module.get_bin_path('zfs', required=True) + self.changed = False + + self.zpool_runner = CmdRunner( + module, + command=self.zpool_cmd, + arg_formats=dict( + subcommand=cmd_runner_fmt.as_list(), + disable_new_features=cmd_runner_fmt.as_bool('-d'), + force=cmd_runner_fmt.as_bool('-f'), + dry_run=cmd_runner_fmt.as_bool('-n'), + pool_properties=cmd_runner_fmt.as_func( + lambda props: sum([['-o', '{}={}'.format(prop, value)] for prop, value in (props or {}).items()], []) + ), + filesystem_properties=cmd_runner_fmt.as_func( + lambda props: sum([['-O', '{}={}'.format(prop, value)] for prop, value in (props or {}).items()], []) + ), + mountpoint=cmd_runner_fmt.as_opt_val('-m'), + altroot=cmd_runner_fmt.as_opt_val('-R'), + temp_name=cmd_runner_fmt.as_opt_val('-t'), + 
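+                # The 'vdevs' formatter below flattens each vdev dict into raw
+                # zpool CLI tokens: for instance (illustrative values)
+                #   {'role': 'log', 'type': 'mirror', 'disks': ['/dev/sda', '/dev/sdb']}
+                # becomes ['log', 'mirror', '/dev/sda', '/dev/sdb']. A 'stripe'
+                # vdev emits no type keyword, since bare disks are striped by
+                # default.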
name=cmd_runner_fmt.as_list(), + vdevs=cmd_runner_fmt.as_func( + lambda vdevs: sum( + [ + ([vdev['role']] if vdev.get('role') else []) + + ([] if vdev.get('type', 'stripe') == 'stripe' else [vdev['type']]) + + vdev.get('disks', []) + for vdev in (vdevs or []) + ], + [], + ) + ), + vdev_name=cmd_runner_fmt.as_list(), + scripted=cmd_runner_fmt.as_bool('-H'), + parsable=cmd_runner_fmt.as_bool('-p'), + columns=cmd_runner_fmt.as_opt_val('-o'), + properties=cmd_runner_fmt.as_list(), + assignment=cmd_runner_fmt.as_list(), + full_paths=cmd_runner_fmt.as_bool('-P'), + real_paths=cmd_runner_fmt.as_bool('-L'), + ) + ) + + self.zfs_runner = CmdRunner( + module, + command=self.zfs_cmd, + arg_formats=dict( + subcommand=cmd_runner_fmt.as_list(), + scripted=cmd_runner_fmt.as_bool('-H'), + columns=cmd_runner_fmt.as_opt_val('-o'), + properties=cmd_runner_fmt.as_list(), + assignment=cmd_runner_fmt.as_list(), + name=cmd_runner_fmt.as_list() + ) + ) + + def exists(self): + with self.zpool_runner('subcommand name') as ctx: + rc, stdout, stderr = ctx.run(subcommand='list', name=self.name) + return rc == 0 + + def create(self): + with self.zpool_runner( + 'subcommand disable_new_features force dry_run pool_properties filesystem_properties mountpoint altroot temp_name name vdevs', + check_rc=True + ) as ctx: + rc, stdout, stderr = ctx.run(subcommand='create', dry_run=self.module.check_mode) + self.changed = True + if self.module.check_mode: + return {'prepared': stdout} + + def destroy(self): + if self.module.check_mode: + self.changed = True + return + with self.zpool_runner('subcommand name', check_rc=True) as ctx: + rc, stdout, stderr = ctx.run(subcommand='destroy') + self.changed = True + + def list_pool_properties(self): + with self.zpool_runner('subcommand scripted columns properties name', check_rc=True) as ctx: + rc, stdout, stderr = ctx.run( + subcommand='get', + scripted=True, + columns='property,value', + properties='all', + ) + + props = {} + for line in stdout.splitlines(): + prop, value = line.split('\t', 1) + props[prop] = value + return props + + def set_pool_properties_if_changed(self): + current = self.list_pool_properties() + before = {} + after = {} + for prop, value in self.pool_properties.items(): + if current.get(prop) != str(value): + before[prop] = current.get(prop) + if not self.module.check_mode: + with self.zpool_runner('subcommand assignment name', check_rc=True) as ctx: + rc, stdout, stderr = ctx.run(subcommand='set', assignment='{}={}'.format(prop, value)) + after[prop] = str(value) + self.changed = True + return {'before': {'pool_properties': before}, 'after': {'pool_properties': after}} + + def list_filesystem_properties(self): + with self.zfs_runner('subcommand scripted columns properties name', check_rc=True) as ctx: + rc, stdout, stderr = ctx.run( + subcommand='get', + scripted=True, + columns='property,value', + properties='all', + ) + + props = {} + for line in stdout.splitlines(): + prop, value = line.split('\t', 1) + props[prop] = value + return props + + def set_filesystem_properties_if_changed(self): + current = self.list_filesystem_properties() + before = {} + after = {} + for prop, value in self.filesystem_properties.items(): + if current.get(prop) != str(value): + before[prop] = current.get(prop) + if not self.module.check_mode: + with self.zfs_runner('subcommand assignment name', check_rc=True) as ctx: + rc, stdout, stderr = ctx.run(subcommand='set', assignment='{}={}'.format(prop, value)) + after[prop] = str(value) + self.changed = True + return {'before': 
{'filesystem_properties': before}, 'after': {'filesystem_properties': after}} + + def base_device(self, device): + if not device.startswith('/dev/'): + return device + + # loop devices + match = re.match(r'^(/dev/loop\d+)$', device) + if match: + return match.group(1) + + # nvme drives + match = re.match(r'^(.*?)(p\d+)$', device) + if match: + return match.group(1) + + # sata/scsi drives + match = re.match(r'^(/dev/(?:sd|vd)[a-z])\d+$', device) + if match: + return match.group(1) + + return device + + def get_current_layout(self): + with self.zpool_runner('subcommand full_paths real_paths name', check_rc=True) as ctx: + rc, stdout, stderr = ctx.run(subcommand='status', full_paths=True, real_paths=True) + + vdevs = [] + current = None + in_config = False + + def flush_current(current): + if current: + if current.get('role') is None: + current.pop('role', None) + vdevs.append(current) + return None + + for line in stdout.splitlines(): + if not in_config: + if line.strip().startswith('config:'): + in_config = True + continue + + if not line.strip() or line.strip().startswith('NAME'): + continue + + partitions = line.split() + device = partitions[0] + + if device == self.name: + continue + + if device in ('logs', 'cache', 'spares'): + current = flush_current(current) + role = 'spare' if device == 'spares' else device.rstrip('s') + current = {'role': role, 'type': None, 'disks': []} + continue + + match_group = re.match(r'^(mirror|raidz\d?)-\d+$', device) + if match_group: + if current and current.get('type') is not None: + current = flush_current(current) + kind = match_group.group(1) + role = current.get('role') if current and current.get('type') is None else None + current = {'role': role, 'type': kind, 'disks': []} + continue + + if device.startswith('/'): + base_device = self.base_device(device) + if current: + if current.get('type') is None: + entry = { + 'type': 'stripe', + 'disks': [base_device] + } + if current.get('role'): + entry['role'] = current['role'] + vdevs.append(entry) + current = None + else: + current['disks'].append(base_device) + else: + vdevs.append({'type': 'stripe', 'disks': [base_device]}) + continue + + if current and current.get('type') is not None: + current = flush_current(current) + + return vdevs + + def normalize_vdevs(self, vdevs): + alias = {'raidz': 'raidz1'} + normalized = [] + for vdev in vdevs: + normalized_type = alias.get(vdev.get('type', 'stripe'), vdev.get('type', 'stripe')) + entry = { + 'type': normalized_type, + 'disks': sorted(vdev['disks']), + } + role = vdev.get('role') + if role is not None: + entry['role'] = role + normalized.append(entry) + return sorted(normalized, key=lambda x: (x.get('role', ''), x['type'], x['disks'])) + + def diff_layout(self): + current = self.normalize_vdevs(self.get_current_layout()) + desired = self.normalize_vdevs(self.vdevs) + + before = {'vdevs': current} + after = {'vdevs': desired} + + if current != desired: + self.changed = True + + return {'before': before, 'after': after} + + def add_vdevs(self): + invalid_properties = [k for k in self.pool_properties if k != 'ashift'] + if invalid_properties: + self.module.warn("zpool add only supports 'ashift', ignoring: {}".format(invalid_properties)) + + diff = self.diff_layout() + before_vdevs = diff['before']['vdevs'] + after_vdevs = diff['after']['vdevs'] + + to_add = [vdev for vdev in after_vdevs if vdev not in before_vdevs] + if not to_add: + return {} + + with self.zpool_runner('subcommand force dry_run pool_properties name vdevs', check_rc=True) as ctx: + rc, 
stdout, stderr = ctx.run( + subcommand='add', + dry_run=self.module.check_mode, + pool_properties={'ashift': self.pool_properties['ashift']} if 'ashift' in self.pool_properties else {}, + vdevs=to_add, + ) + + self.changed = True + if self.module.check_mode: + return {'prepared': stdout} + + def list_vdevs_with_names(self): + with self.zpool_runner('subcommand full_paths real_paths name', check_rc=True) as ctx: + rc, stdout, stderr = ctx.run(subcommand='status', full_paths=True, real_paths=True) + in_cfg = False + saw_pool = False + vdevs = [] + current = None + for line in stdout.splitlines(): + if not in_cfg: + if line.strip().startswith('config:'): + in_cfg = True + continue + if not line.strip() or line.strip().startswith('NAME'): + continue + partitions = line.strip().split() + device = partitions[0] + if not saw_pool: + if device == self.name: + saw_pool = True + continue + if re.match(r'^(mirror|raidz\d?)\-\d+$', device) or device in ('cache', 'logs', 'spares'): + if current: + vdevs.append(current) + vdev_type = ('stripe' if device in ('cache', 'logs', 'spares') else ('mirror' if device.startswith('mirror') else 'raidz')) + current = {'name': device, 'type': vdev_type, 'disks': []} + continue + if device.startswith('/') and current: + current['disks'].append(self.base_device(device)) + continue + if device.startswith('/'): + base_device = self.base_device(device) + vdevs.append({'name': base_device, 'type': 'stripe', 'disks': [base_device]}) + if current: + vdevs.append(current) + return vdevs + + def remove_vdevs(self): + current = self.list_vdevs_with_names() + current_disks = {disk for vdev in current for disk in vdev['disks']} + desired_disks = {disk for vdev in self.vdevs for disk in vdev.get('disks', [])} + gone = current_disks - desired_disks + to_remove = [vdev['name'] for vdev in current if any(disk in gone for disk in vdev['disks'])] + if not to_remove: + return {} + with self.zpool_runner('subcommand dry_run name vdev_name', check_rc=True) as ctx: + rc, stdout, stderr = ctx.run( + subcommand='remove', dry_run=self.module.check_mode, vdev_name=to_remove) + self.changed = True + if self.module.check_mode: + return {'prepared': stdout} + before = [vdev['name'] for vdev in current] + after = [name for name in before if name not in to_remove] + return {'before': {'vdevs': before}, 'after': {'vdevs': after}} + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', choices=['present', 'absent'], default='present'), + disable_new_features=dict(type='bool', default=False), + force=dict(type='bool', default=False), + pool_properties=dict(type='dict', default={}), + filesystem_properties=dict(type='dict', default={}), + mountpoint=dict(type='str'), + altroot=dict(type='str'), + temp_name=dict(type='str'), + vdevs=dict( + type='list', + elements='dict', + options=dict( + role=dict( + type='str', + choices=['log', 'cache', 'spare', 'dedup', 'special'], + ), + type=dict( + type='str', + choices=['stripe', 'mirror', 'raidz', 'raidz1', 'raidz2', 'raidz3'], + default='stripe', + ), + disks=dict( + type='list', + elements='path', + required=True, + ), + ), + ), + ), + supports_check_mode=True, + required_if=[('state', 'present', ['vdevs'])] + ) + + name = module.params.get('name') + state = module.params.get('state') + disable_new_features = module.params.get('disable_new_features') + force = module.params.get('force') + pool_properties = module.params.get('pool_properties') + filesystem_properties = 
module.params.get('filesystem_properties') + mountpoint = module.params.get('mountpoint') + altroot = module.params.get('altroot') + temp_name = module.params.get('temp_name') + vdevs = module.params.get('vdevs') + + for property_key in ('pool_properties', 'filesystem_properties'): + for key, value in list(module.params.get(property_key, {}).items()): + if isinstance(value, bool): + module.params[property_key][key] = 'on' if value else 'off' + + if state != 'absent': + for idx, vdev in enumerate(vdevs, start=1): + disks = vdev.get('disks') + if not isinstance(disks, list) or len(disks) == 0: + module.fail_json(msg="vdev #{idx}: at least one disk is required (got: {disks!r})".format(idx=idx, disks=disks)) + + result = dict( + name=name, + state=state, + ) + + zpool = Zpool(module, name, disable_new_features, force, pool_properties, filesystem_properties, mountpoint, altroot, temp_name, vdevs) + + if state == 'present': + if zpool.exists(): + vdev_layout_diff = zpool.diff_layout() + + add_vdev_diff = zpool.add_vdevs() or {} + remove_vdev_diff = zpool.remove_vdevs() or {} + pool_properties_diff = zpool.set_pool_properties_if_changed() + filesystem_properties_diff = zpool.set_filesystem_properties_if_changed() + + before = {} + after = {} + for diff in (vdev_layout_diff, pool_properties_diff, filesystem_properties_diff): + before.update(diff.get('before', {})) + after.update(diff.get('after', {})) + + result['diff'] = {'before': before, 'after': after} + + if module.check_mode: + prepared = '' + for diff in (add_vdev_diff, remove_vdev_diff): + if 'prepared' in diff: + prepared += (diff['prepared'] if not prepared else '\n' + diff['prepared']) + result['diff']['prepared'] = prepared + else: + if module.check_mode: + result['diff'] = zpool.create() + else: + before_vdevs = [] + desired_vdevs = zpool.normalize_vdevs(zpool.vdevs) + zpool.create() + result['diff'] = { + 'before': {'state': 'absent', 'vdevs': before_vdevs}, + 'after': {'state': state, 'vdevs': desired_vdevs}, + } + + elif state == 'absent': + if zpool.exists(): + before_vdevs = zpool.get_current_layout() + zpool.destroy() + result['diff'] = { + 'before': {'state': 'present', 'vdevs': before_vdevs}, + 'after': {'state': state, 'vdevs': []}, + } + else: + result['diff'] = {} + + result['diff']['before_header'] = name + result['diff']['after_header'] = name + + result['changed'] = zpool.changed + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/zpool_facts.py b/plugins/modules/zpool_facts.py index e0b87b570c..93949de4f3 100644 --- a/plugins/modules/zpool_facts.py +++ b/plugins/modules/zpool_facts.py @@ -34,8 +34,8 @@ options: required: false properties: description: - - Specifies which dataset properties should be queried in comma-separated format. For more information about dataset properties, check zpool(1M) - man page. + - Specifies which dataset properties should be queried in comma-separated format. For more information about dataset + properties, check zpool(1M) man page. type: str default: all required: false @@ -64,14 +64,46 @@ ansible_facts: description: ZFS pool facts. 
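+  # All property values in this sample are raw strings as reported by zpool
+  # (sizes such as '46.3G', percentages such as '6%'); nothing is parsed into
+  # numbers.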
returned: always type: str - sample: {"allocated": "3.46G", "altroot": "-", "autoexpand": "off", "autoreplace": "off", "bootfs": "rpool/ROOT/openindiana", "cachefile": "-", - "capacity": "6%", "comment": "-", "dedupditto": "0", "dedupratio": "1.00x", "delegation": "on", "expandsize": "-", "failmode": "wait", - "feature@async_destroy": "enabled", "feature@bookmarks": "enabled", "feature@edonr": "enabled", "feature@embedded_data": "active", - "feature@empty_bpobj": "active", "feature@enabled_txg": "active", "feature@extensible_dataset": "enabled", "feature@filesystem_limits": "enabled", - "feature@hole_birth": "active", "feature@large_blocks": "enabled", "feature@lz4_compress": "active", "feature@multi_vdev_crash_dump": "enabled", - "feature@sha512": "enabled", "feature@skein": "enabled", "feature@spacemap_histogram": "active", "fragmentation": "3%", "free": "46.3G", - "freeing": "0", "guid": "15729052870819522408", "health": "ONLINE", "leaked": "0", "listsnapshots": "off", "name": "rpool", "readonly": "off", - "size": "49.8G", "version": "-"} + sample: + "allocated": "3.46G" + "altroot": "-" + "autoexpand": "off" + "autoreplace": "off" + "bootfs": "rpool/ROOT/openindiana" + "cachefile": "-" + "capacity": "6%" + "comment": "-" + "dedupditto": "0" + "dedupratio": "1.00x" + "delegation": "on" + "expandsize": "-" + "failmode": "wait" + "feature@async_destroy": "enabled" + "feature@bookmarks": "enabled" + "feature@edonr": "enabled" + "feature@embedded_data": "active" + "feature@empty_bpobj": "active" + "feature@enabled_txg": "active" + "feature@extensible_dataset": "enabled" + "feature@filesystem_limits": "enabled" + "feature@hole_birth": "active" + "feature@large_blocks": "enabled" + "feature@lz4_compress": "active" + "feature@multi_vdev_crash_dump": "enabled" + "feature@sha512": "enabled" + "feature@skein": "enabled" + "feature@spacemap_histogram": "active" + "fragmentation": "3%" + "free": "46.3G" + "freeing": "0" + "guid": "15729052870819522408" + "health": "ONLINE" + "leaked": "0" + "listsnapshots": "off" + "name": "rpool" + "readonly": "off" + "size": "49.8G" + "version": "-" name: description: ZFS pool name. returned: always diff --git a/plugins/modules/zypper.py b/plugins/modules/zypper.py index 5bc6c766a0..a9e3bf5257 100644 --- a/plugins/modules/zypper.py +++ b/plugins/modules/zypper.py @@ -29,7 +29,8 @@ author: short_description: Manage packages on SUSE and openSUSE description: - Manage packages on SUSE and openSUSE using the zypper and rpm tools. - - Also supports transactional updates, by running zypper inside C(/sbin/transactional-update --continue --drop-if-no-change --quiet run). + - Also supports transactional updates, by running zypper inside C(/sbin/transactional-update --continue --drop-if-no-change + --quiet run). extends_documentation_fragment: - community.general.attributes - community.general.attributes @@ -42,9 +43,9 @@ options: name: description: - Package name V(name) or package specifier or a list of either. - - Can include a version like V(name=1.0), V(name>3.4) or V(name<=2.7). If a version is given, V(oldpackage) is implied and zypper is allowed - to update the package within the version range given. - - You can also pass a url or a local path to a rpm file. + - Can include a version like V(name=1.0), V(name>3.4) or V(name<=2.7). If a version is given, V(oldpackage) is implied + and zypper is allowed to update the package within the version range given. + - You can also pass a URL or a local path to a rpm file. 
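+      # Illustrative spellings: 'vim=9.0', 'vim>8.0', 'vim<=9.1',
+      # 'https://example.com/vim.rpm', '/tmp/vim.rpm'.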
- When using O(state=latest), this can be V(*), which updates all installed packages. required: true aliases: ['pkg'] @@ -52,10 +53,10 @@ options: elements: str state: description: - - V(present) will make sure the package is installed. - - V(latest) will make sure the latest version of the package is installed. - - V(absent) will make sure the specified package is not installed. - - V(dist-upgrade) will make sure the latest version of all installed packages from all enabled repositories is installed. + - V(present) makes sure the package is installed. + - V(latest) makes sure the latest version of the package is installed. + - V(absent) makes sure the specified package is not installed. + - V(dist-upgrade) makes sure the latest version of all installed packages from all enabled repositories is installed. - When using V(dist-upgrade), O(name) should be V(*). required: false choices: [present, latest, absent, dist-upgrade, installed, removed] @@ -76,15 +77,15 @@ options: type: str disable_gpg_check: description: - - Whether to disable to GPG signature checking of the package signature being installed. Has an effect only if O(state) is V(present) or - V(latest). + - Whether to disable to GPG signature checking of the package signature being installed. Has an effect only if O(state) + is V(present) or V(latest). required: false default: false type: bool disable_recommends: description: - - Corresponds to the C(--no-recommends) option for I(zypper). Default behavior (V(true)) modifies zypper's default behavior; V(false) does - install recommended packages. + - Corresponds to the C(--no-recommends) option for I(zypper). Default behavior (V(true)) modifies zypper's default behavior; + V(false) does install recommended packages. required: false default: true type: bool @@ -96,7 +97,8 @@ options: type: bool force_resolution: description: - - Adds C(--force-resolution) option to I(zypper). Allows to (un)install packages with conflicting requirements (resolver will choose a solution). + - Adds C(--force-resolution) option to I(zypper). Allows to (un)install packages with conflicting requirements (resolver + chooses a solution). required: false default: false type: bool @@ -110,8 +112,8 @@ options: aliases: ["refresh"] oldpackage: description: - - Adds C(--oldpackage) option to I(zypper). Allows to downgrade packages with less side-effects than force. This is implied as soon as a - version is specified as part of the package name. + - Adds C(--oldpackage) option to I(zypper). Allows to downgrade packages with less side-effects than force. This is + implied as soon as a version is specified as part of the package name. required: false default: false type: bool @@ -156,10 +158,16 @@ options: description: - Adds C(--quiet) option to I(zypper) install/update command. version_added: '10.2.0' + skip_post_errors: + type: bool + required: false + default: false + description: + - When set to V(true), ignore I(zypper) return code 107 (post install script errors). + version_added: '10.6.0' notes: - - When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly to the O(name) - option. -# informational: requirements for nodes + - When used with a C(loop:) each package is processed individually, it is much more efficient to pass the list directly + to the O(name) option. 
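+  # For example, prefer
+  #   community.general.zypper:
+  #     name: [git, curl, vim]
+  #     state: present
+  # over a 'loop:' that installs one package per iteration (package names
+  # illustrative).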
requirements: - "zypper >= 1.0 # included in openSUSE >= 11.1 or SUSE Linux Enterprise Server/Desktop >= 11.0" - python-xml @@ -247,6 +255,12 @@ EXAMPLES = r""" state: present environment: ZYPP_LOCK_TIMEOUT: 20 + +- name: Install the package with post-install error without failing + community.general.zypper: + name: + state: present + skip_post_errors: true """ import os.path @@ -343,12 +357,13 @@ def parse_zypper_xml(m, cmd, fail_not_found=True, packages=None): m.fail_json(msg=errmsg, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd) else: return {}, rc, stdout, stderr - elif rc in [0, 102, 103, 106]: + elif rc in [0, 102, 103, 106, 107]: # zypper exit codes # 0: success # 106: signature verification failed # 102: ZYPPER_EXIT_INF_REBOOT_NEEDED - Returned after a successful installation of a patch which requires reboot of computer. # 103: zypper was upgraded, run same command again + # 107: ZYPPER_EXIT_INF_RPM_SCRIPT_FAILED - Some of the packages %post install scripts returned an error, but package is installed. if packages is None: firstrun = True packages = {} @@ -367,14 +382,18 @@ def parse_zypper_xml(m, cmd, fail_not_found=True, packages=None): # if this was the first run and it failed with 103 # run zypper again with the same command to complete update return parse_zypper_xml(m, cmd, fail_not_found=fail_not_found, packages=packages) + if rc == 107 and m.params['skip_post_errors'] and firstrun: + # if this was the first run and it failed with 107 with skip_post_errors flag + # run zypper again with the same command to complete update + return parse_zypper_xml(m, cmd, fail_not_found=fail_not_found, packages=packages) - # apply simple_errors logic to rc 0,102,103,106 + # apply simple_errors logic to rc 0,102,103,106,107 if m.params['simple_errors']: stdout = get_simple_errors(dom) or stdout return packages, rc, stdout, stderr - # apply simple_errors logic to rc other than 0,102,103,106 + # apply simple_errors logic to rc other than 0,102,103,106,107 if m.params['simple_errors']: stdout = get_simple_errors(dom) or stdout @@ -586,21 +605,22 @@ def main(): module = AnsibleModule( argument_spec=dict( name=dict(required=True, aliases=['pkg'], type='list', elements='str'), - state=dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed', 'dist-upgrade']), - type=dict(required=False, default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage', 'application']), - extra_args_precommand=dict(required=False, default=None), - disable_gpg_check=dict(required=False, default=False, type='bool'), - disable_recommends=dict(required=False, default=True, type='bool'), - force=dict(required=False, default=False, type='bool'), - force_resolution=dict(required=False, default=False, type='bool'), - update_cache=dict(required=False, aliases=['refresh'], default=False, type='bool'), - oldpackage=dict(required=False, default=False, type='bool'), - extra_args=dict(required=False, default=None), - allow_vendor_change=dict(required=False, default=False, type='bool'), - replacefiles=dict(required=False, default=False, type='bool'), - clean_deps=dict(required=False, default=False, type='bool'), - simple_errors=dict(required=False, default=False, type='bool'), - quiet=dict(required=False, default=True, type='bool'), + state=dict(default='present', choices=['absent', 'installed', 'latest', 'present', 'removed', 'dist-upgrade']), + type=dict(default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage', 'application']), + 
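+            # The entries below drop the redundant required=False (it is the
+            # default). skip_post_errors, added last, feeds parse_zypper_xml()
+            # above, which re-runs the command once on zypper rc 107 (%post
+            # scriptlet failed, package installed anyway) when the flag is set.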
extra_args_precommand=dict(), + disable_gpg_check=dict(default=False, type='bool'), + disable_recommends=dict(default=True, type='bool'), + force=dict(default=False, type='bool'), + force_resolution=dict(default=False, type='bool'), + update_cache=dict(aliases=['refresh'], default=False, type='bool'), + oldpackage=dict(default=False, type='bool'), + extra_args=dict(), + allow_vendor_change=dict(default=False, type='bool'), + replacefiles=dict(default=False, type='bool'), + clean_deps=dict(default=False, type='bool'), + simple_errors=dict(default=False, type='bool'), + quiet=dict(default=True, type='bool'), + skip_post_errors=dict(default=False, type='bool'), ), supports_check_mode=True ) diff --git a/plugins/modules/zypper_repository.py b/plugins/modules/zypper_repository.py index 18f9ff0824..e6beeca9a4 100644 --- a/plugins/modules/zypper_repository.py +++ b/plugins/modules/zypper_repository.py @@ -58,7 +58,7 @@ options: aliases: ["refresh"] priority: description: - - Set priority of repository. Packages will always be installed from the repository with the smallest priority number. + - Set priority of repository. Packages are always installed from the repository with the smallest priority number. - Needs C(zypper) version >= 1.12.25. type: int overwrite_multiple: @@ -69,7 +69,8 @@ options: auto_import_keys: description: - Automatically import the gpg signing key of the new or changed repository. - - Has an effect only if O(state=present). Has no effect on existing (unchanged) repositories or in combination with O(state=absent). + - Has an effect only if O(state=present). Has no effect on existing (unchanged) repositories or in combination with + O(state=absent). - Implies O(runrefresh). - Only works with C(.repo) files if O(name) is given explicitly. type: bool @@ -141,6 +142,7 @@ from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.urls import fetch_url from ansible.module_utils.common.text.converters import to_text +from ansible.module_utils.six import PY3 from ansible.module_utils.six.moves import configparser, StringIO from io import open @@ -172,7 +174,10 @@ def _parse_repos(module): opts = {} for o in REPO_OPTS: opts[o] = repo.getAttribute(o) - opts['url'] = repo.getElementsByTagName('url')[0].firstChild.data + try: + opts['url'] = repo.getElementsByTagName('url')[0].firstChild.data + except IndexError: + opts['url'] = repo.getAttribute('metalink') # A repo can be uniquely identified by an alias + url repos.append(opts) return repos @@ -245,7 +250,7 @@ def repo_exists(module, repodata, overwrite_multiple): module.fail_json(msg=errmsg) -def addmodify_repo(module, repodata, old_repos, zypper_version, warnings): +def addmodify_repo(module, repodata, old_repos, zypper_version): "Adds the repo, removes old repos before, that would conflict." repo = repodata['url'] cmd = _get_cmd(module, 'addrepo', '--check') @@ -258,7 +263,7 @@ def addmodify_repo(module, repodata, old_repos, zypper_version, warnings): if zypper_version >= LooseVersion('1.12.25'): cmd.extend(['--priority', str(repodata['priority'])]) else: - warnings.append("Setting priority only available for zypper >= 1.12.25. Ignoring priority argument.") + module.warn("Setting priority only available for zypper >= 1.12.25. 
Ignoring priority argument.") if repodata['enabled'] == '0': cmd.append('--disable') @@ -272,7 +277,7 @@ def addmodify_repo(module, repodata, old_repos, zypper_version, warnings): else: cmd.append('--no-gpgcheck') else: - warnings.append("Enabling/disabling gpgcheck only available for zypper >= 1.6.2. Using zypper default value.") + module.warn("Enabling/disabling gpgcheck only available for zypper >= 1.6.2. Using zypper default value.") if repodata['autorefresh'] == '1': cmd.append('--refresh') @@ -321,17 +326,17 @@ def runrefreshrepo(module, auto_import_keys=False, shortname=None): def main(): module = AnsibleModule( argument_spec=dict( - name=dict(required=False), - repo=dict(required=False), + name=dict(), + repo=dict(), state=dict(choices=['present', 'absent'], default='present'), - runrefresh=dict(required=False, default=False, type='bool'), - description=dict(required=False), - disable_gpg_check=dict(required=False, default=False, type='bool'), - autorefresh=dict(required=False, default=True, type='bool', aliases=['refresh']), - priority=dict(required=False, type='int'), - enabled=dict(required=False, default=True, type='bool'), - overwrite_multiple=dict(required=False, default=False, type='bool'), - auto_import_keys=dict(required=False, default=False, type='bool'), + runrefresh=dict(default=False, type='bool'), + description=dict(), + disable_gpg_check=dict(default=False, type='bool'), + autorefresh=dict(default=True, type='bool', aliases=['refresh']), + priority=dict(type='int'), + enabled=dict(default=True, type='bool'), + overwrite_multiple=dict(default=False, type='bool'), + auto_import_keys=dict(default=False, type='bool'), ), supports_check_mode=False, required_one_of=[['state', 'runrefresh']], @@ -345,7 +350,6 @@ def main(): runrefresh = module.params['runrefresh'] zypper_version = get_zypper_version(module) - warnings = [] # collect warning messages for final output repodata = { 'url': repo, @@ -406,7 +410,10 @@ def main(): repofile = configparser.ConfigParser() try: - repofile.readfp(StringIO(repofile_text)) + if PY3: + repofile.read_file(StringIO(repofile_text)) + else: + repofile.readfp(StringIO(repofile_text)) except configparser.Error: module.fail_json(msg='Invalid format, .repo file could not be parsed') @@ -452,7 +459,7 @@ def main(): if runrefresh: runrefreshrepo(module, auto_import_keys, shortname) exit_unchanged() - rc, stdout, stderr = addmodify_repo(module, repodata, old_repos, zypper_version, warnings) + rc, stdout, stderr = addmodify_repo(module, repodata, old_repos, zypper_version) if rc == 0 and (runrefresh or auto_import_keys): runrefreshrepo(module, auto_import_keys, shortname) elif state == 'absent': @@ -461,9 +468,9 @@ def main(): rc, stdout, stderr = remove_repo(module, shortname) if rc == 0: - module.exit_json(changed=True, repodata=repodata, state=state, warnings=warnings) + module.exit_json(changed=True, repodata=repodata, state=state) else: - module.fail_json(msg="Zypper failed with rc %s" % rc, rc=rc, stdout=stdout, stderr=stderr, repodata=repodata, state=state, warnings=warnings) + module.fail_json(msg="Zypper failed with rc %s" % rc, rc=rc, stdout=stdout, stderr=stderr, repodata=repodata, state=state) if __name__ == '__main__': diff --git a/plugins/modules/zypper_repository_info.py b/plugins/modules/zypper_repository_info.py index 9512d32eed..d3f20883c6 100644 --- a/plugins/modules/zypper_repository_info.py +++ b/plugins/modules/zypper_repository_info.py @@ -25,7 +25,7 @@ requirements: - "zypper >= 1.0 (included in openSUSE >= 11.1 or SUSE Linux 
Enterprise Server/Desktop >= 11.0)" - python-xml notes: - - "For info about packages, use the module M(ansible.builtin.package_facts)." + - For info about packages, use the module M(ansible.builtin.package_facts). """ EXAMPLES = r""" diff --git a/plugins/plugin_utils/ansible_type.py b/plugins/plugin_utils/ansible_type.py index ab78b78927..53348ba0f4 100644 --- a/plugins/plugin_utils/ansible_type.py +++ b/plugins/plugin_utils/ansible_type.py @@ -8,17 +8,31 @@ __metaclass__ = type from ansible.errors import AnsibleFilterError from ansible.module_utils.common._collections_compat import Mapping +try: + # Introduced with Data Tagging (https://github.com/ansible/ansible/pull/84621): + from ansible.module_utils.datatag import native_type_name as _native_type_name +except ImportError: + _native_type_name = None -def _atype(data, alias): + +def _atype(data, alias, *, use_native_type: bool = False): """ Returns the name of the type class. """ - data_type = type(data).__name__ + if use_native_type and _native_type_name: + data_type = _native_type_name(data) + else: + data_type = type(data).__name__ + # The following types were introduced with Data Tagging (https://github.com/ansible/ansible/pull/84621): + if data_type == "_AnsibleLazyTemplateDict": + data_type = "dict" + elif data_type == "_AnsibleLazyTemplateList": + data_type = "list" return alias.get(data_type, data_type) -def _ansible_type(data, alias): +def _ansible_type(data, alias, *, use_native_type: bool = False): """ Returns the Ansible data type. """ @@ -30,16 +44,16 @@ def _ansible_type(data, alias): msg = "The argument alias must be a dictionary. %s is %s" raise AnsibleFilterError(msg % (alias, type(alias))) - data_type = _atype(data, alias) + data_type = _atype(data, alias, use_native_type=use_native_type) if data_type == 'list' and len(data) > 0: - items = [_atype(i, alias) for i in data] + items = [_atype(i, alias, use_native_type=use_native_type) for i in data] items_type = '|'.join(sorted(set(items))) return ''.join((data_type, '[', items_type, ']')) if data_type == 'dict' and len(data) > 0: - keys = [_atype(i, alias) for i in data.keys()] - vals = [_atype(i, alias) for i in data.values()] + keys = [_atype(i, alias, use_native_type=use_native_type) for i in data.keys()] + vals = [_atype(i, alias, use_native_type=use_native_type) for i in data.values()] keys_type = '|'.join(sorted(set(keys))) vals_type = '|'.join(sorted(set(vals))) return ''.join((data_type, '[', keys_type, ', ', vals_type, ']')) diff --git a/plugins/test/a_module.py b/plugins/test/a_module.py index 0d6cecac6a..14f7ae27f2 100644 --- a/plugins/test/a_module.py +++ b/plugins/test/a_module.py @@ -6,18 +6,18 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = ''' - name: a_module - short_description: Test whether a given string refers to an existing module or action plugin - version_added: 4.0.0 - author: Felix Fontein (@felixfontein) - description: - - Test whether a given string refers to an existing module or action plugin. - - This can be useful in roles, which can use this to ensure that required modules are present ahead of time. - options: - _input: - description: A string denoting a fully qualified collection name (FQCN) of a module or action plugin. 
- type: string - required: true +name: a_module +short_description: Test whether a given string refers to an existing module or action plugin +version_added: 4.0.0 +author: Felix Fontein (@felixfontein) +description: + - Test whether a given string refers to an existing module or action plugin. + - This can be useful in roles, which can use this to ensure that required modules are present ahead of time. +options: + _input: + description: A string denoting a fully qualified collection name (FQCN) of a module or action plugin. + type: string + required: true ''' EXAMPLES = ''' @@ -34,9 +34,9 @@ EXAMPLES = ''' ''' RETURN = ''' - _value: - description: Whether the module or action plugin denoted by the input exists. - type: boolean +_value: + description: Whether the module or action plugin denoted by the input exists. + type: boolean ''' from ansible.plugins.loader import action_loader, module_loader diff --git a/plugins/test/ansible_type.py b/plugins/test/ansible_type.py index 9ac5e138eb..45bf1b42e5 100644 --- a/plugins/test/ansible_type.py +++ b/plugins/test/ansible_type.py @@ -6,52 +6,61 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = ''' - name: ansible_type - short_description: Validate input type - version_added: "9.2.0" - author: Vladimir Botka (@vbotka) - description: This test validates input type. - options: - _input: - description: Input data. - type: raw - required: true - dtype: - description: A single data type, or a data types list to be validated. - type: raw - required: true - alias: - description: Data type aliases. - default: {} - type: dictionary +name: ansible_type +short_description: Validate input type +version_added: "9.2.0" +author: Vladimir Botka (@vbotka) +description: This test validates input type. +options: + _input: + description: Input data. + type: raw + required: true + dtype: + description: A single data type, or a data types list to be validated. + type: raw + required: true + alias: + description: Data type aliases. + default: {} + type: dictionary ''' EXAMPLES = ''' +# Substitution converts str to AnsibleUnicode or _AnsibleTaggedStr +# ---------------------------------------------------------------- -# Substitution converts str to AnsibleUnicode -# ------------------------------------------- - -# String. AnsibleUnicode. -dtype: AnsibleUnicode +--- +# String. AnsibleUnicode or _AnsibleTaggedStr. +dtype: + - AnsibleUnicode + - _AnsibleTaggedStr data: "abc" result: '{{ data is community.general.ansible_type(dtype) }}' # result => true -# String. AnsibleUnicode alias str. -alias: {"AnsibleUnicode": "str"} +--- +# String. AnsibleUnicode/_AnsibleTaggedStr alias str. +alias: {"AnsibleUnicode": "str", "_AnsibleTaggedStr": "str"} dtype: str data: "abc" result: '{{ data is community.general.ansible_type(dtype, alias) }}' # result => true -# List. All items are AnsibleUnicode. -dtype: list[AnsibleUnicode] +--- +# List. All items are AnsibleUnicode/_AnsibleTaggedStr. +dtype: + - list[AnsibleUnicode] + - list[_AnsibleTaggedStr] data: ["a", "b", "c"] result: '{{ data is community.general.ansible_type(dtype) }}' # result => true -# Dictionary. All keys are AnsibleUnicode. All values are AnsibleUnicode. -dtype: dict[AnsibleUnicode, AnsibleUnicode] +--- +# Dictionary. All keys and values are AnsibleUnicode/_AnsibleTaggedStr. 
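+# (Templated keys and values report the class name '_AnsibleTaggedStr' under
+# the Data Tagging change referenced above, and 'AnsibleUnicode' before it;
+# the dtype list below therefore accepts both.)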
+dtype: + - dict[AnsibleUnicode, AnsibleUnicode] + - dict[_AnsibleTaggedStr, _AnsibleTaggedStr] data: {"a": "foo", "b": "bar", "c": "baz"} result: '{{ data is community.general.ansible_type(dtype) }}' # result => true @@ -59,94 +68,123 @@ result: '{{ data is community.general.ansible_type(dtype) }}' # No substitution and no alias. Type of strings is str # ---------------------------------------------------- +--- # String dtype: str result: '{{ "abc" is community.general.ansible_type(dtype) }}' # result => true +--- # Integer dtype: int result: '{{ 123 is community.general.ansible_type(dtype) }}' # result => true +--- # Float dtype: float result: '{{ 123.45 is community.general.ansible_type(dtype) }}' # result => true +--- # Boolean dtype: bool result: '{{ true is community.general.ansible_type(dtype) }}' # result => true +--- # List. All items are strings. dtype: list[str] result: '{{ ["a", "b", "c"] is community.general.ansible_type(dtype) }}' # result => true +--- # List of dictionaries. dtype: list[dict] result: '{{ [{"a": 1}, {"b": 2}] is community.general.ansible_type(dtype) }}' # result => true +--- # Dictionary. All keys are strings. All values are integers. dtype: dict[str, int] result: '{{ {"a": 1} is community.general.ansible_type(dtype) }}' # result => true +--- # Dictionary. All keys are strings. All values are integers. dtype: dict[str, int] result: '{{ {"a": 1, "b": 2} is community.general.ansible_type(dtype) }}' # result => true -# Type of strings is AnsibleUnicode or str -# ---------------------------------------- +# Type of strings is AnsibleUnicode, _AnsibleTaggedStr, or str +# ------------------------------------------------------------ +--- # Dictionary. The keys are integers or strings. All values are strings. -alias: {"AnsibleUnicode": "str"} +alias: + AnsibleUnicode: str + _AnsibleTaggedStr: str + _AnsibleTaggedInt: int dtype: dict[int|str, str] data: {1: 'a', 'b': 'b'} result: '{{ data is community.general.ansible_type(dtype, alias) }}' # result => true +--- # Dictionary. All keys are integers. All values are keys. -alias: {"AnsibleUnicode": "str"} +alias: + AnsibleUnicode: str + _AnsibleTaggedStr: str + _AnsibleTaggedInt: int dtype: dict[int, str] data: {1: 'a', 2: 'b'} result: '{{ data is community.general.ansible_type(dtype, alias) }}' # result => true +--- # Dictionary. All keys are strings. Multiple types values. -alias: {"AnsibleUnicode": "str"} +alias: + AnsibleUnicode: str + _AnsibleTaggedStr: str + _AnsibleTaggedInt: int + _AnsibleTaggedFloat: float dtype: dict[str, bool|dict|float|int|list|str] -data: {'a': 1, 'b': 1.1, 'c': 'abc', 'd': True, 'e': ['x', 'y', 'z'], 'f': {'x': 1, 'y': 2}} +data: {'a': 1, 'b': 1.1, 'c': 'abc', 'd': true, 'e': ['x', 'y', 'z'], 'f': {'x': 1, 'y': 2}} result: '{{ data is community.general.ansible_type(dtype, alias) }}' # result => true +--- # List. Multiple types items. 
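+# (Numeric values may likewise surface as '_AnsibleTaggedInt' and
+# '_AnsibleTaggedFloat'; the alias map below folds them back to 'int' and
+# 'float'.)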
-alias: {"AnsibleUnicode": "str"} +alias: + AnsibleUnicode: str + _AnsibleTaggedStr: str + _AnsibleTaggedInt: int + _AnsibleTaggedFloat: float dtype: list[bool|dict|float|int|list|str] -data: [1, 2, 1.1, 'abc', True, ['x', 'y', 'z'], {'x': 1, 'y': 2}] +data: [1, 2, 1.1, 'abc', true, ['x', 'y', 'z'], {'x': 1, 'y': 2}] result: '{{ data is community.general.ansible_type(dtype, alias) }}' # result => true # Option dtype is list # -------------------- -# AnsibleUnicode or str -dtype: ['AnsibleUnicode', 'str'] +--- +# AnsibleUnicode, _AnsibleTaggedStr, or str +dtype: ['AnsibleUnicode', '_AnsibleTaggedStr', 'str'] data: abc result: '{{ data is community.general.ansible_type(dtype) }}' # result => true +--- # float or int -dtype: ['float', 'int'] +dtype: ['float', 'int', "_AnsibleTaggedInt", "_AnsibleTaggedFloat"] data: 123 result: '{{ data is community.general.ansible_type(dtype) }}' # result => true +--- # float or int -dtype: ['float', 'int'] +dtype: ['float', 'int', "_AnsibleTaggedInt", "_AnsibleTaggedFloat"] data: 123.45 result: '{{ data is community.general.ansible_type(dtype) }}' # result => true @@ -154,15 +192,25 @@ result: '{{ data is community.general.ansible_type(dtype) }}' # Multiple alias # -------------- +--- # int alias number -alias: {"int": "number", "float": "number"} +alias: + int: number + float: number + _AnsibleTaggedInt: number + _AnsibleTaggedFloat: float dtype: number data: 123 result: '{{ data is community.general.ansible_type(dtype, alias) }}' # result => true +--- # float alias number -alias: {"int": "number", "float": "number"} +alias: + int: number + float: number + _AnsibleTaggedInt: number + _AnsibleTaggedFloat: float dtype: number data: 123.45 result: '{{ data is community.general.ansible_type(dtype, alias) }}' @@ -170,9 +218,9 @@ result: '{{ data is community.general.ansible_type(dtype, alias) }}' ''' RETURN = ''' - _value: - description: Whether the data type is valid. - type: bool +_value: + description: Whether the data type is valid. + type: bool ''' from ansible.errors import AnsibleFilterError @@ -192,6 +240,7 @@ def ansible_type(data, dtype, alias=None): else: data_types = dtype + # TODO: expose use_native_type parameter return _ansible_type(data, alias) in data_types diff --git a/plugins/test/fqdn_valid.py b/plugins/test/fqdn_valid.py index 1ec7742077..c8a143687a 100644 --- a/plugins/test/fqdn_valid.py +++ b/plugins/test/fqdn_valid.py @@ -17,41 +17,41 @@ else: DOCUMENTATION = ''' - name: fqdn_valid - short_description: Validates fully-qualified domain names against RFC 1123 - version_added: 8.1.0 - author: Vladimir Botka (@vbotka) - requirements: +name: fqdn_valid +short_description: Validates fully-qualified domain names against RFC 1123 +version_added: 8.1.0 +author: Vladimir Botka (@vbotka) +requirements: - fqdn>=1.5.1 (PyPI) - description: - - This test validates Fully Qualified Domain Names (FQDNs) - conforming to the Internet Engineering Task Force specification - RFC 1123 and RFC 952. - - The design intent is to validate that a string would be - traditionally acceptable as a public Internet hostname to - RFC-conforming software, which is a strict subset of the logic - in modern web browsers like Mozilla Firefox and Chromium that - determines whether make a DNS lookup. - - Certificate Authorities like Let's Encrypt run a narrower set of - string validation logic to determine validity for issuance. This - test is not intended to achieve functional parity with CA - issuance. - - Single label names are allowed by default (O(min_labels=1)). 
- options: - _input: - description: Name of the host. - type: str - required: true - min_labels: - description: Required minimum of labels, separated by period. - default: 1 - type: int - required: false - allow_underscores: - description: Allow underscore characters. - default: false - type: bool - required: false +description: + - This test validates Fully Qualified Domain Names (FQDNs) + conforming to the Internet Engineering Task Force specification + RFC 1123 and RFC 952. + - The design intent is to validate that a string would be + traditionally acceptable as a public Internet hostname to + RFC-conforming software, which is a strict subset of the logic + in modern web browsers like Mozilla Firefox and Chromium that + determines whether make a DNS lookup. + - Certificate Authorities like Let's Encrypt run a narrower set of + string validation logic to determine validity for issuance. This + test is not intended to achieve functional parity with CA + issuance. + - Single label names are allowed by default (O(min_labels=1)). +options: + _input: + description: Name of the host. + type: str + required: true + min_labels: + description: Required minimum of labels, separated by period. + default: 1 + type: int + required: false + allow_underscores: + description: Allow underscore characters. + default: false + type: bool + required: false ''' EXAMPLES = ''' @@ -69,9 +69,9 @@ EXAMPLES = ''' ''' RETURN = ''' - _value: - description: Whether the name is valid. - type: bool +_value: + description: Whether the name is valid. + type: bool ''' diff --git a/tests/integration/requirements.yml b/tests/integration/requirements.yml index b772fc82d1..dfe544e3cc 100644 --- a/tests/integration/requirements.yml +++ b/tests/integration/requirements.yml @@ -4,6 +4,6 @@ # SPDX-License-Identifier: GPL-3.0-or-later collections: -- ansible.posix -- community.crypto -- community.docker + - ansible.posix + - community.crypto + - community.docker diff --git a/tests/integration/targets/aix_filesystem/tasks/main.yml b/tests/integration/targets/aix_filesystem/tasks/main.yml index 878088f4e7..5c4f2c7e39 100644 --- a/tests/integration/targets/aix_filesystem/tasks/main.yml +++ b/tests/integration/targets/aix_filesystem/tasks/main.yml @@ -28,10 +28,10 @@ # It requires a host (nfshost) exporting the NFS - name: Creating NFS filesystem from nfshost (Linux NFS server) aix_filesystem: - device: /home/ftp - nfs_server: nfshost - filesystem: /nfs/ftp - state: present + device: /home/ftp + nfs_server: nfshost + filesystem: /nfs/ftp + state: present # It requires a volume group named datavg (next three actions) - name: Creating a logical volume testlv (aix_lvol module) diff --git a/tests/integration/targets/alternatives/tasks/main.yml b/tests/integration/targets/alternatives/tasks/main.yml index cd86b085d4..906463903f 100644 --- a/tests/integration/targets/alternatives/tasks/main.yml +++ b/tests/integration/targets/alternatives/tasks/main.yml @@ -9,60 +9,60 @@ - name: 'setup: create a dummy alternative' block: - - import_tasks: setup.yml + - import_tasks: setup.yml - ############## - # Test parameters: - # link parameter present / absent ('with_link' variable) - # with / without alternatives defined in alternatives file ('with_alternatives' variable) - # auto / manual ('mode' variable) + ############## + # Test parameters: + # link parameter present / absent ('with_link' variable) + # with / without alternatives defined in alternatives file ('with_alternatives' variable) + # auto / manual ('mode' variable) - - include_tasks: 
tests.yml - with_nested: - - [ true, false ] # with_link - - [ true, false ] # with_alternatives - - [ 'auto', 'manual' ] # mode - loop_control: - loop_var: test_conf + - include_tasks: tests.yml + with_nested: + - [true, false] # with_link + - [true, false] # with_alternatives + - ['auto', 'manual'] # mode + loop_control: + loop_var: test_conf - ########## - # Priority - - block: - - include_tasks: remove_links.yml - - include_tasks: setup_test.yml - # at least two iterations again - - include_tasks: tests_set_priority.yml - with_sequence: start=3 end=4 - vars: - with_alternatives: true - mode: auto + ########## + # Priority + - block: + - include_tasks: remove_links.yml + - include_tasks: setup_test.yml + # at least two iterations again + - include_tasks: tests_set_priority.yml + with_sequence: start=3 end=4 + vars: + with_alternatives: true + mode: auto - - block: - - include_tasks: remove_links.yml - - include_tasks: setup_test.yml - # at least two iterations again - - include_tasks: tests_set_priority.yml - with_sequence: start=3 end=4 - vars: - with_alternatives: false - mode: auto + - block: + - include_tasks: remove_links.yml + - include_tasks: setup_test.yml + # at least two iterations again + - include_tasks: tests_set_priority.yml + with_sequence: start=3 end=4 + vars: + with_alternatives: false + mode: auto - # Test that path is checked: alternatives must fail when path is nonexistent - - import_tasks: path_is_checked.yml + # Test that path is checked: alternatives must fail when path is nonexistent + - import_tasks: path_is_checked.yml - # Test that subcommands commands work - - import_tasks: subcommands.yml + # Test that subcommands commands work + - import_tasks: subcommands.yml - # Test operation of the 'state' parameter - - block: - - include_tasks: remove_links.yml - - include_tasks: tests_state.yml + # Test operation of the 'state' parameter + - block: + - include_tasks: remove_links.yml + - include_tasks: tests_state.yml - # Test for the family parameter - - block: - - include_tasks: remove_links.yml - - include_tasks: tests_family.yml - when: ansible_os_family == 'RedHat' + # Test for the family parameter + - block: + - include_tasks: remove_links.yml + - include_tasks: tests_family.yml + when: ansible_os_family == 'RedHat' # Cleanup always: diff --git a/tests/integration/targets/alternatives/tasks/setup.yml b/tests/integration/targets/alternatives/tasks/setup.yml index ab2c398521..cadee7f7f2 100644 --- a/tests/integration/targets/alternatives/tasks/setup.yml +++ b/tests/integration/targets/alternatives/tasks/setup.yml @@ -5,11 +5,11 @@ - include_vars: '{{ item }}' with_first_found: - - files: - - '{{ ansible_os_family }}-{{ ansible_distribution_version }}.yml' - - '{{ ansible_os_family }}.yml' - - default.yml - paths: ../vars + - files: + - '{{ ansible_os_family }}-{{ ansible_distribution_version }}.yml' + - '{{ ansible_os_family }}.yml' + - default.yml + paths: ../vars - template: src: dummy_command dest: /usr/bin/dummy{{ item }} diff --git a/tests/integration/targets/alternatives/tasks/subcommands.yml b/tests/integration/targets/alternatives/tasks/subcommands.yml index 678bbe68f4..3c70e6275d 100644 --- a/tests/integration/targets/alternatives/tasks/subcommands.yml +++ b/tests/integration/targets/alternatives/tasks/subcommands.yml @@ -89,7 +89,7 @@ assert: that: - cmd.rc == 2 - - '"No such file" in cmd.msg' + - '"No such file" in cmd.msg or "Error executing command." 
== cmd.msg' - name: Get dummymain alternatives output command: @@ -172,7 +172,7 @@ assert: that: - cmd.rc == 2 - - '"No such file" in cmd.msg' + - '"No such file" in cmd.msg or "Error executing command." == cmd.msg' - name: Get dummymain alternatives output command: diff --git a/tests/integration/targets/alternatives/tasks/test.yml b/tests/integration/targets/alternatives/tasks/test.yml index ca59a4b554..3445f64555 100644 --- a/tests/integration/targets/alternatives/tasks/test.yml +++ b/tests/integration/targets/alternatives/tasks/test.yml @@ -7,32 +7,32 @@ msg: ' with_alternatives: {{ with_alternatives }}, mode: {{ mode }}' - block: - - name: set alternative (using link parameter) - alternatives: - name: dummy - path: '/usr/bin/dummy{{ item }}' - link: '/usr/bin/dummy' - register: alternative + - name: set alternative (using link parameter) + alternatives: + name: dummy + path: '/usr/bin/dummy{{ item }}' + link: '/usr/bin/dummy' + register: alternative - - name: check expected command was executed - assert: - that: - - 'alternative is successful' - - 'alternative is changed' + - name: check expected command was executed + assert: + that: + - 'alternative is successful' + - 'alternative is changed' when: with_link - block: - - name: set alternative (without link parameter) - alternatives: - name: dummy - path: '/usr/bin/dummy{{ item }}' - register: alternative + - name: set alternative (without link parameter) + alternatives: + name: dummy + path: '/usr/bin/dummy{{ item }}' + register: alternative - - name: check expected command was executed - assert: - that: - - 'alternative is successful' - - 'alternative is changed' + - name: check expected command was executed + assert: + that: + - 'alternative is successful' + - 'alternative is changed' when: not with_link - name: execute dummy command diff --git a/tests/integration/targets/alternatives/tasks/tests.yml b/tests/integration/targets/alternatives/tasks/tests.yml index 75e30cabea..63068ede54 100644 --- a/tests/integration/targets/alternatives/tasks/tests.yml +++ b/tests/integration/targets/alternatives/tasks/tests.yml @@ -4,13 +4,13 @@ # SPDX-License-Identifier: GPL-3.0-or-later - block: - - include_tasks: remove_links.yml - - include_tasks: setup_test.yml - # at least two iterations: - # - first will use 'link currently absent', - # - second will receive 'link currently points to' - - include_tasks: test.yml - with_sequence: start=1 end=2 + - include_tasks: remove_links.yml + - include_tasks: setup_test.yml + # at least two iterations: + # - first will use 'link currently absent', + # - second will receive 'link currently points to' + - include_tasks: test.yml + with_sequence: start=1 end=2 vars: with_link: '{{ test_conf[0] }}' with_alternatives: '{{ test_conf[1] }}' diff --git a/tests/integration/targets/android_sdk/tasks/main.yml b/tests/integration/targets/android_sdk/tasks/main.yml index 46cf3192e1..3b49df4056 100644 --- a/tests/integration/targets/android_sdk/tasks/main.yml +++ b/tests/integration/targets/android_sdk/tasks/main.yml @@ -11,21 +11,21 @@ # java >= 17 is not available in RHEL and CentOS7 repos, which is required for sdkmanager to run - name: Bail out if not supported when: - - "ansible_os_family == 'RedHat' and ansible_distribution_version is version('8.0', '<')" + - "ansible_os_family == 'RedHat' and ansible_distribution_version is version('8.0', '<')" ansible.builtin.meta: end_play - name: Run android_sdk tests environment: PATH: '{{ ansible_env.PATH }}:{{ android_sdk_location }}/cmdline-tools/latest/bin' block: - - 
import_tasks: setup.yml + - import_tasks: setup.yml - - name: Run default tests - import_tasks: default-tests.yml - when: ansible_os_family != 'FreeBSD' + - name: Run default tests + import_tasks: default-tests.yml + when: ansible_os_family != 'FreeBSD' - # Most of the important Android SDK packages are not available on FreeBSD (like, build-tools, platform-tools and so on), - # but at least some of the functionality can be tested (like, downloading sources) - - name: Run FreeBSD tests - import_tasks: freebsd-tests.yml - when: ansible_os_family == 'FreeBSD' + # Most of the important Android SDK packages are not available on FreeBSD (like, build-tools, platform-tools and so on), + # but at least some of the functionality can be tested (like, downloading sources) + - name: Run FreeBSD tests + import_tasks: freebsd-tests.yml + when: ansible_os_family == 'FreeBSD' diff --git a/tests/integration/targets/android_sdk/tasks/setup.yml b/tests/integration/targets/android_sdk/tasks/setup.yml index ff2e3eb3cf..9965403367 100644 --- a/tests/integration/targets/android_sdk/tasks/setup.yml +++ b/tests/integration/targets/android_sdk/tasks/setup.yml @@ -9,7 +9,16 @@ # SPDX-License-Identifier: GPL-3.0-or-later - name: Include OS-specific variables - include_vars: '{{ ansible_os_family }}.yml' + include_vars: '{{ lookup("first_found", params) }}' + vars: + params: + files: + - '{{ ansible_distribution }}-{{ ansible_distribution_version }}.yml' + - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml' + - '{{ ansible_distribution }}.yml' + - '{{ ansible_os_family }}.yml' + paths: + - '{{ role_path }}/vars' - name: Install dependencies become: true @@ -74,7 +83,7 @@ unarchive: src: "{{ commandline_tools_link }}" dest: "{{ android_cmdline_temp_dir }}" - remote_src: yes + remote_src: true creates: "{{ android_cmdline_temp_dir }}/cmdline-tools" when: not sdkmanager_installed.stat.exists @@ -83,4 +92,4 @@ copy: src: "{{ android_cmdline_temp_dir }}/cmdline-tools/" dest: "{{ android_sdk_location }}/cmdline-tools/latest" - remote_src: yes + remote_src: true diff --git a/tests/integration/targets/android_sdk/vars/Fedora.yml b/tests/integration/targets/android_sdk/vars/Fedora.yml new file mode 100644 index 0000000000..e48443f0b5 --- /dev/null +++ b/tests/integration/targets/android_sdk/vars/Fedora.yml @@ -0,0 +1,6 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +openjdk_pkg: java-21-openjdk-headless diff --git a/tests/integration/targets/android_sdk/vars/RedHat-10.yml b/tests/integration/targets/android_sdk/vars/RedHat-10.yml new file mode 100644 index 0000000000..e48443f0b5 --- /dev/null +++ b/tests/integration/targets/android_sdk/vars/RedHat-10.yml @@ -0,0 +1,6 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +openjdk_pkg: java-21-openjdk-headless diff --git a/tests/integration/targets/android_sdk/vars/RedHat-9.yml b/tests/integration/targets/android_sdk/vars/RedHat-9.yml new file mode 100644 index 0000000000..e48443f0b5 --- /dev/null +++ b/tests/integration/targets/android_sdk/vars/RedHat-9.yml @@ -0,0 +1,6 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: 
GPL-3.0-or-later + +openjdk_pkg: java-21-openjdk-headless diff --git a/tests/integration/targets/ansible_galaxy_install/tasks/main.yml b/tests/integration/targets/ansible_galaxy_install/tasks/main.yml index 5c4af6d167..686422c065 100644 --- a/tests/integration/targets/ansible_galaxy_install/tasks/main.yml +++ b/tests/integration/targets/ansible_galaxy_install/tasks/main.yml @@ -19,8 +19,8 @@ - name: Assert collection netbox.netbox was installed assert: that: - - install_c0 is changed - - '"netbox.netbox" in install_c0.new_collections' + - install_c0 is changed + - '"netbox.netbox" in install_c0.new_collections' - name: Install collection netbox.netbox (again) community.general.ansible_galaxy_install: @@ -32,7 +32,7 @@ - name: Assert collection was not installed assert: that: - - install_c1 is not changed + - install_c1 is not changed ################################################### - name: Make directory install_r @@ -50,8 +50,8 @@ - name: Assert collection ansistrano.deploy was installed assert: that: - - install_r0 is changed - - '"ansistrano.deploy" in install_r0.new_roles' + - install_r0 is changed + - '"ansistrano.deploy" in install_r0.new_roles' - name: Install role ansistrano.deploy (again) community.general.ansible_galaxy_install: @@ -63,7 +63,7 @@ - name: Assert role was not installed assert: that: - - install_r1 is not changed + - install_r1 is not changed ################################################### - name: Set requirements file path @@ -85,9 +85,9 @@ - name: Assert requirements file was installed assert: that: - - install_rq0 is changed - - '"geerlingguy.java" in install_rq0.new_roles' - - '"geerlingguy.php_roles" in install_rq0.new_collections' + - install_rq0 is changed + - '"geerlingguy.java" in install_rq0.new_roles' + - '"geerlingguy.php_roles" in install_rq0.new_collections' - name: Install from requirements file (again) community.general.ansible_galaxy_install: @@ -99,7 +99,7 @@ - name: Assert requirements file was not installed assert: that: - - install_rq1 is not changed + - install_rq1 is not changed ################################################### - name: Make directory upgrade_c @@ -117,8 +117,8 @@ - name: Assert collection netbox.netbox was installed assert: that: - - upgrade_c0 is changed - - '"netbox.netbox" in upgrade_c0.new_collections' + - upgrade_c0 is changed + - '"netbox.netbox" in upgrade_c0.new_collections' - name: Upgrade collection netbox.netbox community.general.ansible_galaxy_install: @@ -139,5 +139,5 @@ - name: Assert collection was not installed assert: that: - - upgrade_c1 is changed - - upgrade_c2 is not changed + - upgrade_c1 is changed + - upgrade_c2 is not changed diff --git a/tests/integration/targets/apache2_mod_proxy/aliases b/tests/integration/targets/apache2_mod_proxy/aliases new file mode 100644 index 0000000000..0d1324b22a --- /dev/null +++ b/tests/integration/targets/apache2_mod_proxy/aliases @@ -0,0 +1,7 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +azp/posix/3 +destructive +skip/aix diff --git a/tests/integration/targets/proxmox_pool/defaults/main.yml b/tests/integration/targets/apache2_mod_proxy/meta/main.yml similarity index 58% rename from tests/integration/targets/proxmox_pool/defaults/main.yml rename to tests/integration/targets/apache2_mod_proxy/meta/main.yml index 5a518ac734..ac5ba5a0d0 100644 --- a/tests/integration/targets/proxmox_pool/defaults/main.yml +++ 
b/tests/integration/targets/apache2_mod_proxy/meta/main.yml
@@ -1,7 +1,8 @@
-# Copyright (c) 2023, Sergei Antipov
+---
+# Copyright (c) Ansible Project
 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
 # SPDX-License-Identifier: GPL-3.0-or-later
 
-poolid: test
-member: local
-member_type: storage
+dependencies:
+  - setup_remote_constraints
+  - setup_apache2
diff --git a/tests/integration/targets/apache2_mod_proxy/tasks/main.yml b/tests/integration/targets/apache2_mod_proxy/tasks/main.yml
new file mode 100644
index 0000000000..6ba6ee8808
--- /dev/null
+++ b/tests/integration/targets/apache2_mod_proxy/tasks/main.yml
@@ -0,0 +1,253 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests       #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- meta: end_play
+  when: ansible_os_family not in ['Debian', 'Suse']
+
+- name: Enable mod_proxy
+  community.general.apache2_module:
+    state: present
+    name: "{{ item }}"
+  loop:
+    - status
+    - proxy
+    - proxy_http
+    - proxy_balancer
+    - lbmethod_byrequests
+
+- name: Add port 81
+  lineinfile:
+    path: "/etc/apache2/{{ 'ports.conf' if ansible_os_family == 'Debian' else 'listen.conf' }}"
+    line: Listen 81
+
+- name: Set up virtual host
+  copy:
+    dest: "/etc/apache2/{{ 'sites-available' if ansible_os_family == 'Debian' else 'vhosts.d' }}/000-apache2_mod_proxy-test.conf"
+    content: |
+      <Proxy balancer://mycluster>
+          BalancerMember http://127.0.0.1:8080
+          BalancerMember http://127.0.0.1:8081
+      </Proxy>
+
+      <IfModule mod_evasive20.c>
+          DOSBlockingPeriod 0
+          DOSWhiteList 127.0.0.1
+          DOSWhiteList ::1
+      </IfModule>
+
+      <VirtualHost *:81>
+          ProxyPreserveHost On
+          ProxyPass balancer://mycluster/
+          ProxyPassReverse balancer://mycluster/
+      </VirtualHost>
+
+      <Location /balancer-manager>
+          SetHandler balancer-manager
+          Require all granted
+      </Location>
+
+- name: Enable virtual host
+  file:
+    src: /etc/apache2/sites-available/000-apache2_mod_proxy-test.conf
+    dest: /etc/apache2/sites-enabled/000-apache2_mod_proxy-test.conf
+    owner: root
+    group: root
+    state: link
+  when: ansible_os_family not in ['Suse']
+
+- name: Restart Apache
+  service:
+    name: apache2
+    state: restarted
+
+- name: Install BeautifulSoup
+  pip:
+    name: "{{ 'BeautifulSoup' if ansible_python_version is version('3', '<') else 'BeautifulSoup4' }}"
+    extra_args: "-c {{ remote_constraints }}"
+
+- name: Get all current balancer pool members attributes
+  community.general.apache2_mod_proxy:
+    balancer_vhost: localhost:81
+  register: result
+
+- assert:
+    that:
+      - result is not changed
+      - result.members | length == 2
+      - result.members[0].port in ["8080", "8081"]
+      - result.members[0].balancer_url == "http://localhost:81/balancer-manager/"
+      - result.members[0].host == "127.0.0.1"
+      - result.members[0].path is none
+      - result.members[0].protocol == "http"
+      - result.members[1].port in ["8080", "8081"]
+      - result.members[1].balancer_url == "http://localhost:81/balancer-manager/"
+      - result.members[1].host == "127.0.0.1"
+      - result.members[1].path is none
+      - result.members[1].protocol == "http"
+
+- name: Enable member
+  community.general.apache2_mod_proxy:
+    balancer_vhost: localhost:81
+    member_host: 127.0.0.1
+    state: present
+  register: result
+
+- assert:
+    that:
+      - result is not changed
+
+- name: Get all current
balancer pool members attributes + community.general.apache2_mod_proxy: + balancer_vhost: localhost:81 + register: result + +- assert: + that: + - result is not changed + - result.members | length == 2 + - result.members[0].port in ["8080", "8081"] + - result.members[0].balancer_url == "http://localhost:81/balancer-manager/" + - result.members[0].host == "127.0.0.1" + - result.members[0].path is none + - result.members[0].protocol == "http" + - result.members[0].status.disabled == false + - result.members[0].status.drained == false + - result.members[0].status.hot_standby == false + - result.members[0].status.ignore_errors == false + - result.members[1].port in ["8080", "8081"] + - result.members[1].balancer_url == "http://localhost:81/balancer-manager/" + - result.members[1].host == "127.0.0.1" + - result.members[1].path is none + - result.members[1].protocol == "http" + - result.members[1].status.disabled == false + - result.members[1].status.drained == false + - result.members[1].status.hot_standby == false + - result.members[1].status.ignore_errors == false + +- name: Drain member + community.general.apache2_mod_proxy: + balancer_vhost: localhost:81 + member_host: 127.0.0.1 + state: drained + register: result + +- assert: + that: + - result is changed + +# Note that since both members are on the same host, this always affects **both** members! + +- name: Get all current balancer pool members attributes + community.general.apache2_mod_proxy: + balancer_vhost: localhost:81 + register: result + +- assert: + that: + - result is not changed + - result.members | length == 2 + - result.members[0].port in ["8080", "8081"] + - result.members[0].balancer_url == "http://localhost:81/balancer-manager/" + - result.members[0].host == "127.0.0.1" + - result.members[0].path is none + - result.members[0].protocol == "http" + - result.members[0].status.disabled == false + - result.members[0].status.drained == true + - result.members[0].status.hot_standby == false + - result.members[0].status.ignore_errors == false + - result.members[1].port in ["8080", "8081"] + - result.members[1].balancer_url == "http://localhost:81/balancer-manager/" + - result.members[1].host == "127.0.0.1" + - result.members[1].path is none + - result.members[1].protocol == "http" + - result.members[1].status.disabled == false + - result.members[1].status.drained == true + - result.members[1].status.hot_standby == false + - result.members[1].status.ignore_errors == false + +- name: Disable member + community.general.apache2_mod_proxy: + balancer_vhost: localhost:81 + member_host: 127.0.0.1 + state: absent + register: result + +- assert: + that: + - result is changed + +- name: Get all current balancer pool members attributes + community.general.apache2_mod_proxy: + balancer_vhost: localhost:81 + register: result + +- assert: + that: + - result is not changed + - result.members | length == 2 + - result.members[0].port in ["8080", "8081"] + - result.members[0].balancer_url == "http://localhost:81/balancer-manager/" + - result.members[0].host == "127.0.0.1" + - result.members[0].path is none + - result.members[0].protocol == "http" + - result.members[0].status.disabled == true + - result.members[0].status.drained == false + - result.members[0].status.hot_standby == false + - result.members[0].status.ignore_errors == false + - result.members[1].port in ["8080", "8081"] + - result.members[1].balancer_url == "http://localhost:81/balancer-manager/" + - result.members[1].host == "127.0.0.1" + - result.members[1].path is none + - 
result.members[1].protocol == "http" + - result.members[1].status.disabled == true + - result.members[1].status.drained == false + - result.members[1].status.hot_standby == false + - result.members[1].status.ignore_errors == false + +- name: Enable member + community.general.apache2_mod_proxy: + balancer_vhost: localhost:81 + member_host: 127.0.0.1 + state: present + register: result + +- assert: + that: + - result is changed + +- name: Get all current balancer pool members attributes + community.general.apache2_mod_proxy: + balancer_vhost: localhost:81 + register: result + +- assert: + that: + - result is not changed + - result.members | length == 2 + - result.members[0].port in ["8080", "8081"] + - result.members[0].balancer_url == "http://localhost:81/balancer-manager/" + - result.members[0].host == "127.0.0.1" + - result.members[0].path is none + - result.members[0].protocol == "http" + - result.members[0].status.disabled == false + - result.members[0].status.drained == false + - result.members[0].status.hot_standby == false + - result.members[0].status.ignore_errors == false + - result.members[1].port in ["8080", "8081"] + - result.members[1].balancer_url == "http://localhost:81/balancer-manager/" + - result.members[1].host == "127.0.0.1" + - result.members[1].path is none + - result.members[1].protocol == "http" + - result.members[1].status.disabled == false + - result.members[1].status.drained == false + - result.members[1].status.hot_standby == false + - result.members[1].status.ignore_errors == false diff --git a/tests/integration/targets/apache2_module/meta/main.yml b/tests/integration/targets/apache2_module/meta/main.yml new file mode 100644 index 0000000000..f32339acf0 --- /dev/null +++ b/tests/integration/targets/apache2_module/meta/main.yml @@ -0,0 +1,7 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +dependencies: + - setup_apache2 diff --git a/tests/integration/targets/apache2_module/tasks/635-apache2-misleading-warning.yml b/tests/integration/targets/apache2_module/tasks/635-apache2-misleading-warning.yml index 5d93a9d300..c7d140b7bc 100644 --- a/tests/integration/targets/apache2_module/tasks/635-apache2-misleading-warning.yml +++ b/tests/integration/targets/apache2_module/tasks/635-apache2-misleading-warning.yml @@ -15,11 +15,11 @@ - assert: that: - - "'warnings' in disable_mpm_modules" - - disable_mpm_modules["warnings"] == [ - "No MPM module loaded! apache2 reload AND other module actions will fail if no MPM module is loaded immediately.", - "No MPM module loaded! apache2 reload AND other module actions will fail if no MPM module is loaded immediately." - ] + - "'warnings' in disable_mpm_modules" + - disable_mpm_modules["warnings"] == [ + "No MPM module loaded! apache2 reload AND other module actions will fail if no MPM module is loaded immediately.", + "No MPM module loaded! apache2 reload AND other module actions will fail if no MPM module is loaded immediately." 
+ ] - name: Enable MPM event module - Revert previous change apache2_module: @@ -44,4 +44,4 @@ - assert: that: - - "'warnings' not in disable_mpm_modules" + - "'warnings' not in disable_mpm_modules" diff --git a/tests/integration/targets/apache2_module/tasks/actualtest.yml b/tests/integration/targets/apache2_module/tasks/actualtest.yml index 6fd10ce572..88e063fe92 100644 --- a/tests/integration/targets/apache2_module/tasks/actualtest.yml +++ b/tests/integration/targets/apache2_module/tasks/actualtest.yml @@ -67,141 +67,141 @@ - name: Debian/Ubuntu specific tests when: "ansible_os_family == 'Debian'" block: - - name: force disable of autoindex # bug #2499 - community.general.apache2_module: - name: autoindex - state: absent - force: true - - - name: re-enable autoindex - community.general.apache2_module: - name: autoindex - state: present - - # mod_evasive is enabled by default upon the installation, so disable first and enable second, to preserve the config - - name: disable evasive module - community.general.apache2_module: - name: evasive - state: absent - - - name: enable evasive module, test https://github.com/ansible/ansible/issues/22635 - community.general.apache2_module: - name: evasive - state: present - - - name: use identifier to enable module, fix for https://github.com/ansible/ansible/issues/33669 - community.general.apache2_module: - name: dump_io - state: present - ignore_errors: true - register: enable_dumpio_wrong - - - name: disable dump_io - community.general.apache2_module: - name: dump_io - identifier: dumpio_module - state: absent - - - name: use identifier to enable module, fix for https://github.com/ansible/ansible/issues/33669 - community.general.apache2_module: - name: dump_io - identifier: dumpio_module - state: present - register: enable_dumpio_correct_1 - - - name: ensure idempotency with identifier - community.general.apache2_module: - name: dump_io - identifier: dumpio_module - state: present - register: enable_dumpio_correct_2 - - - name: disable dump_io - community.general.apache2_module: - name: dump_io - identifier: dumpio_module - state: absent - - - assert: - that: - - enable_dumpio_wrong is failed - - enable_dumpio_correct_1 is changed - - enable_dumpio_correct_2 is not changed - - - name: disable mpm modules - community.general.apache2_module: - name: "{{ item }}" - state: absent - ignore_configcheck: true - with_items: - - mpm_worker - - mpm_event - - mpm_prefork - - - name: enabled mpm_event - community.general.apache2_module: - name: mpm_event - state: present - ignore_configcheck: true - register: enabledmpmevent - - - name: ensure changed mpm_event - assert: - that: - - 'enabledmpmevent.changed' - - - name: switch between mpm_event and mpm_worker - community.general.apache2_module: - name: "{{ item.name }}" - state: "{{ item.state }}" - ignore_configcheck: true - with_items: - - name: mpm_event + - name: force disable of autoindex # bug #2499 + community.general.apache2_module: + name: autoindex state: absent - - name: mpm_worker + force: true + + - name: re-enable autoindex + community.general.apache2_module: + name: autoindex state: present - - name: ensure mpm_worker is already enabled - community.general.apache2_module: - name: mpm_worker - state: present - register: enabledmpmworker + # mod_evasive is enabled by default upon the installation, so disable first and enable second, to preserve the config + - name: disable evasive module + community.general.apache2_module: + name: evasive + state: absent - - name: ensure mpm_worker unchanged - 
assert: - that: - - 'not enabledmpmworker.changed' + - name: enable evasive module, test https://github.com/ansible/ansible/issues/22635 + community.general.apache2_module: + name: evasive + state: present - - name: try to disable all mpm modules with configcheck - community.general.apache2_module: - name: "{{item}}" - state: absent - with_items: - - mpm_worker - - mpm_event - - mpm_prefork - ignore_errors: true - register: remove_with_configcheck + - name: use identifier to enable module, fix for https://github.com/ansible/ansible/issues/33669 + community.general.apache2_module: + name: dump_io + state: present + ignore_errors: true + register: enable_dumpio_wrong - - name: ensure configcheck fails task with when run without mpm modules - assert: - that: - - "{{ item.failed }}" - with_items: "{{ remove_with_configcheck.results }}" + - name: disable dump_io + community.general.apache2_module: + name: dump_io + identifier: dumpio_module + state: absent - - name: try to disable all mpm modules without configcheck - community.general.apache2_module: - name: "{{item}}" - state: absent - ignore_configcheck: true - with_items: - - mpm_worker - - mpm_event - - mpm_prefork + - name: use identifier to enable module, fix for https://github.com/ansible/ansible/issues/33669 + community.general.apache2_module: + name: dump_io + identifier: dumpio_module + state: present + register: enable_dumpio_correct_1 - - name: enabled mpm_event to restore previous state - community.general.apache2_module: - name: mpm_event - state: present - ignore_configcheck: true - register: enabledmpmevent + - name: ensure idempotency with identifier + community.general.apache2_module: + name: dump_io + identifier: dumpio_module + state: present + register: enable_dumpio_correct_2 + + - name: disable dump_io + community.general.apache2_module: + name: dump_io + identifier: dumpio_module + state: absent + + - assert: + that: + - enable_dumpio_wrong is failed + - enable_dumpio_correct_1 is changed + - enable_dumpio_correct_2 is not changed + + - name: disable mpm modules + community.general.apache2_module: + name: "{{ item }}" + state: absent + ignore_configcheck: true + with_items: + - mpm_worker + - mpm_event + - mpm_prefork + + - name: enabled mpm_event + community.general.apache2_module: + name: mpm_event + state: present + ignore_configcheck: true + register: enabledmpmevent + + - name: ensure changed mpm_event + assert: + that: + - 'enabledmpmevent.changed' + + - name: switch between mpm_event and mpm_worker + community.general.apache2_module: + name: "{{ item.name }}" + state: "{{ item.state }}" + ignore_configcheck: true + with_items: + - name: mpm_event + state: absent + - name: mpm_worker + state: present + + - name: ensure mpm_worker is already enabled + community.general.apache2_module: + name: mpm_worker + state: present + register: enabledmpmworker + + - name: ensure mpm_worker unchanged + assert: + that: + - 'not enabledmpmworker.changed' + + - name: try to disable all mpm modules with configcheck + community.general.apache2_module: + name: "{{item}}" + state: absent + with_items: + - mpm_worker + - mpm_event + - mpm_prefork + ignore_errors: true + register: remove_with_configcheck + + - name: ensure configcheck fails task with when run without mpm modules + assert: + that: + - item is failed + with_items: "{{ remove_with_configcheck.results }}" + + - name: try to disable all mpm modules without configcheck + community.general.apache2_module: + name: "{{item}}" + state: absent + ignore_configcheck: true + 
with_items: + - mpm_worker + - mpm_event + - mpm_prefork + + - name: enabled mpm_event to restore previous state + community.general.apache2_module: + name: mpm_event + state: present + ignore_configcheck: true + register: enabledmpmevent diff --git a/tests/integration/targets/apache2_module/tasks/main.yml b/tests/integration/targets/apache2_module/tasks/main.yml index 6f2f718ad0..e8210f2682 100644 --- a/tests/integration/targets/apache2_module/tasks/main.yml +++ b/tests/integration/targets/apache2_module/tasks/main.yml @@ -8,21 +8,6 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -- name: install apache via apt - apt: - name: "{{item}}" - state: present - when: "ansible_os_family == 'Debian'" - with_items: - - apache2 - - libapache2-mod-evasive - -- name: install apache via zypper - community.general.zypper: - name: apache2 - state: present - when: "ansible_os_family == 'Suse'" - - name: test apache2_module block: - name: get list of enabled modules diff --git a/tests/integration/targets/apk/tasks/main.yml b/tests/integration/targets/apk/tasks/main.yml index 0e1b0ae429..c800b1fa1c 100644 --- a/tests/integration/targets/apk/tasks/main.yml +++ b/tests/integration/targets/apk/tasks/main.yml @@ -158,3 +158,63 @@ that: - results is not changed - (results.packages | default([]) | length) == 0 + + - name: Install package with empty name + community.general.apk: + name: "" + register: result_empty + ignore_errors: true + + - name: Assert failure due to empty package name + ansible.builtin.assert: + that: + - result_empty is failed + - "'Package name(s) cannot be empty or whitespace-only' == result_empty.msg" + + - name: Install package name with only spaces + community.general.apk: + name: [" "] + register: result_spaces + ignore_errors: true + + - name: Assert failure due to whitespace-only package name + ansible.builtin.assert: + that: + - result_spaces is failed + - "'Package name(s) cannot be empty or whitespace-only' == result_spaces.msg" + + - name: Accept list with valid and empty string + community.general.apk: + name: ["busybox", ""] + register: result_valid_mixed + ignore_errors: true + + - name: Assert success with mixed package list + ansible.builtin.assert: + that: + - result_valid_mixed is not failed + + - name: Reject package name list with multiple empty/whitespace-only strings + community.general.apk: + name: ["", " "] + register: result_multiple_empty + ignore_errors: true + + - name: Assert failure due to all package names being empty or whitespace + ansible.builtin.assert: + that: + - result_multiple_empty is failed + - "'Package name(s) cannot be empty or whitespace-only' == result_multiple_empty.msg" + + - name: Reject empty package name with update_cache parameter + community.general.apk: + name: "" + update_cache: true + register: result_empty_package_with_update_cache + ignore_errors: true + + - name: Assert failure due to all package names being empty or whitespace + ansible.builtin.assert: + that: + - result_empty_package_with_update_cache is failed + - "'Package name(s) cannot be empty or whitespace-only' == result_empty_package_with_update_cache.msg" diff --git a/tests/integration/targets/btrfs_subvolume/defaults/main.yml b/tests/integration/targets/btrfs_subvolume/defaults/main.yml index 52c88d5de1..fad7fb401a 100644 --- a/tests/integration/targets/btrfs_subvolume/defaults/main.yml +++ b/tests/integration/targets/btrfs_subvolume/defaults/main.yml @@ -4,15 
+4,15 @@ # SPDX-License-Identifier: GPL-3.0-or-later btrfs_subvolume_single_configs: -- file: "/tmp/disks0.img" - loop: "/dev/loop95" + - file: "/tmp/disks0.img" + loop: "/dev/loop95" btrfs_subvolume_multiple_configs: -- file: "/tmp/diskm0.img" - loop: "/dev/loop97" -- file: "/tmp/diskm1.img" - loop: "/dev/loop98" -- file: "/tmp/diskm2.img" - loop: "/dev/loop99" + - file: "/tmp/diskm0.img" + loop: "/dev/loop97" + - file: "/tmp/diskm1.img" + loop: "/dev/loop98" + - file: "/tmp/diskm2.img" + loop: "/dev/loop99" btrfs_subvolume_configs: "{{ btrfs_subvolume_single_configs + btrfs_subvolume_multiple_configs }}" btrfs_subvolume_single_devices: "{{ btrfs_subvolume_single_configs | map(attribute='loop') }}" btrfs_subvolume_single_label: "single" diff --git a/tests/integration/targets/btrfs_subvolume/tasks/main.yml b/tests/integration/targets/btrfs_subvolume/tasks/main.yml index d472704401..f97b6643a8 100644 --- a/tests/integration/targets/btrfs_subvolume/tasks/main.yml +++ b/tests/integration/targets/btrfs_subvolume/tasks/main.yml @@ -8,22 +8,22 @@ name: - btrfs-progs # btrfs userspace - util-linux # losetup - ignore_errors: True + ignore_errors: true register: btrfs_installed - name: Execute integration tests tests block: - - ansible.builtin.include_tasks: 'setup.yml' + - ansible.builtin.include_tasks: 'setup.yml' - - name: "Execute test scenario for single device filesystem" - ansible.builtin.include_tasks: 'run_filesystem_tests.yml' - vars: - btrfs_subvolume_target_device: "{{ btrfs_subvolume_single_devices | first }}" - btrfs_subvolume_target_label: "{{ btrfs_subvolume_single_label }}" + - name: "Execute test scenario for single device filesystem" + ansible.builtin.include_tasks: 'run_filesystem_tests.yml' + vars: + btrfs_subvolume_target_device: "{{ btrfs_subvolume_single_devices | first }}" + btrfs_subvolume_target_label: "{{ btrfs_subvolume_single_label }}" - - name: "Execute test scenario for multiple device configuration" - ansible.builtin.include_tasks: 'run_filesystem_tests.yml' - vars: - btrfs_subvolume_target_device: "{{ btrfs_subvolume_multiple_devices | first }}" - btrfs_subvolume_target_label: "{{ btrfs_subvolume_multiple_label }}" + - name: "Execute test scenario for multiple device configuration" + ansible.builtin.include_tasks: 'run_filesystem_tests.yml' + vars: + btrfs_subvolume_target_device: "{{ btrfs_subvolume_multiple_devices | first }}" + btrfs_subvolume_target_label: "{{ btrfs_subvolume_multiple_label }}" when: btrfs_installed is success diff --git a/tests/integration/targets/btrfs_subvolume/tasks/run_filesystem_tests.yml b/tests/integration/targets/btrfs_subvolume/tasks/run_filesystem_tests.yml index 0ea3fa6660..137f97a235 100644 --- a/tests/integration/targets/btrfs_subvolume/tasks/run_filesystem_tests.yml +++ b/tests/integration/targets/btrfs_subvolume/tasks/run_filesystem_tests.yml @@ -10,23 +10,23 @@ - name: "Execute test scenarios where non-root subvolume is mounted" block: - - name: Create subvolume '/nonroot' - community.general.btrfs_subvolume: - automount: Yes - name: "/nonroot" - filesystem_label: "{{ btrfs_subvolume_target_label }}" - state: "present" - register: nonroot - - name: "Mount subvolume '/nonroot'" - ansible.posix.mount: - src: "{{ nonroot.filesystem.devices | first }}" - path: /mnt - opts: "subvolid={{ nonroot.target_subvolume_id }}" - fstype: btrfs - state: mounted - - name: "Run tests for explicit, mounted single device configuration" - ansible.builtin.include_tasks: 'run_common_tests.yml' - - name: "Unmount subvolume /nonroot" - 
ansible.posix.mount: - path: /mnt - state: absent + - name: Create subvolume '/nonroot' + community.general.btrfs_subvolume: + automount: true + name: "/nonroot" + filesystem_label: "{{ btrfs_subvolume_target_label }}" + state: "present" + register: nonroot + - name: "Mount subvolume '/nonroot'" + ansible.posix.mount: + src: "{{ nonroot.filesystem.devices | first }}" + path: /mnt + opts: "subvolid={{ nonroot.target_subvolume_id }}" + fstype: btrfs + state: mounted + - name: "Run tests for explicit, mounted single device configuration" + ansible.builtin.include_tasks: 'run_common_tests.yml' + - name: "Unmount subvolume /nonroot" + ansible.posix.mount: + path: /mnt + state: absent diff --git a/tests/integration/targets/btrfs_subvolume/tasks/setup.yml b/tests/integration/targets/btrfs_subvolume/tasks/setup.yml index f5bbdf9c54..f5d03a1779 100644 --- a/tests/integration/targets/btrfs_subvolume/tasks/setup.yml +++ b/tests/integration/targets/btrfs_subvolume/tasks/setup.yml @@ -18,12 +18,12 @@ - name: Create single device btrfs filesystem ansible.builtin.command: cmd: "mkfs.btrfs --label {{ btrfs_subvolume_single_label }} -f {{ btrfs_subvolume_single_devices | first }}" - changed_when: True + changed_when: true - name: Create multiple device btrfs filesystem ansible.builtin.command: cmd: "mkfs.btrfs --label {{ btrfs_subvolume_multiple_label }} -f -d raid0 {{ btrfs_subvolume_multiple_devices | join(' ') }}" - changed_when: True + changed_when: true # Typically created by udev, but apparently missing on Alpine - name: Create btrfs control device node @@ -34,4 +34,4 @@ - name: Force rescan to ensure all device are detected ansible.builtin.command: cmd: "btrfs device scan" - changed_when: True + changed_when: true diff --git a/tests/integration/targets/btrfs_subvolume/tasks/test_filesystem_matching.yml b/tests/integration/targets/btrfs_subvolume/tasks/test_filesystem_matching.yml index 2455eeacf1..b00c033bcf 100644 --- a/tests/integration/targets/btrfs_subvolume/tasks/test_filesystem_matching.yml +++ b/tests/integration/targets/btrfs_subvolume/tasks/test_filesystem_matching.yml @@ -5,76 +5,76 @@ - name: "Match targeted filesystem by label" block: - - name: Match '{{ btrfs_subvolume_target_label }}' filesystem by label - community.general.btrfs_subvolume: - automount: Yes - name: "/match_label" - filesystem_label: "{{ btrfs_subvolume_target_label }}" - state: "present" - register: result + - name: Match '{{ btrfs_subvolume_target_label }}' filesystem by label + community.general.btrfs_subvolume: + automount: true + name: "/match_label" + filesystem_label: "{{ btrfs_subvolume_target_label }}" + state: "present" + register: result - - name: Validate the '{{ btrfs_subvolume_target_label }}' filesystem was chosen - ansible.builtin.assert: - that: - - result.filesystem.label == btrfs_subvolume_target_label + - name: Validate the '{{ btrfs_subvolume_target_label }}' filesystem was chosen + ansible.builtin.assert: + that: + - result.filesystem.label == btrfs_subvolume_target_label - name: "Match targeted filesystem by uuid" block: - - name: Match '{{ btrfs_subvolume_target_label }}' filesystem by uuid - community.general.btrfs_subvolume: - automount: Yes - name: "/match_uuid" - filesystem_uuid: "{{ result.filesystem.uuid }}" - state: "present" - register: result + - name: Match '{{ btrfs_subvolume_target_label }}' filesystem by uuid + community.general.btrfs_subvolume: + automount: true + name: "/match_uuid" + filesystem_uuid: "{{ result.filesystem.uuid }}" + state: "present" + register: result - - name: 
Validate the '{{ btrfs_subvolume_target_label }}' filesystem was chosen - ansible.builtin.assert: - that: - - result.filesystem.label == btrfs_subvolume_target_label + - name: Validate the '{{ btrfs_subvolume_target_label }}' filesystem was chosen + ansible.builtin.assert: + that: + - result.filesystem.label == btrfs_subvolume_target_label - name: "Match targeted filesystem by devices" block: - - name: Match '{{ btrfs_subvolume_target_label }}' filesystem by device - community.general.btrfs_subvolume: - automount: Yes - name: "/match_device" - filesystem_device: "{{ result.filesystem.devices | first }}" - state: "present" - register: result + - name: Match '{{ btrfs_subvolume_target_label }}' filesystem by device + community.general.btrfs_subvolume: + automount: true + name: "/match_device" + filesystem_device: "{{ result.filesystem.devices | first }}" + state: "present" + register: result - - name: Validate the '{{ btrfs_subvolume_target_label }}' filesystem was chosen - ansible.builtin.assert: - that: - - result.filesystem.label == btrfs_subvolume_target_label + - name: Validate the '{{ btrfs_subvolume_target_label }}' filesystem was chosen + ansible.builtin.assert: + that: + - result.filesystem.label == btrfs_subvolume_target_label - name: "Match only mounted filesystem" block: - - name: "Mount filesystem '{{ btrfs_subvolume_target_label }}'" - ansible.posix.mount: - src: "{{ result.filesystem.devices | first }}" - path: /mnt - opts: "subvolid={{ 5 }}" - fstype: btrfs - state: mounted + - name: "Mount filesystem '{{ btrfs_subvolume_target_label }}'" + ansible.posix.mount: + src: "{{ result.filesystem.devices | first }}" + path: /mnt + opts: "subvolid={{ 5 }}" + fstype: btrfs + state: mounted - - name: Print current status - community.general.btrfs_info: + - name: Print current status + community.general.btrfs_info: - - name: Match '{{ btrfs_subvolume_target_label }}' filesystem when only mount - community.general.btrfs_subvolume: - automount: Yes - name: "/match_only_mounted" - state: "present" - register: result + - name: Match '{{ btrfs_subvolume_target_label }}' filesystem when only mount + community.general.btrfs_subvolume: + automount: true + name: "/match_only_mounted" + state: "present" + register: result - - name: "Unmount filesystem '{{ btrfs_subvolume_target_label }}'" - ansible.posix.mount: - path: /mnt - state: absent + - name: "Unmount filesystem '{{ btrfs_subvolume_target_label }}'" + ansible.posix.mount: + path: /mnt + state: absent - - name: Validate the '{{ btrfs_subvolume_target_label }}' filesystem was chosen - ansible.builtin.assert: - that: - - result.filesystem.label == btrfs_subvolume_target_label - when: False # TODO don't attempt this if the host already has a pre-existing btrfs filesystem + - name: Validate the '{{ btrfs_subvolume_target_label }}' filesystem was chosen + ansible.builtin.assert: + that: + - result.filesystem.label == btrfs_subvolume_target_label + when: false # TODO don't attempt this if the host already has a pre-existing btrfs filesystem diff --git a/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_clobber.yml b/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_clobber.yml index ce25a999ba..f0224b23f1 100644 --- a/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_clobber.yml +++ b/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_clobber.yml @@ -5,37 +5,37 @@ - name: Create a snapshot, overwriting if one already exists at path block: - - name: Create a snapshot named 'snapshot_clobber' - 
community.general.btrfs_subvolume: - automount: Yes - filesystem_label: "{{ btrfs_subvolume_target_label }}" - name: "/snapshot_clobber" - snapshot_source: "/" - snapshot_conflict: "clobber" - state: "present" - register: result - - name: Snapshot 'snapshot_clobber' created - ansible.builtin.assert: - that: - - result is changed + - name: Create a snapshot named 'snapshot_clobber' + community.general.btrfs_subvolume: + automount: true + filesystem_label: "{{ btrfs_subvolume_target_label }}" + name: "/snapshot_clobber" + snapshot_source: "/" + snapshot_conflict: "clobber" + state: "present" + register: result + - name: Snapshot 'snapshot_clobber' created + ansible.builtin.assert: + that: + - result is changed - - name: Create a snapshot named 'snapshot_clobber' (no idempotency) - community.general.btrfs_subvolume: - automount: Yes - filesystem_label: "{{ btrfs_subvolume_target_label }}" - name: "/snapshot_clobber" - snapshot_source: "/" - snapshot_conflict: "clobber" - state: "present" - register: result - - name: Snapshot 'snapshot_clobber' created (no idempotency) - ansible.builtin.assert: - that: - - result is changed + - name: Create a snapshot named 'snapshot_clobber' (no idempotency) + community.general.btrfs_subvolume: + automount: true + filesystem_label: "{{ btrfs_subvolume_target_label }}" + name: "/snapshot_clobber" + snapshot_source: "/" + snapshot_conflict: "clobber" + state: "present" + register: result + - name: Snapshot 'snapshot_clobber' created (no idempotency) + ansible.builtin.assert: + that: + - result is changed - name: Cleanup created snapshot community.general.btrfs_subvolume: - automount: Yes + automount: true filesystem_label: "{{ btrfs_subvolume_target_label }}" name: "/snapshot_clobber" state: "absent" diff --git a/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_error.yml b/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_error.yml index 49d928b74c..1399acfd4b 100644 --- a/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_error.yml +++ b/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_error.yml @@ -5,38 +5,38 @@ - name: Create a snapshot, erroring if one already exists at path block: - - name: Create a snapshot named 'snapshot_error' - community.general.btrfs_subvolume: - automount: Yes - filesystem_label: "{{ btrfs_subvolume_target_label }}" - name: "/snapshot_error" - snapshot_source: "/" - snapshot_conflict: "error" - state: "present" - register: result - - name: Snapshot 'snapshot_error' created - ansible.builtin.assert: - that: - - result is changed + - name: Create a snapshot named 'snapshot_error' + community.general.btrfs_subvolume: + automount: true + filesystem_label: "{{ btrfs_subvolume_target_label }}" + name: "/snapshot_error" + snapshot_source: "/" + snapshot_conflict: "error" + state: "present" + register: result + - name: Snapshot 'snapshot_error' created + ansible.builtin.assert: + that: + - result is changed - - name: Create a snapshot named 'snapshot_error' (no idempotency) - community.general.btrfs_subvolume: - automount: Yes - filesystem_label: "{{ btrfs_subvolume_target_label }}" - name: "/snapshot_error" - snapshot_source: "/" - snapshot_conflict: "error" - state: "present" - register: result - ignore_errors: true - - name: Snapshot 'snapshot_error' created (no idempotency) - ansible.builtin.assert: - that: - - result is not changed + - name: Create a snapshot named 'snapshot_error' (no idempotency) + community.general.btrfs_subvolume: + automount: true + filesystem_label: "{{ 
btrfs_subvolume_target_label }}" + name: "/snapshot_error" + snapshot_source: "/" + snapshot_conflict: "error" + state: "present" + register: result + ignore_errors: true + - name: Snapshot 'snapshot_error' created (no idempotency) + ansible.builtin.assert: + that: + - result is not changed - name: Cleanup created snapshot community.general.btrfs_subvolume: - automount: Yes + automount: true filesystem_label: "{{ btrfs_subvolume_target_label }}" name: "/snapshot_error" state: "absent" diff --git a/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_skip.yml b/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_skip.yml index 07e65b133c..33cd46ecce 100644 --- a/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_skip.yml +++ b/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_skip.yml @@ -5,37 +5,37 @@ - name: Create a snapshot if one does not already exist at path block: - - name: Create a snapshot named 'snapshot_skip' - community.general.btrfs_subvolume: - automount: Yes - filesystem_label: "{{ btrfs_subvolume_target_label }}" - name: "/snapshot_skip" - snapshot_source: "/" - snapshot_conflict: "skip" - state: "present" - register: result - - name: Snapshot 'snapshot_skip' created - ansible.builtin.assert: - that: - - result is changed + - name: Create a snapshot named 'snapshot_skip' + community.general.btrfs_subvolume: + automount: true + filesystem_label: "{{ btrfs_subvolume_target_label }}" + name: "/snapshot_skip" + snapshot_source: "/" + snapshot_conflict: "skip" + state: "present" + register: result + - name: Snapshot 'snapshot_skip' created + ansible.builtin.assert: + that: + - result is changed - - name: Create a snapshot named 'snapshot_skip' (idempotency) - community.general.btrfs_subvolume: - automount: Yes - filesystem_label: "{{ btrfs_subvolume_target_label }}" - name: "/snapshot_skip" - snapshot_source: "/" - snapshot_conflict: "skip" - state: "present" - register: result - - name: Snapshot 'snapshot_skip' created (idempotency) - ansible.builtin.assert: - that: - - result is not changed + - name: Create a snapshot named 'snapshot_skip' (idempotency) + community.general.btrfs_subvolume: + automount: true + filesystem_label: "{{ btrfs_subvolume_target_label }}" + name: "/snapshot_skip" + snapshot_source: "/" + snapshot_conflict: "skip" + state: "present" + register: result + - name: Snapshot 'snapshot_skip' created (idempotency) + ansible.builtin.assert: + that: + - result is not changed - name: Cleanup created snapshot community.general.btrfs_subvolume: - automount: Yes + automount: true filesystem_label: "{{ btrfs_subvolume_target_label }}" name: "/snapshot_skip" state: "absent" diff --git a/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_default.yml b/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_default.yml index f6eed93878..a506d56129 100644 --- a/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_default.yml +++ b/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_default.yml @@ -5,95 +5,95 @@ - name: Change the default subvolume block: - - name: Update filesystem default subvolume to '@' - community.general.btrfs_subvolume: - automount: Yes - default: True - filesystem_label: "{{ btrfs_subvolume_target_label }}" - name: "/@" - state: "present" - register: result - - name: Subvolume '@' set to default - ansible.builtin.assert: - that: - - result is changed - - name: Update filesystem default subvolume to '@' (idempotency) - community.general.btrfs_subvolume: - automount: 
Yes - default: True - filesystem_label: "{{ btrfs_subvolume_target_label }}" - name: "/@" - state: "present" - register: result - - name: Subvolume '@' set to default (idempotency) - ansible.builtin.assert: - that: - - result is not changed + - name: Update filesystem default subvolume to '@' + community.general.btrfs_subvolume: + automount: true + default: true + filesystem_label: "{{ btrfs_subvolume_target_label }}" + name: "/@" + state: "present" + register: result + - name: Subvolume '@' set to default + ansible.builtin.assert: + that: + - result is changed + - name: Update filesystem default subvolume to '@' (idempotency) + community.general.btrfs_subvolume: + automount: true + default: true + filesystem_label: "{{ btrfs_subvolume_target_label }}" + name: "/@" + state: "present" + register: result + - name: Subvolume '@' set to default (idempotency) + ansible.builtin.assert: + that: + - result is not changed - name: Revert the default subvolume block: - - name: Revert filesystem default subvolume to '/' - community.general.btrfs_subvolume: - automount: Yes - default: True - filesystem_label: "{{ btrfs_subvolume_target_label }}" - name: "/" - state: "present" - register: result - - name: Subvolume '/' set to default - ansible.builtin.assert: - that: - - result is changed - - name: Revert filesystem default subvolume to '/' (idempotency) - community.general.btrfs_subvolume: - automount: Yes - default: True - filesystem_label: "{{ btrfs_subvolume_target_label }}" - name: "/" - state: "present" - register: result - - name: Subvolume '/' set to default (idempotency) - ansible.builtin.assert: - that: - - result is not changed + - name: Revert filesystem default subvolume to '/' + community.general.btrfs_subvolume: + automount: true + default: true + filesystem_label: "{{ btrfs_subvolume_target_label }}" + name: "/" + state: "present" + register: result + - name: Subvolume '/' set to default + ansible.builtin.assert: + that: + - result is changed + - name: Revert filesystem default subvolume to '/' (idempotency) + community.general.btrfs_subvolume: + automount: true + default: true + filesystem_label: "{{ btrfs_subvolume_target_label }}" + name: "/" + state: "present" + register: result + - name: Subvolume '/' set to default (idempotency) + ansible.builtin.assert: + that: + - result is not changed - name: Change the default subvolume again block: - - name: Update filesystem default subvolume to '@' - community.general.btrfs_subvolume: - automount: Yes - default: True - filesystem_label: "{{ btrfs_subvolume_target_label }}" - name: "/@" - state: "present" - register: result - - name: Subvolume '@' set to default - ansible.builtin.assert: - that: - - result is changed + - name: Update filesystem default subvolume to '@' + community.general.btrfs_subvolume: + automount: true + default: true + filesystem_label: "{{ btrfs_subvolume_target_label }}" + name: "/@" + state: "present" + register: result + - name: Subvolume '@' set to default + ansible.builtin.assert: + that: + - result is changed - name: Revert custom default subvolume to fs_tree root when deleted block: - - name: Delete custom default subvolume '@' - community.general.btrfs_subvolume: - automount: Yes - filesystem_label: "{{ btrfs_subvolume_target_label }}" - name: "/@" - state: "absent" - register: result - - name: Subvolume '@' deleted - ansible.builtin.assert: - that: - - result is changed - - name: Delete custom default subvolume '@' (idempotency) - community.general.btrfs_subvolume: - automount: Yes - filesystem_label: "{{ 
btrfs_subvolume_target_label }}" - name: "/@" - state: "absent" - register: result - - name: Subvolume '@' deleted (idempotency) - ansible.builtin.assert: - that: - - result is not changed + - name: Delete custom default subvolume '@' + community.general.btrfs_subvolume: + automount: true + filesystem_label: "{{ btrfs_subvolume_target_label }}" + name: "/@" + state: "absent" + register: result + - name: Subvolume '@' deleted + ansible.builtin.assert: + that: + - result is changed + - name: Delete custom default subvolume '@' (idempotency) + community.general.btrfs_subvolume: + automount: true + filesystem_label: "{{ btrfs_subvolume_target_label }}" + name: "/@" + state: "absent" + register: result + - name: Subvolume '@' deleted (idempotency) + ansible.builtin.assert: + that: + - result is not changed diff --git a/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_nested.yml b/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_nested.yml index b706bf72a8..a5c152f9ef 100644 --- a/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_nested.yml +++ b/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_nested.yml @@ -5,57 +5,57 @@ - name: Create parent subvolume 'container' community.general.btrfs_subvolume: - automount: Yes + automount: true filesystem_label: "{{ btrfs_subvolume_target_label }}" name: "/container" state: "present" - name: Create a nested subvolume block: - - name: Create a subvolume named 'nested' inside 'container' - community.general.btrfs_subvolume: - automount: Yes - filesystem_label: "{{ btrfs_subvolume_target_label }}" - name: "/container/nested" - state: "present" - register: result - - name: Subvolume 'container/nested' created - ansible.builtin.assert: - that: - - result is changed - - name: Create a subvolume named 'nested' inside 'container' (idempotency) - community.general.btrfs_subvolume: - automount: Yes - filesystem_label: "{{ btrfs_subvolume_target_label }}" - name: "/container/nested" - state: "present" - register: result - - name: Subvolume 'container/nested' created (idempotency) - ansible.builtin.assert: - that: - - result is not changed + - name: Create a subvolume named 'nested' inside 'container' + community.general.btrfs_subvolume: + automount: true + filesystem_label: "{{ btrfs_subvolume_target_label }}" + name: "/container/nested" + state: "present" + register: result + - name: Subvolume 'container/nested' created + ansible.builtin.assert: + that: + - result is changed + - name: Create a subvolume named 'nested' inside 'container' (idempotency) + community.general.btrfs_subvolume: + automount: true + filesystem_label: "{{ btrfs_subvolume_target_label }}" + name: "/container/nested" + state: "present" + register: result + - name: Subvolume 'container/nested' created (idempotency) + ansible.builtin.assert: + that: + - result is not changed - name: Remove a nested subvolume block: - - name: Remove a subvolume named 'nested' inside 'container' - community.general.btrfs_subvolume: - automount: Yes - filesystem_label: "{{ btrfs_subvolume_target_label }}" - name: "/container/nested" - state: "absent" - register: result - - name: Subvolume 'container/nested' removed - ansible.builtin.assert: - that: - - result is changed - - name: Remove a subvolume named 'nested' inside 'container' (idempotency) - community.general.btrfs_subvolume: - automount: Yes - filesystem_label: "{{ btrfs_subvolume_target_label }}" - name: "/container/nested" - state: "absent" - register: result - - name: Subvolume 'container/nested' 
removed (idempotency) - ansible.builtin.assert: - that: - - result is not changed + - name: Remove a subvolume named 'nested' inside 'container' + community.general.btrfs_subvolume: + automount: true + filesystem_label: "{{ btrfs_subvolume_target_label }}" + name: "/container/nested" + state: "absent" + register: result + - name: Subvolume 'container/nested' removed + ansible.builtin.assert: + that: + - result is changed + - name: Remove a subvolume named 'nested' inside 'container' (idempotency) + community.general.btrfs_subvolume: + automount: true + filesystem_label: "{{ btrfs_subvolume_target_label }}" + name: "/container/nested" + state: "absent" + register: result + - name: Subvolume 'container/nested' removed (idempotency) + ansible.builtin.assert: + that: + - result is not changed diff --git a/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_recursive.yml b/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_recursive.yml index 7e9f990070..a0b86a11ac 100644 --- a/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_recursive.yml +++ b/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_recursive.yml @@ -5,82 +5,82 @@ - name: Recursively create subvolumes block: - - name: Create a subvolume named '/recursive/son/grandson' - community.general.btrfs_subvolume: - automount: Yes - filesystem_label: "{{ btrfs_subvolume_target_label }}" - name: "/recursive/son/grandson" - recursive: Yes - state: "present" - register: result - - name: Subvolume named '/recursive/son/grandson' created - ansible.builtin.assert: - that: - - result is changed + - name: Create a subvolume named '/recursive/son/grandson' + community.general.btrfs_subvolume: + automount: true + filesystem_label: "{{ btrfs_subvolume_target_label }}" + name: "/recursive/son/grandson" + recursive: true + state: "present" + register: result + - name: Subvolume named '/recursive/son/grandson' created + ansible.builtin.assert: + that: + - result is changed - - name: Create a subvolume named '/recursive/son/grandson' (idempotency) - community.general.btrfs_subvolume: - automount: Yes - filesystem_label: "{{ btrfs_subvolume_target_label }}" - name: "/recursive/son/grandson" - recursive: Yes - state: "present" - register: result - - name: Subvolume named '/recursive/son/grandson' created (idempotency) - ansible.builtin.assert: - that: - - result is not changed + - name: Create a subvolume named '/recursive/son/grandson' (idempotency) + community.general.btrfs_subvolume: + automount: true + filesystem_label: "{{ btrfs_subvolume_target_label }}" + name: "/recursive/son/grandson" + recursive: true + state: "present" + register: result + - name: Subvolume named '/recursive/son/grandson' created (idempotency) + ansible.builtin.assert: + that: + - result is not changed - - name: Create a subvolume named '/recursive/daughter/granddaughter' - community.general.btrfs_subvolume: - automount: Yes - filesystem_label: "{{ btrfs_subvolume_target_label }}" - name: "/recursive/daughter/granddaughter" - recursive: Yes - state: "present" - register: result - - name: Subvolume named '/recursive/son/grandson' created - ansible.builtin.assert: - that: - - result is changed + - name: Create a subvolume named '/recursive/daughter/granddaughter' + community.general.btrfs_subvolume: + automount: true + filesystem_label: "{{ btrfs_subvolume_target_label }}" + name: "/recursive/daughter/granddaughter" + recursive: true + state: "present" + register: result + - name: Subvolume named '/recursive/son/grandson' created + 
ansible.builtin.assert: + that: + - result is changed - - name: Create a subvolume named '/recursive/daughter/granddaughter' (idempotency) - community.general.btrfs_subvolume: - automount: Yes - filesystem_label: "{{ btrfs_subvolume_target_label }}" - name: "/recursive/daughter/granddaughter" - recursive: Yes - state: "present" - register: result - - name: Subvolume named '/recursive/son/grandson' created (idempotency) - ansible.builtin.assert: - that: - - result is not changed + - name: Create a subvolume named '/recursive/daughter/granddaughter' (idempotency) + community.general.btrfs_subvolume: + automount: true + filesystem_label: "{{ btrfs_subvolume_target_label }}" + name: "/recursive/daughter/granddaughter" + recursive: true + state: "present" + register: result + - name: Subvolume named '/recursive/son/grandson' created (idempotency) + ansible.builtin.assert: + that: + - result is not changed - name: Recursively remove subvolumes block: - - name: Remove subvolume '/recursive' and all descendents - community.general.btrfs_subvolume: - automount: Yes - filesystem_label: "{{ btrfs_subvolume_target_label }}" - name: "/recursive" - recursive: Yes - state: "absent" - register: result - - name: Subvolume '/recursive' removed - ansible.builtin.assert: - that: - - result is changed + - name: Remove subvolume '/recursive' and all descendents + community.general.btrfs_subvolume: + automount: true + filesystem_label: "{{ btrfs_subvolume_target_label }}" + name: "/recursive" + recursive: true + state: "absent" + register: result + - name: Subvolume '/recursive' removed + ansible.builtin.assert: + that: + - result is changed - - name: Remove subvolume '/recursive' and all descendents (idempotency) - community.general.btrfs_subvolume: - automount: Yes - filesystem_label: "{{ btrfs_subvolume_target_label }}" - name: "/recursive" - recursive: Yes - state: "absent" - register: result - - name: Subvolume '/recursive' removed (idempotency) - ansible.builtin.assert: - that: - - result is not changed + - name: Remove subvolume '/recursive' and all descendents (idempotency) + community.general.btrfs_subvolume: + automount: true + filesystem_label: "{{ btrfs_subvolume_target_label }}" + name: "/recursive" + recursive: true + state: "absent" + register: result + - name: Subvolume '/recursive' removed (idempotency) + ansible.builtin.assert: + that: + - result is not changed diff --git a/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_simple.yml b/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_simple.yml index 6cd214e747..bde385aecd 100644 --- a/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_simple.yml +++ b/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_simple.yml @@ -5,50 +5,50 @@ - name: Create a simple subvolume block: - - name: Create a subvolume named 'simple' - community.general.btrfs_subvolume: - automount: Yes - filesystem_label: "{{ btrfs_subvolume_target_label }}" - name: "/simple" - state: "present" - register: result - - name: Subvolume named 'simple' created - ansible.builtin.assert: - that: - - result is changed - - name: Create a subvolume named 'simple' (idempotency) - community.general.btrfs_subvolume: - automount: Yes - filesystem_label: "{{ btrfs_subvolume_target_label }}" - name: "/simple" - state: "present" - register: result - - name: Subvolume named 'simple' created (idempotency) - ansible.builtin.assert: - that: - - result is not changed + - name: Create a subvolume named 'simple' + community.general.btrfs_subvolume: + 
+        automount: true
+        filesystem_label: "{{ btrfs_subvolume_target_label }}"
+        name: "/simple"
+        state: "present"
+      register: result
+    - name: Subvolume named 'simple' created
+      ansible.builtin.assert:
+        that:
+          - result is changed
+    - name: Create a subvolume named 'simple' (idempotency)
+      community.general.btrfs_subvolume:
+        automount: true
+        filesystem_label: "{{ btrfs_subvolume_target_label }}"
+        name: "/simple"
+        state: "present"
+      register: result
+    - name: Subvolume named 'simple' created (idempotency)
+      ansible.builtin.assert:
+        that:
+          - result is not changed

 - name: Remove a simple subvolume
   block:
-  - name: Remove a subvolume named 'simple'
-    community.general.btrfs_subvolume:
-      automount: Yes
-      filesystem_label: "{{ btrfs_subvolume_target_label }}"
-      name: "/simple"
-      state: "absent"
-    register: result
-  - name: Subvolume named 'simple' removed
-    ansible.builtin.assert:
-      that:
-        - result is changed
-  - name: Remove a subvolume named 'simple' (idempotency)
-    community.general.btrfs_subvolume:
-      automount: Yes
-      filesystem_label: "{{ btrfs_subvolume_target_label }}"
-      name: "/simple"
-      state: "absent"
-    register: result
-  - name: Subvolume named 'simple' removed (idempotency)
-    ansible.builtin.assert:
-      that:
-        - result is not changed
+    - name: Remove a subvolume named 'simple'
+      community.general.btrfs_subvolume:
+        automount: true
+        filesystem_label: "{{ btrfs_subvolume_target_label }}"
+        name: "/simple"
+        state: "absent"
+      register: result
+    - name: Subvolume named 'simple' removed
+      ansible.builtin.assert:
+        that:
+          - result is changed
+    - name: Remove a subvolume named 'simple' (idempotency)
+      community.general.btrfs_subvolume:
+        automount: true
+        filesystem_label: "{{ btrfs_subvolume_target_label }}"
+        name: "/simple"
+        state: "absent"
+      register: result
+    - name: Subvolume named 'simple' removed (idempotency)
+      ansible.builtin.assert:
+        that:
+          - result is not changed
diff --git a/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_whitespace.yml b/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_whitespace.yml
index 6a0147af6c..8fc798108f 100644
--- a/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_whitespace.yml
+++ b/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_whitespace.yml
@@ -5,58 +5,58 @@

 - name: Create a subvolume named 'container'
   community.general.btrfs_subvolume:
-    automount: Yes
+    automount: true
     filesystem_label: "{{ btrfs_subvolume_target_label }}"
     name: "/container"
     state: "present"

 - name: Create a subvolume with whitespace in the name
   block:
-  - name: Create a subvolume named 'container/my data'
-    community.general.btrfs_subvolume:
-      automount: Yes
-      filesystem_label: "{{ btrfs_subvolume_target_label }}"
-      name: "/container/my data"
-      state: "present"
-    register: result
-  - name: Subvolume named 'container/my data' created
-    ansible.builtin.assert:
-      that:
-        - result is changed
-  - name: Create a subvolume named 'container/my data' (idempotency)
-    community.general.btrfs_subvolume:
-      automount: Yes
-      filesystem_label: "{{ btrfs_subvolume_target_label }}"
-      name: "/container/my data"
-      state: "present"
-    register: result
-  - name: Subvolume named 'container/my data' created (idempotency)
-    ansible.builtin.assert:
-      that:
-        - result is not changed
+    - name: Create a subvolume named 'container/my data'
+      community.general.btrfs_subvolume:
+        automount: true
+        filesystem_label: "{{ btrfs_subvolume_target_label }}"
+        name: "/container/my data"
+        state: "present"
+      register: result
+    - name: Subvolume named 'container/my data' created
+      ansible.builtin.assert:
+        that:
+          - result is changed
+    - name: Create a subvolume named 'container/my data' (idempotency)
+      community.general.btrfs_subvolume:
+        automount: true
+        filesystem_label: "{{ btrfs_subvolume_target_label }}"
+        name: "/container/my data"
+        state: "present"
+      register: result
+    - name: Subvolume named 'container/my data' created (idempotency)
+      ansible.builtin.assert:
+        that:
+          - result is not changed

 - name: Remove a subvolume with whitespace in the name
   block:
-  - name: Remove a subvolume named 'container/my data'
-    community.general.btrfs_subvolume:
-      automount: Yes
-      filesystem_label: "{{ btrfs_subvolume_target_label }}"
-      name: "/container/my data"
-      state: "absent"
-    register: result
-  - name: Subvolume named 'container/my data' removed
-    ansible.builtin.assert:
-      that:
-        - result is changed
+    - name: Remove a subvolume named 'container/my data'
+      community.general.btrfs_subvolume:
+        automount: true
+        filesystem_label: "{{ btrfs_subvolume_target_label }}"
+        name: "/container/my data"
+        state: "absent"
+      register: result
+    - name: Subvolume named 'container/my data' removed
+      ansible.builtin.assert:
+        that:
+          - result is changed

-  - name: Remove a subvolume named 'container/my data' (idempotency)
-    community.general.btrfs_subvolume:
-      automount: Yes
-      filesystem_label: "{{ btrfs_subvolume_target_label }}"
-      name: "/container/my data"
-      state: "absent"
-    register: result
-  - name: Subvolume named 'container/my data' removed (idempotency)
-    ansible.builtin.assert:
-      that:
-        - result is not changed
+    - name: Remove a subvolume named 'container/my data' (idempotency)
+      community.general.btrfs_subvolume:
+        automount: true
+        filesystem_label: "{{ btrfs_subvolume_target_label }}"
+        name: "/container/my data"
+        state: "absent"
+      register: result
+    - name: Subvolume named 'container/my data' removed (idempotency)
+      ansible.builtin.assert:
+        that:
+          - result is not changed
diff --git a/tests/integration/targets/callback/filter_plugins/helper.py b/tests/integration/targets/callback/filter_plugins/helper.py
new file mode 100644
index 0000000000..867dce6c75
--- /dev/null
+++ b/tests/integration/targets/callback/filter_plugins/helper.py
@@ -0,0 +1,54 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.six import string_types
+
+
+def callback_results_extractor(outputs_results):
+    results = []
+    for result in outputs_results:
+        differences = []
+        expected_output = result['test']['expected_output']
+        stdout_lines = result['stdout_lines']
+        for i in range(max(len(expected_output), len(stdout_lines))):
+            line = "line_%s" % (i + 1)
+            test_line = stdout_lines[i] if i < len(stdout_lines) else None
+            expected_lines = expected_output[i] if i < len(expected_output) else None
+            if not isinstance(expected_lines, string_types) and expected_lines is not None:
+                if test_line not in expected_lines:
+                    differences.append({
+                        line: {
+                            'expected_one_of': expected_lines,
+                            'got': test_line,
+                        }
+                    })
+            else:
+                if test_line != expected_lines:
+                    differences.append({
+                        line: {
+                            'expected': expected_lines,
+                            'got': test_line,
+                        }
+                    })
+        results.append({
+            'name': result['test']['name'],
+            'output': {
+                'differences': differences,
+                'expected': expected_output,
+                'got': stdout_lines,
+            },
+        })
+    return results
+
+
+class FilterModule:
+    ''' Jinja2 compat filters '''
+
+    def filters(self):
+        return {
+            'callback_results_extractor': callback_results_extractor,
+        }
diff --git a/tests/integration/targets/callback/tasks/main.yml b/tests/integration/targets/callback/tasks/main.yml
index 827217a532..88988f9bf9 100644
--- a/tests/integration/targets/callback/tasks/main.yml
+++ b/tests/integration/targets/callback/tasks/main.yml
@@ -9,92 +9,59 @@
 # SPDX-License-Identifier: GPL-3.0-or-later

 - block:
-  - name: Create temporary playbook files
-    tempfile:
-      state: file
-      suffix: temp
-    loop: "{{ tests }}"
-    loop_control:
-      loop_var: test
-      label: "{{ test.name }}"
-    register: temporary_playbook_files
+    - name: Create temporary playbook files
+      tempfile:
+        state: file
+        suffix: temp
+      loop: "{{ tests }}"
+      loop_control:
+        loop_var: test
+        label: "{{ test.name }}"
+      register: temporary_playbook_files

-  - name: Set temporary playbook file content
-    copy:
-      content: "{{ test.playbook }}"
-      dest: "{{ temporary_playbook_files.results[test_idx].path }}"
-    loop: "{{ tests }}"
-    loop_control:
-      loop_var: test
-      index_var: test_idx
-      label: "{{ test.name }}"
+    - name: Set temporary playbook file content
+      copy:
+        content: "{{ test.playbook }}"
+        dest: "{{ temporary_playbook_files.results[test_idx].path }}"
+      loop: "{{ tests }}"
+      loop_control:
+        loop_var: test
+        index_var: test_idx
+        label: "{{ test.name }}"

-  - name: Collect outputs
-    command: "ansible-playbook -i {{ inventory }} {{ playbook }}"
-    environment: "{{ test.environment }}"
-    loop: "{{ tests }}"
-    loop_control:
-      loop_var: test
-      label: "{{ test.name }}"
-    register: outputs
-    changed_when: false
-    vars:
-      inventory: "{{ role_path }}/inventory.yml"
-      playbook: "
-        {%- for result in temporary_playbook_files.results -%}
-          {%- if result.test.name == test.name -%}
-            {{- result.path -}}
-          {%- endif -%}
-        {%- endfor -%}"
+    - name: Collect outputs
+      command: "ansible-playbook -i {{ inventory }} {{ playbook }} {{ test.extra_cli_arguments | default('') }}"
+      environment: "{{ test.environment }}"
+      loop: "{{ tests }}"
+      loop_control:
+        loop_var: test
+        label: "{{ test.name }}"
+      register: outputs
+      changed_when: false
+      vars:
+        inventory: "{{ role_path }}/inventory.yml"
+        playbook: "
+          {%- for result in temporary_playbook_files.results -%}
+            {%- if result.test.name == test.name -%}
+              {{- result.path -}}
+            {%- endif -%}
+          {%- endfor -%}"

-  - name: Assert test output equals expected output
-    assert:
-      that: result.output.differences | length == 0
-    loop: "{{ results }}"
-    loop_control:
-      loop_var: result
-      label: "{{ result.name }}"
-    register: assertions
-    vars:
-      results: >-
-        {%- set results = [] -%}
-        {%- for result in outputs.results -%}
-        {%- set differences = [] -%}
-        {%- for i in range([result.test.expected_output | count, result.stdout_lines | count] | max) -%}
-        {%- set line = "line_%s" | format(i+1) -%}
-        {%- set test_line = result.stdout_lines[i] | default(none) -%}
-        {%- set expected_lines = result.test.expected_output[i] | default(none) -%}
-        {%- if expected_lines is not string and expected_lines is not none -%}
-        {%- if test_line not in expected_lines -%}
-        {{- differences.append({
-          line: {
-            'expected_one_of': expected_lines,
-            'got': test_line }}) -}}
-        {%- endif -%}
-        {%- else -%}
-        {%- if expected_lines != test_line -%}
-        {{- differences.append({
-          line: {
-            'expected': expected_lines,
-            'got': test_line }}) -}}
-        {%- endif -%}
-        {%- endif -%}
-        {%- endfor -%}
-        {{- results.append({
-          'name': result.test.name,
-          'output': {
-            'differences': differences,
-            'expected': result.test.expected_output,
-            'got': result.stdout_lines }}) -}}
-        {%- endfor -%}
-        {{- results -}}
+    - name: Assert test output equals expected output
+      assert:
+        that: result.output.differences | length == 0
+      loop: "{{ outputs.results | callback_results_extractor }}"
+      loop_control:
+        loop_var: result
+        label: "{{ result.name }}"
+      register: assertions

   always:
-  - name: Remove temporary playbooks
-    file:
-      path: "{{ temporary_file.path }}"
-      state: absent
-    loop: "{{ temporary_playbook_files.results }}"
-    loop_control:
-      loop_var: temporary_file
-      label: "{{ temporary_file.test.name }}: {{ temporary_file.path }}"
+    - name: Remove temporary playbooks
+      file:
+        path: "{{ temporary_file.path }}"
+        state: absent
+      loop: "{{ temporary_playbook_files.results }}"
+      loop_control:
+        loop_var: temporary_file
+        label: "{{ temporary_file.test.name }}: {{ temporary_file.path }}"
diff --git a/tests/integration/targets/callback_default_without_diff/tasks/main.yml b/tests/integration/targets/callback_default_without_diff/tasks/main.yml
index 5fc656e847..adb760fd02 100644
--- a/tests/integration/targets/callback_default_without_diff/tasks/main.yml
+++ b/tests/integration/targets/callback_default_without_diff/tasks/main.yml
@@ -41,22 +41,21 @@
           content: |
             Foo bar
            Bar baz bam!
-        expected_output: [
-          "",
-          "PLAY [testhost] ****************************************************************",
-          "",
-          "TASK [Gathering Facts] *********************************************************",
-          "ok: [testhost]",
-          "",
-          "TASK [Create file] *************************************************************",
-          "changed: [testhost]",
-          "",
-          "TASK [Modify file] *************************************************************",
-          "changed: [testhost]",
-          "",
-          "PLAY RECAP *********************************************************************",
-          "testhost                   : ok=3    changed=2    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   ",
-        ]
+        expected_output:
+          - ""
+          - "PLAY [testhost] ****************************************************************"
+          - ""
+          - "TASK [Gathering Facts] *********************************************************"
+          - "ok: [testhost]"
+          - ""
+          - "TASK [Create file] *************************************************************"
+          - "changed: [testhost]"
+          - ""
+          - "TASK [Modify file] *************************************************************"
+          - "changed: [testhost]"
+          - ""
+          - "PLAY RECAP *********************************************************************"
+          - "testhost                   : ok=3    changed=2    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   "

   always:
     - name: Clean up temp file
diff --git a/tests/integration/targets/callback_diy/tasks/main.yml b/tests/integration/targets/callback_diy/tasks/main.yml
index fa468b52ba..9520ac5d55 100644
--- a/tests/integration/targets/callback_diy/tasks/main.yml
+++ b/tests/integration/targets/callback_diy/tasks/main.yml
@@ -25,18 +25,17 @@
               - name: Sample task name
                 debug:
                   msg: sample debug msg
-        expected_output: [
-          "",
-          "PLAY [testhost] ****************************************************************",
-          "",
-          "TASK [Sample task name] ********************************************************",
-          "ok: [testhost] => {",
-          "    \"msg\": \"sample debug msg\"",
-          "}",
-          "",
-          "PLAY RECAP *********************************************************************",
-          "testhost                   : ok=1    changed=0    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   "
-        ]
+        expected_output:
+          - ""
"" + - "TASK [Sample task name] ********************************************************" + - "ok: [testhost] => {" + - " \"msg\": \"sample debug msg\"" + - "}" + - "" + - "PLAY RECAP *********************************************************************" + - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 " - name: Set playbook_on_start_msg callback using environment variable environment: @@ -51,19 +50,18 @@ - name: Sample task name debug: msg: sample debug msg - expected_output: [ - "Sample output Sample playbook message", - "", - "PLAY [testhost] ****************************************************************", - "", - "TASK [Sample task name] ********************************************************", - "ok: [testhost] => {", - " \"msg\": \"sample debug msg\"", - "}", - "", - "PLAY RECAP *********************************************************************", - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 " - ] + expected_output: + - "Sample output Sample playbook message" + - "" + - "PLAY [testhost] ****************************************************************" + - "" + - "TASK [Sample task name] ********************************************************" + - "ok: [testhost] => {" + - " \"msg\": \"sample debug msg\"" + - "}" + - "" + - "PLAY RECAP *********************************************************************" + - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 " - name: Set playbook_on_play_start_msg callback using play variable environment: @@ -80,17 +78,16 @@ - name: Sample task name debug: msg: sample debug msg - expected_output: [ - "Sample output Sample play name", - "", - "TASK [Sample task name] ********************************************************", - "ok: [testhost] => {", - " \"msg\": \"sample debug msg\"", - "}", - "", - "PLAY RECAP *********************************************************************", - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 " - ] + expected_output: + - "Sample output Sample play name" + - "" + - "TASK [Sample task name] ********************************************************" + - "ok: [testhost] => {" + - " \"msg\": \"sample debug msg\"" + - "}" + - "" + - "PLAY RECAP *********************************************************************" + - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 " - name: Set playbook_on_task_start_msg callback using play variable environment: @@ -106,17 +103,16 @@ - name: Sample task name debug: msg: sample debug msg - expected_output: [ - "", - "PLAY [testhost] ****************************************************************", - "Sample output Sample task name", - "ok: [testhost] => {", - " \"msg\": \"sample debug msg\"", - "}", - "", - "PLAY RECAP *********************************************************************", - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 " - ] + expected_output: + - "" + - "PLAY [testhost] ****************************************************************" + - "Sample output Sample task name" + - "ok: [testhost] => {" + - " \"msg\": \"sample debug msg\"" + - "}" + - "" + - "PLAY RECAP *********************************************************************" + - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 " - name: Set playbook_on_task_start_msg callback using task variable environment: @@ -132,17 +128,16 @@ msg: sample debug msg vars: 
ansible_callback_diy_playbook_on_task_start_msg: Sample output {{ ansible_callback_diy.task.name }} - expected_output: [ - "", - "PLAY [testhost] ****************************************************************", - "Sample output Sample task name", - "ok: [testhost] => {", - " \"msg\": \"sample debug msg\"", - "}", - "", - "PLAY RECAP *********************************************************************", - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 " - ] + expected_output: + - "" + - "PLAY [testhost] ****************************************************************" + - "Sample output Sample task name" + - "ok: [testhost] => {" + - " \"msg\": \"sample debug msg\"" + - "}" + - "" + - "PLAY RECAP *********************************************************************" + - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 " - name: Set runner_on_ok_msg callback using task variable environment: @@ -158,16 +153,15 @@ msg: sample debug msg vars: ansible_callback_diy_runner_on_ok_msg: Sample output {{ ansible_callback_diy.result.output.msg }} - expected_output: [ - "", - "PLAY [testhost] ****************************************************************", - "", - "TASK [Sample task name] ********************************************************", - "Sample output sample debug msg", - "", - "PLAY RECAP *********************************************************************", - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 " - ] + expected_output: + - "" + - "PLAY [testhost] ****************************************************************" + - "" + - "TASK [Sample task name] ********************************************************" + - "Sample output sample debug msg" + - "" + - "PLAY RECAP *********************************************************************" + - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 " - name: Set runner_on_failed_msg callback using task variable environment: @@ -185,16 +179,15 @@ ignore_errors: true vars: ansible_callback_diy_runner_on_failed_msg: Sample output Sample failure message - expected_output: [ - "", - "PLAY [testhost] ****************************************************************", - "", - "TASK [Sample task name] ********************************************************", - "Sample output Sample failure message", - "", - "PLAY RECAP *********************************************************************", - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=1 " - ] + expected_output: + - "" + - "PLAY [testhost] ****************************************************************" + - "" + - "TASK [Sample task name] ********************************************************" + - "Sample output Sample failure message" + - "" + - "PLAY RECAP *********************************************************************" + - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=1 " - name: Set runner_on_skipped_msg callback using task variable environment: @@ -211,16 +204,15 @@ when: false vars: ansible_callback_diy_runner_on_skipped_msg: Sample output Skipped {{ ansible_callback_diy.task.name }} - expected_output: [ - "", - "PLAY [testhost] ****************************************************************", - "", - "TASK [Sample task name] ********************************************************", - "Sample output Skipped Sample task name", - "", - "PLAY RECAP 
*********************************************************************", - "testhost : ok=0 changed=0 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0 " - ] + expected_output: + - "" + - "PLAY [testhost] ****************************************************************" + - "" + - "TASK [Sample task name] ********************************************************" + - "Sample output Skipped Sample task name" + - "" + - "PLAY RECAP *********************************************************************" + - "testhost : ok=0 changed=0 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0 " - name: Set runner_item_on_ok_msg callback using task variable environment: @@ -240,18 +232,17 @@ - sample item 3 vars: ansible_callback_diy_runner_item_on_ok_msg: Sample output Looping {{ ansible_callback_diy.result.output.msg }} - expected_output: [ - "", - "PLAY [testhost] ****************************************************************", - "", - "TASK [Sample task name] ********************************************************", - "Sample output Looping sample debug msg sample item 1", - "Sample output Looping sample debug msg sample item 2", - "Sample output Looping sample debug msg sample item 3", - "", - "PLAY RECAP *********************************************************************", - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 " - ] + expected_output: + - "" + - "PLAY [testhost] ****************************************************************" + - "" + - "TASK [Sample task name] ********************************************************" + - "Sample output Looping sample debug msg sample item 1" + - "Sample output Looping sample debug msg sample item 2" + - "Sample output Looping sample debug msg sample item 3" + - "" + - "PLAY RECAP *********************************************************************" + - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 " - name: Set runner_item_on_failed_msg callback using task variable environment: @@ -273,28 +264,25 @@ ignore_errors: true vars: ansible_callback_diy_runner_item_on_failed_msg: Sample output Looping sample failure message - expected_output: [ - "", - "PLAY [testhost] ****************************************************************", - "", - "TASK [Sample task name] ********************************************************", - "ok: [testhost] => (item=sample item 1) => {", - " \"msg\": \"sample debug msg sample item 1\"", - "}", - "Sample output Looping sample failure message", - "ok: [testhost] => (item=sample item 3) => {", - " \"msg\": \"sample debug msg sample item 3\"", - "}", - [ - # Apparently a bug was fixed in Ansible, as before it ran through with "All items completed" - "fatal: [testhost]: FAILED! => {\"msg\": \"All items completed\"}", - "fatal: [testhost]: FAILED! 
=> {\"msg\": \"One or more items failed\"}", - ], - "...ignoring", - "", - "PLAY RECAP *********************************************************************", - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=1 " - ] + expected_output: + - "" + - "PLAY [testhost] ****************************************************************" + - "" + - "TASK [Sample task name] ********************************************************" + - "ok: [testhost] => (item=sample item 1) => {" + - " \"msg\": \"sample debug msg sample item 1\"" + - "}" + - "Sample output Looping sample failure message" + - "ok: [testhost] => (item=sample item 3) => {" + - " \"msg\": \"sample debug msg sample item 3\"" + - "}" + - # Apparently a bug was fixed in Ansible, as before it ran through with "All items completed" + - "fatal: [testhost]: FAILED! => {\"msg\": \"All items completed\"}" + - "fatal: [testhost]: FAILED! => {\"msg\": \"One or more items failed\"}" + - "...ignoring" + - "" + - "PLAY RECAP *********************************************************************" + - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=1 " - name: Set runner_item_on_skipped_msg callback using task variable environment: @@ -315,22 +303,21 @@ when: item != 'sample item 2' vars: ansible_callback_diy_runner_item_on_skipped_msg: Sample output Looping Skipped {{ ansible_callback_diy.result.output.item }} - expected_output: [ - "", - "PLAY [testhost] ****************************************************************", - "", - "TASK [Sample task name] ********************************************************", - "ok: [testhost] => (item=sample item 1) => {", - " \"msg\": \"sample debug msg sample item 1\"", - "}", - "Sample output Looping Skipped sample item 2", - "ok: [testhost] => (item=sample item 3) => {", - " \"msg\": \"sample debug msg sample item 3\"", - "}", - "", - "PLAY RECAP *********************************************************************", - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 " - ] + expected_output: + - "" + - "PLAY [testhost] ****************************************************************" + - "" + - "TASK [Sample task name] ********************************************************" + - "ok: [testhost] => (item=sample item 1) => {" + - " \"msg\": \"sample debug msg sample item 1\"" + - "}" + - "Sample output Looping Skipped sample item 2" + - "ok: [testhost] => (item=sample item 3) => {" + - " \"msg\": \"sample debug msg sample item 3\"" + - "}" + - "" + - "PLAY RECAP *********************************************************************" + - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 " - name: Set playbook_on_stats_msg callback using play variable environment: @@ -371,20 +358,19 @@ - name: Sample task name debug: msg: sample debug msg - expected_output: [ - "", - "PLAY [testhost] ****************************************************************", - "", - "TASK [Sample task name] ********************************************************", - "ok: [testhost] => {", - " \"msg\": \"sample debug msg\"", - "}", - " Sample output stats", - "===============================", - " ok : testhost: 1", - "", - " processed : testhost: 1" - ] + expected_output: + - "" + - "PLAY [testhost] ****************************************************************" + - "" + - "TASK [Sample task name] ********************************************************" + - "ok: [testhost] => {" + - " \"msg\": \"sample debug msg\"" + - 
"}" + - " Sample output stats" + - "===============================" + - " ok : testhost: 1" + - "" + - " processed : testhost: 1" - name: Suppress output on playbook_on_task_start_msg callback using task variable environment: @@ -400,16 +386,15 @@ msg: sample debug msg vars: ansible_callback_diy_playbook_on_task_start_msg: '' - expected_output: [ - "", - "PLAY [testhost] ****************************************************************", - "ok: [testhost] => {", - " \"msg\": \"sample debug msg\"", - "}", - "", - "PLAY RECAP *********************************************************************", - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 " - ] + expected_output: + - "" + - "PLAY [testhost] ****************************************************************" + - "ok: [testhost] => {" + - " \"msg\": \"sample debug msg\"" + - "}" + - "" + - "PLAY RECAP *********************************************************************" + - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 " - name: Suppress output on runner_on_ok_msg callback using task variable environment: @@ -425,15 +410,14 @@ msg: sample debug msg vars: ansible_callback_diy_runner_on_ok_msg: '' - expected_output: [ - "", - "PLAY [testhost] ****************************************************************", - "", - "TASK [Sample task name] ********************************************************", - "", - "PLAY RECAP *********************************************************************", - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 " - ] + expected_output: + - "" + - "PLAY [testhost] ****************************************************************" + - "" + - "TASK [Sample task name] ********************************************************" + - "" + - "PLAY RECAP *********************************************************************" + - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 " - name: Set runner_on_ok_msg_color using task variable environment: @@ -450,13 +434,12 @@ vars: ansible_callback_diy_runner_on_ok_msg: Sample output {{ ansible_callback_diy.result.output.msg }} ansible_callback_diy_runner_on_ok_msg_color: blue - expected_output: [ - "", - "PLAY [testhost] ****************************************************************", - "", - "TASK [Sample task name] ********************************************************", - "Sample output sample debug msg", - "", - "PLAY RECAP *********************************************************************", - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 " - ] + expected_output: + - "" + - "PLAY [testhost] ****************************************************************" + - "" + - "TASK [Sample task name] ********************************************************" + - "Sample output sample debug msg" + - "" + - "PLAY RECAP *********************************************************************" + - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 " diff --git a/tests/integration/targets/callback_log_plays/runme.sh b/tests/integration/targets/callback_log_plays/runme.sh index 88eea16266..54e1c1938f 100755 --- a/tests/integration/targets/callback_log_plays/runme.sh +++ b/tests/integration/targets/callback_log_plays/runme.sh @@ -17,5 +17,5 @@ ansible-playbook ping_log.yml -v "$@" # now force it to fail export ANSIBLE_LOG_FOLDER="logit.file" touch "${ANSIBLE_LOG_FOLDER}" -ansible-playbook ping_log.yml -v 
"$@" 2>&1| grep 'Failure using method (v2_runner_on_ok) in callback plugin' +ansible-playbook ping_log.yml -v "$@" 2>&1| grep -E "(Failure using method \(v2_runner_on_ok\) in callback plugin|Callback dispatch 'v2_runner_on_ok' failed for plugin)" [[ ! -f "${ANSIBLE_LOG_FOLDER}/localhost" ]] diff --git a/tests/integration/targets/callback_print_task/aliases b/tests/integration/targets/callback_print_task/aliases new file mode 100644 index 0000000000..3e2dd244c1 --- /dev/null +++ b/tests/integration/targets/callback_print_task/aliases @@ -0,0 +1,6 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +azp/posix/3 +needs/target/callback diff --git a/tests/integration/targets/callback_print_task/tasks/main.yml b/tests/integration/targets/callback_print_task/tasks/main.yml new file mode 100644 index 0000000000..0324a9d698 --- /dev/null +++ b/tests/integration/targets/callback_print_task/tasks/main.yml @@ -0,0 +1,128 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Run tests + include_role: + name: callback + vars: + tests: + - name: community.general.print_task is not enabled + environment: + ANSIBLE_NOCOLOR: 'true' + ANSIBLE_FORCE_COLOR: 'false' + playbook: | + - hosts: testhost + gather_facts: false + tasks: + - name: Sample task + debug: + msg: This is a test + expected_output: + - "" + - "PLAY [testhost] ****************************************************************" + - "" + - "TASK [Sample task] *************************************************************" + - "ok: [testhost] => {" + - " \"msg\": \"This is a test\"" + - "}" + - "" + - "PLAY RECAP *********************************************************************" + - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 " + + - name: community.general.print_task is enabled + environment: + ANSIBLE_NOCOLOR: 'true' + ANSIBLE_FORCE_COLOR: 'false' + ANSIBLE_CALLBACKS_ENABLED: 'community.general.print_task' + playbook: | + - hosts: testhost + gather_facts: false + tasks: + - name: Sample task + debug: + msg: This is a test + expected_output: + - "" + - "PLAY [testhost] ****************************************************************" + - "" + - "TASK [Sample task] *************************************************************" + - "" + - "- name: Sample task" + - " debug:" + - " msg: This is a test" + - "" + - "ok: [testhost] => {" + - " \"msg\": \"This is a test\"" + - "}" + - "" + - "PLAY RECAP *********************************************************************" + - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 " + + - name: Print with msg parameter on the same line + environment: + ANSIBLE_NOCOLOR: 'true' + ANSIBLE_FORCE_COLOR: 'false' + ANSIBLE_CALLBACKS_ENABLED: 'community.general.print_task' + playbook: | + - hosts: testhost + gather_facts: false + tasks: + - name: Sample task + debug: msg="This is a test" + expected_output: + - "" + - "PLAY [testhost] 
****************************************************************" + - "" + - "TASK [Sample task] *************************************************************" + - "" + - "- name: Sample task" + - " debug: msg=\"This is a test\"" + - "" + - "ok: [testhost] => {" + - " \"msg\": \"This is a test\"" + - "}" + - "" + - "PLAY RECAP *********************************************************************" + - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 " + + - name: Task with additional parameters + environment: + ANSIBLE_NOCOLOR: 'true' + ANSIBLE_FORCE_COLOR: 'false' + ANSIBLE_CALLBACKS_ENABLED: 'community.general.print_task' + playbook: | + - hosts: testhost + gather_facts: false + tasks: + - name: Sample task + when: True + vars: + test_var: "Hello World" + debug: + var: test_var + expected_output: + - "" + - "PLAY [testhost] ****************************************************************" + - "" + - "TASK [Sample task] *************************************************************" + - "" + - "- name: Sample task" + - " when: true" + - " vars:" + - " test_var: Hello World" + - " debug:" + - " var: test_var" + - "" + - "ok: [testhost] => {" + - " \"test_var\": \"Hello World\"" + - "}" + - "" + - "PLAY RECAP *********************************************************************" + - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 " diff --git a/tests/integration/targets/callback_tasks_only/aliases b/tests/integration/targets/callback_tasks_only/aliases new file mode 100644 index 0000000000..3e2dd244c1 --- /dev/null +++ b/tests/integration/targets/callback_tasks_only/aliases @@ -0,0 +1,6 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +azp/posix/3 +needs/target/callback diff --git a/tests/integration/targets/callback_tasks_only/tasks/main.yml b/tests/integration/targets/callback_tasks_only/tasks/main.yml new file mode 100644 index 0000000000..b02ddc8efc --- /dev/null +++ b/tests/integration/targets/callback_tasks_only/tasks/main.yml @@ -0,0 +1,79 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- block: + - name: Create temporary file + tempfile: + register: tempfile + + - name: Run tests + include_role: + name: callback + vars: + tests: + - name: Simple test + environment: + ANSIBLE_NOCOLOR: 'true' + ANSIBLE_FORCE_COLOR: 'false' + ANSIBLE_PYTHON_INTERPRETER: "{{ ansible_python_interpreter }}" + ANSIBLE_STDOUT_CALLBACK: community.general.tasks_only + playbook: | + - hosts: testhost + gather_facts: true + tasks: + - name: Create file + copy: + dest: "{{ tempfile.path }}" + content: | + Foo bar + + - name: Modify file + copy: + dest: "{{ tempfile.path }}" + content: | + Foo bar + Bar baz bam! 
+ expected_output: + - "" + - "TASK [Gathering Facts] *********************************************************" + - "ok: [testhost]" + - "" + - "TASK [Create file] *************************************************************" + - "changed: [testhost]" + - "" + - "TASK [Modify file] *************************************************************" + - "changed: [testhost]" + - name: Different column width + environment: + ANSIBLE_NOCOLOR: 'true' + ANSIBLE_FORCE_COLOR: 'false' + ANSIBLE_PYTHON_INTERPRETER: "{{ ansible_python_interpreter }}" + ANSIBLE_STDOUT_CALLBACK: community.general.tasks_only + ANSIBLE_COLLECTIONS_TASKS_ONLY_NUMBER_OF_COLUMNS: 40 + playbook: | + - hosts: testhost + gather_facts: false + tasks: + - name: A task + debug: + msg: Test. + expected_output: + - "" + - "TASK [A task] ***************************" + - "ok: [testhost] => {" + - ' "msg": "Test."' + - "}" + + + always: + - name: Clean up temp file + file: + path: "{{ tempfile.path }}" + state: absent diff --git a/tests/integration/targets/callback_timestamp/tasks/main.yml b/tests/integration/targets/callback_timestamp/tasks/main.yml index 5e0acc15f0..41681a5f42 100644 --- a/tests/integration/targets/callback_timestamp/tasks/main.yml +++ b/tests/integration/targets/callback_timestamp/tasks/main.yml @@ -26,18 +26,17 @@ - name: Sample task name debug: msg: sample debug msg - expected_output: [ - "", - "PLAY [testhost] ******************************************************* 15:04:05", - "", - "TASK [Sample task name] *********************************************** 15:04:05", - "ok: [testhost] => {", - " \"msg\": \"sample debug msg\"", - "}", - "", - "PLAY RECAP ************************************************************ 15:04:05", - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 " - ] + expected_output: + - "" + - "PLAY [testhost] ******************************************************* 15:04:05" + - "" + - "TASK [Sample task name] *********************************************** 15:04:05" + - "ok: [testhost] => {" + - " \"msg\": \"sample debug msg\"" + - "}" + - "" + - "PLAY RECAP ************************************************************ 15:04:05" + - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 " - name: Enable timestamp in the longer length environment: @@ -52,15 +51,14 @@ - name: Sample task name debug: msg: sample debug msg - expected_output: [ - "", - "PLAY [testhost] ******************************************** 2006-01-02T15:04:05", - "", - "TASK [Sample task name] ************************************ 2006-01-02T15:04:05", - "ok: [testhost] => {", - " \"msg\": \"sample debug msg\"", - "}", - "", - "PLAY RECAP ************************************************* 2006-01-02T15:04:05", - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 " - ] + expected_output: + - "" + - "PLAY [testhost] ******************************************** 2006-01-02T15:04:05" + - "" + - "TASK [Sample task name] ************************************ 2006-01-02T15:04:05" + - "ok: [testhost] => {" + - " \"msg\": \"sample debug msg\"" + - "}" + - "" + - "PLAY RECAP ************************************************* 2006-01-02T15:04:05" + - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 " diff --git a/tests/integration/targets/callback_yaml/meta/main.yml b/tests/integration/targets/callback_yaml/meta/main.yml new file mode 100644 index 0000000000..982de6eb03 --- /dev/null +++ 
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+  - setup_remote_tmp_dir
diff --git a/tests/integration/targets/callback_yaml/tasks/main.yml b/tests/integration/targets/callback_yaml/tasks/main.yml
index f3c36663da..8e286e45f4 100644
--- a/tests/integration/targets/callback_yaml/tasks/main.yml
+++ b/tests/integration/targets/callback_yaml/tasks/main.yml
@@ -8,6 +8,11 @@
 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
 # SPDX-License-Identifier: GPL-3.0-or-later

+- name: Write vault password to disk
+  ansible.builtin.copy:
+    dest: "{{ remote_tmp_dir }}/vault-password"
+    content: asdf
+
 - name: Run tests
   include_role:
     name: callback
@@ -25,17 +30,17 @@
               - name: Sample task name
                 debug:
                   msg: sample debug msg
-        expected_output: [
-          "",
-          "PLAY [testhost] ****************************************************************",
-          "",
-          "TASK [Sample task name] ********************************************************",
-          "ok: [testhost] => ",
-          "    msg: sample debug msg",
-          "",
-          "PLAY RECAP *********************************************************************",
-          "testhost                   : ok=1    changed=0    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   "
-        ]
+        expected_output:
+          - ""
+          - "PLAY [testhost] ****************************************************************"
+          - ""
+          - "TASK [Sample task name] ********************************************************"
+          - "ok: [testhost] => "
+          - "    msg: sample debug msg"
+          - ""
+          - "PLAY RECAP *********************************************************************"
+          - "testhost                   : ok=1    changed=0    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   "
+
       - name: Test umlauts in multiline
         environment:
           ANSIBLE_NOCOLOR: 'true'
@@ -48,26 +53,26 @@
              - name: Umlaut output
                 debug:
                   msg: "äöü\néêè\nßï☺"
-        expected_output: [
-          "",
-          "PLAY [testhost] ****************************************************************",
-          "",
-          "TASK [Umlaut output] ***********************************************************",
-          "ok: [testhost] => ",
-          "    msg: |-",
-          "      äöü",
-          "      éêè",
-          "      ßï☺",
-          "",
-          "PLAY RECAP *********************************************************************",
-          "testhost                   : ok=1    changed=0    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   "
-        ]
+        expected_output:
+          - ""
+          - "PLAY [testhost] ****************************************************************"
+          - ""
+          - "TASK [Umlaut output] ***********************************************************"
+          - "ok: [testhost] => "
+          - "    msg: |-"
+          - "      äöü"
+          - "      éêè"
+          - "      ßï☺"
+          - ""
+          - "PLAY RECAP *********************************************************************"
+          - "testhost                   : ok=1    changed=0    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   "
+
       - name: Test to_yaml
         environment:
           ANSIBLE_NOCOLOR: 'true'
           ANSIBLE_FORCE_COLOR: 'false'
           ANSIBLE_STDOUT_CALLBACK: community.general.yaml
-        playbook: |
+        playbook: !unsafe |
           - hosts: testhost
             gather_facts: false
            vars:
              data: |
                line 1

                line 2

                line 3
@@ -78,24 +83,61 @@
             tasks:
               - name: Test to_yaml
                 debug:
-                  msg: "{{ '{{' }}'{{ '{{' }}'{{ '}}' }} data | to_yaml {{ '{{' }}'{{ '}}' }}'{{ '}}' }}"
-                  # The above should be: msg: "{{ data | to_yaml }}"
-                  # Unfortunately, the way Ansible handles templating, we need to do some funny 'escaping' tricks...
-        expected_output: [
-          "",
-          "PLAY [testhost] ****************************************************************",
-          "",
-          "TASK [Test to_yaml] ************************************************************",
-          "ok: [testhost] => ",
-          "    msg: |-",
-          "      'line 1",
-          "      ",
-          "      line 2",
-          "      ",
-          "      line 3",
-          "      ",
-          "      '",
-          "",
-          "PLAY RECAP *********************************************************************",
-          "testhost                   : ok=1    changed=0    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   "
-        ]
+                  msg: "{{ data | to_yaml }}"
+        expected_output:
+          - ""
+          - "PLAY [testhost] ****************************************************************"
+          - ""
+          - "TASK [Test to_yaml] ************************************************************"
+          - "ok: [testhost] => "
+          - "    msg: |-"
+          - "      'line 1"
+          - "      "
+          - "      line 2"
+          - "      "
+          - "      line 3"
+          - "      "
+          - "      '"
+          - ""
+          - "PLAY RECAP *********************************************************************"
+          - "testhost                   : ok=1    changed=0    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   "
+
+      - name: Some more fun with data tagging
+        environment:
+          ANSIBLE_NOCOLOR: 'true'
+          ANSIBLE_FORCE_COLOR: 'false'
+          ANSIBLE_STDOUT_CALLBACK: community.general.yaml
+        extra_cli_arguments: "--vault-password-file {{ remote_tmp_dir }}/vault-password"
+        playbook: !unsafe |
+          - hosts: testhost
+            gather_facts: false
+            vars:
+              foo: bar
+              baz: !vault |
+                $ANSIBLE_VAULT;1.1;AES256
+                30393064316433636636373336363538663034643135363938646665393661353833633865313765
+                3835366434646339313337663335393865336163663434310a316161313662666466333332353731
+                64663064366461643162666137303737643164376134303034306366383830336232363837636638
+                3830653338626130360a313639623231353931356563313065373661303262646337383534663932
+                64353461663065333362346264326335373032313333343539646661656634653138646332313639
+                3566313765626464613734623664663266336237646139373935
+            tasks:
+              - name: Test regular string
+                debug:
+                  var: foo
+              - name: Test vaulted string
+                debug:
+                  var: baz
+        expected_output:
+          - ""
+          - "PLAY [testhost] ****************************************************************"
+          - ""
+          - "TASK [Test regular string] *****************************************************"
+          - "ok: [testhost] => "
+          - "    foo: bar"
+          - ""
+          - "TASK [Test vaulted string] *****************************************************"
+          - "ok: [testhost] => "
+          - "    baz: aBcDeFgHiJkLmNoPqRsTuVwXyZ012345"
+          - ""
+          - "PLAY RECAP *********************************************************************"
+          - "testhost                   : ok=2    changed=0    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   "
diff --git a/tests/integration/targets/cargo/tasks/main.yml b/tests/integration/targets/cargo/tasks/main.yml
index 89f13960a6..f28b459864 100644
--- a/tests/integration/targets/cargo/tasks/main.yml
+++ b/tests/integration/targets/cargo/tasks/main.yml
@@ -14,10 +14,10 @@
     CARGO_NET_GIT_FETCH_WITH_CLI: "true"
   when: has_cargo | default(false) and ansible_distribution == 'Alpine'
 - block:
-  - import_tasks: test_general.yml
-  - import_tasks: test_version.yml
-  - import_tasks: test_directory.yml
+    - import_tasks: test_general.yml
+    - import_tasks: test_version.yml
+    - import_tasks: test_directory.yml
   environment: "{{ cargo_environment }}"
   when: has_cargo | default(false)
 - import_tasks: test_rustup_cargo.yml
-  when: rustup_cargo_bin | default(false)
+  when: (rustup_cargo_bin | default(false)) is truthy
diff --git a/tests/integration/targets/cargo/tasks/setup.yml b/tests/integration/targets/cargo/tasks/setup.yml
index 7eec97ac4c..6a98494106 100644
--- a/tests/integration/targets/cargo/tasks/setup.yml
+++ b/tests/integration/targets/cargo/tasks/setup.yml
@@ -4,12 +4,12 @@
 # SPDX-License-Identifier: GPL-3.0-or-later

 - block:
-  - name: Install cargo
-    package:
-      name: cargo
-      state: present
-  - set_fact:
-      has_cargo: true
+    - name: Install cargo
+      package:
+        name: cargo
+        state: present
+    - set_fact:
+        has_cargo: true
   when:
     - ansible_system != 'FreeBSD'
     - ansible_distribution != 'MacOSX'
@@ -18,25 +18,25 @@
     - ansible_distribution != 'Ubuntu' or ansible_distribution_version is version('18', '>=')

 - block:
-  - name: Install rust (containing cargo)
-    package:
-      name: rust
-      state: present
-  - set_fact:
-      has_cargo: true
+    - name: Install rust (containing cargo)
+      package:
+        name: rust
+        state: present
+    - set_fact:
+        has_cargo: true
   when:
     - ansible_system == 'FreeBSD' and ansible_distribution_version is version('13.0', '>')

 - block:
-  - name: Download rustup
-    get_url:
-      url: https://sh.rustup.rs
-      dest: /tmp/sh.rustup.rs
-      mode: "0750"
-      force: true
-  - name: Install rustup cargo
-    command: /tmp/sh.rustup.rs -y
-  - set_fact:
-      rustup_cargo_bin: "{{ lookup('env', 'HOME') }}/.cargo/bin/cargo"
+    - name: Download rustup
+      get_url:
+        url: https://sh.rustup.rs
+        dest: /tmp/sh.rustup.rs
+        mode: "0750"
+        force: true
+    - name: Install rustup cargo
+      command: /tmp/sh.rustup.rs -y
+    - set_fact:
+        rustup_cargo_bin: "{{ lookup('env', 'HOME') }}/.cargo/bin/cargo"
   when:
     - ansible_distribution != 'CentOS' or ansible_distribution_version is version('7.0', '>=')
diff --git a/tests/integration/targets/cargo/tasks/test_directory.yml b/tests/integration/targets/cargo/tasks/test_directory.yml
index f4275ede68..b0b7120388 100644
--- a/tests/integration/targets/cargo/tasks/test_directory.yml
+++ b/tests/integration/targets/cargo/tasks/test_directory.yml
@@ -24,7 +24,7 @@
     path: "{{ manifest_path }}"
     regexp: '^version = ".*"$'
     line: 'version = "1.0.0"'
-  
+
 - name: Ensure package is uninstalled
   community.general.cargo:
     name: "{{ package_name }}"
diff --git a/tests/integration/targets/cargo/tasks/test_general.yml b/tests/integration/targets/cargo/tasks/test_general.yml
index 2bffa08f0d..07e96cd4ac 100644
--- a/tests/integration/targets/cargo/tasks/test_general.yml
+++ b/tests/integration/targets/cargo/tasks/test_general.yml
@@ -29,7 +29,7 @@
 - name: Check assertions helloworld
   assert:
     that:
-    - uninstall_absent_helloworld is not changed
-    - install_absent_helloworld is changed
-    - install_present_helloworld is not changed
-    - uninstall_present_helloworld is changed
+      - uninstall_absent_helloworld is not changed
+      - install_absent_helloworld is changed
+      - install_present_helloworld is not changed
+      - uninstall_present_helloworld is changed
diff --git a/tests/integration/targets/cargo/tasks/test_rustup_cargo.yml b/tests/integration/targets/cargo/tasks/test_rustup_cargo.yml
index ec2cf6e6de..638dd2600a 100644
--- a/tests/integration/targets/cargo/tasks/test_rustup_cargo.yml
+++ b/tests/integration/targets/cargo/tasks/test_rustup_cargo.yml
@@ -19,5 +19,5 @@
 - name: Check assertions helloworld
   assert:
     that:
-    - rustup_install_absent_helloworld is changed
-    - rustup_uninstall_present_helloworld is changed
+      - rustup_install_absent_helloworld is changed
+      - rustup_uninstall_present_helloworld is changed
diff --git a/tests/integration/targets/cargo/tasks/test_version.yml b/tests/integration/targets/cargo/tasks/test_version.yml
index c1ab8e198d..701f23e1f5 100644
--- a/tests/integration/targets/cargo/tasks/test_version.yml
+++ b/tests/integration/targets/cargo/tasks/test_version.yml
@@ -42,9 +42,9 @@
 - name: Check assertions helloworld-yliu
   assert:
     that:
-    - install_helloworld_010 is changed
-    - install_helloworld_010_idem is not changed
-    - upgrade_helloworld_010 is changed
-    - upgrade_helloworld_010_idem is not changed
-    - downgrade_helloworld_010 is changed
-    - downgrade_helloworld_010_idem is not changed
+      - install_helloworld_010 is changed
+      - install_helloworld_010_idem is not changed
+      - upgrade_helloworld_010 is changed
+      - upgrade_helloworld_010_idem is not changed
+      - downgrade_helloworld_010 is changed
+      - downgrade_helloworld_010_idem is not changed
diff --git a/tests/integration/targets/cloud_init_data_facts/tasks/main.yml b/tests/integration/targets/cloud_init_data_facts/tasks/main.yml
index 2b67b5c174..71161603f9 100644
--- a/tests/integration/targets/cloud_init_data_facts/tasks/main.yml
+++ b/tests/integration/targets/cloud_init_data_facts/tasks/main.yml
@@ -25,53 +25,53 @@
   # Will also have to skip on OpenSUSE when running on Python 2 on newer Leap versions
   # (!= 42 and >= 15) as cloud-init will install the Python 3 package, breaking our build on py2.
   when:
-  - not (ansible_distribution == "Ubuntu" and ansible_distribution_major_version|int == 14)
-  - not (ansible_os_family == "Suse" and ansible_distribution_major_version|int != 42 and ansible_python.version.major != 3)
-  - not (ansible_os_family == "Suse" and ansible_distribution_major_version|int == 15)
-  - not (ansible_distribution == "CentOS" and ansible_distribution_major_version|int == 8)  # TODO: cannot start service
-  - not (ansible_distribution == 'Archlinux')  # TODO: package seems to be broken, cannot be downloaded from mirrors?
-  - not (ansible_distribution == 'Alpine')  # TODO: not sure what's wrong here, the module doesn't return what the tests expect
+    - not (ansible_distribution == "Ubuntu" and ansible_distribution_major_version|int == 14)
+    - not (ansible_os_family == "Suse" and ansible_distribution_major_version|int != 42 and ansible_python.version.major != 3)
+    - not (ansible_os_family == "Suse" and ansible_distribution_major_version|int == 15)
+    - not (ansible_distribution == "CentOS" and ansible_distribution_major_version|int == 8)  # TODO: cannot start service
+    - not (ansible_distribution == 'Archlinux')  # TODO: package seems to be broken, cannot be downloaded from mirrors?
+ - not (ansible_distribution == 'Alpine') # TODO: not sure what's wrong here, the module doesn't return what the tests expect block: - - name: setup install cloud-init - package: - name: - - cloud-init - - udev + - name: setup install cloud-init + package: + name: + - cloud-init + - udev - - name: Ensure systemd-network user exists - user: - name: systemd-network - state: present - when: ansible_distribution == 'Fedora' and ansible_distribution_major_version|int >= 37 + - name: Ensure systemd-network user exists + user: + name: systemd-network + state: present + when: ansible_distribution == 'Fedora' and ansible_distribution_major_version|int >= 37 - - name: setup run cloud-init - service: - name: cloud-init-local - state: restarted + - name: setup run cloud-init + service: + name: cloud-init-local + state: restarted - - name: test gather cloud-init facts in check mode - cloud_init_data_facts: - check_mode: true - register: result - - name: verify test gather cloud-init facts in check mode - assert: - that: - - result.cloud_init_data_facts.status.v1 is defined - - result.cloud_init_data_facts.status.v1.stage is defined - - not result.cloud_init_data_facts.status.v1.stage - - cloud_init_data_facts.status.v1 is defined - - cloud_init_data_facts.status.v1.stage is defined - - not cloud_init_data_facts.status.v1.stage + - name: test gather cloud-init facts in check mode + cloud_init_data_facts: + check_mode: true + register: result + - name: verify test gather cloud-init facts in check mode + assert: + that: + - result.cloud_init_data_facts.status.v1 is defined + - result.cloud_init_data_facts.status.v1.stage is defined + - not result.cloud_init_data_facts.status.v1.stage + - cloud_init_data_facts.status.v1 is defined + - cloud_init_data_facts.status.v1.stage is defined + - not cloud_init_data_facts.status.v1.stage - - name: test gather cloud-init facts - cloud_init_data_facts: - register: result - - name: verify test gather cloud-init facts - assert: - that: - - result.cloud_init_data_facts.status.v1 is defined - - result.cloud_init_data_facts.status.v1.stage is defined - - not result.cloud_init_data_facts.status.v1.stage - - cloud_init_data_facts.status.v1 is defined - - cloud_init_data_facts.status.v1.stage is defined - - not cloud_init_data_facts.status.v1.stage + - name: test gather cloud-init facts + cloud_init_data_facts: + register: result + - name: verify test gather cloud-init facts + assert: + that: + - result.cloud_init_data_facts.status.v1 is defined + - result.cloud_init_data_facts.status.v1.stage is defined + - not result.cloud_init_data_facts.status.v1.stage + - cloud_init_data_facts.status.v1 is defined + - cloud_init_data_facts.status.v1.stage is defined + - not cloud_init_data_facts.status.v1.stage diff --git a/tests/integration/targets/cmd_runner/action_plugins/_unsafe_assert.py b/tests/integration/targets/cmd_runner/action_plugins/_unsafe_assert.py index 498e8258d0..a25e8aa38c 100644 --- a/tests/integration/targets/cmd_runner/action_plugins/_unsafe_assert.py +++ b/tests/integration/targets/cmd_runner/action_plugins/_unsafe_assert.py @@ -9,6 +9,12 @@ from ansible.errors import AnsibleError from ansible.playbook.conditional import Conditional from ansible.plugins.action import ActionBase +try: + from ansible.utils.datatag import trust_value as _trust_value +except ImportError: + def _trust_value(input): + return input + class ActionModule(ActionBase): ''' Fail with custom message ''' @@ -36,12 +42,16 @@ class ActionModule(ActionBase): thats = self._task.args['that'] - cond 
diff --git a/tests/integration/targets/cmd_runner/library/cmd_echo.py b/tests/integration/targets/cmd_runner/library/cmd_echo.py
index ec0beb98e7..1dda75ec93 100644
--- a/tests/integration/targets/cmd_runner/library/cmd_echo.py
+++ b/tests/integration/targets/cmd_runner/library/cmd_echo.py
@@ -7,6 +7,8 @@ from __future__ import absolute_import, division, print_function
 __metaclass__ = type
 
+import traceback
+
 
 DOCUMENTATION = ""
 
@@ -43,15 +45,18 @@ def main():
 
             arg_formats[arg] = func(*args)
 
-    runner = CmdRunner(module, [module.params["cmd"], '--'], arg_formats=arg_formats, path_prefix=module.params["path_prefix"])
+    try:
+        runner = CmdRunner(module, [module.params["cmd"], '--'], arg_formats=arg_formats, path_prefix=module.params["path_prefix"])
 
-    with runner.context(p['arg_order'], check_mode_skip=p['check_mode_skip']) as ctx:
-        result = ctx.run(**p['arg_values'])
-        info = ctx.run_info
-        check = "check"
-        rc, out, err = result if result is not None else (None, None, None)
+        with runner.context(p['arg_order'], check_mode_skip=p['check_mode_skip']) as ctx:
+            result = ctx.run(**p['arg_values'])
+            info = ctx.run_info
+            check = "check"
+            rc, out, err = result if result is not None else (None, None, None)
 
-    module.exit_json(rc=rc, out=out, err=err, info=info)
+        module.exit_json(rc=rc, out=out, err=err, info=info)
+    except Exception as exc:
+        module.fail_json(rc=1, module_stderr=traceback.format_exc(), msg="Module crashed with exception")
 
 
 if __name__ == '__main__':
diff --git a/tests/integration/targets/cmd_runner/vars/main.yml b/tests/integration/targets/cmd_runner/vars/main.yml
index 40c8d10af6..7bce9328ee 100644
--- a/tests/integration/targets/cmd_runner/vars/main.yml
+++ b/tests/integration/targets/cmd_runner/vars/main.yml
@@ -253,3 +253,5 @@ cmd_echo_tests:
     assertions:
       - >
        "No such file or directory" in test_result.msg
+        or
+        "Error executing command."
== test_result.msg diff --git a/tests/integration/targets/connection/test_connection.yml b/tests/integration/targets/connection/test_connection.yml index bb0a993995..7f8b5697ce 100644 --- a/tests/integration/targets/connection/test_connection.yml +++ b/tests/integration/targets/connection/test_connection.yml @@ -8,41 +8,41 @@ serial: 1 tasks: - ### raw with unicode arg and output + ### raw with unicode arg and output - - name: raw with unicode arg and output - raw: echo 汉语 - register: command - - name: check output of raw with unicode arg and output - assert: - that: - - "'汉语' in command.stdout" - - command is changed # as of 2.2, raw should default to changed: true for consistency w/ shell/command/script modules + - name: raw with unicode arg and output + raw: echo 汉语 + register: command + - name: check output of raw with unicode arg and output + assert: + that: + - "'汉语' in command.stdout" + - command is changed # as of 2.2, raw should default to changed: true for consistency w/ shell/command/script modules - ### copy local file with unicode filename and content + ### copy local file with unicode filename and content - - name: create local file with unicode filename and content - local_action: lineinfile dest={{ local_tmp }}-汉语/汉语.txt create=true line=汉语 - - name: remove remote file with unicode filename and content - action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语/汉语.txt state=absent" - - name: create remote directory with unicode name - action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语 state=directory" - - name: copy local file with unicode filename and content - action: "{{ action_prefix }}copy src={{ local_tmp }}-汉语/汉语.txt dest={{ remote_tmp }}-汉语/汉语.txt" + - name: create local file with unicode filename and content + local_action: lineinfile dest={{ local_tmp }}-汉语/汉语.txt create=true line=汉语 + - name: remove remote file with unicode filename and content + action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语/汉语.txt state=absent" + - name: create remote directory with unicode name + action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语 state=directory" + - name: copy local file with unicode filename and content + action: "{{ action_prefix }}copy src={{ local_tmp }}-汉语/汉语.txt dest={{ remote_tmp }}-汉语/汉语.txt" - ### fetch remote file with unicode filename and content + ### fetch remote file with unicode filename and content - - name: remove local file with unicode filename and content - local_action: file path={{ local_tmp }}-汉语/汉语.txt state=absent - - name: fetch remote file with unicode filename and content - fetch: src={{ remote_tmp }}-汉语/汉语.txt dest={{ local_tmp }}-汉语/汉语.txt fail_on_missing=true validate_checksum=true flat=true + - name: remove local file with unicode filename and content + local_action: file path={{ local_tmp }}-汉语/汉语.txt state=absent + - name: fetch remote file with unicode filename and content + fetch: src={{ remote_tmp }}-汉语/汉语.txt dest={{ local_tmp }}-汉语/汉语.txt fail_on_missing=true validate_checksum=true flat=true - ### remove local and remote temp files + ### remove local and remote temp files - - name: remove local temp file - local_action: file path={{ local_tmp }}-汉语 state=absent - - name: remove remote temp file - action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语 state=absent" + - name: remove local temp file + local_action: file path={{ local_tmp }}-汉语 state=absent + - name: remove remote temp file + action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语 state=absent" - ### test wait_for_connection plugin - - 
ansible.builtin.wait_for_connection: + ### test wait_for_connection plugin + - ansible.builtin.wait_for_connection: diff --git a/tests/integration/targets/connection_wsl/aliases b/tests/integration/targets/connection_wsl/aliases new file mode 100644 index 0000000000..aeb39c141a --- /dev/null +++ b/tests/integration/targets/connection_wsl/aliases @@ -0,0 +1,11 @@ +# Derived from ../connection_proxmox_pct_remote/aliases Copyright (c) 2025 Nils Stein (@mietzen) +# Copyright (c) 2025 Rui Lopes (@rgl) +# Copyright (c) 2025 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +azp/posix/3 +destructive +needs/root +needs/target/connection +skip/macos diff --git a/tests/integration/targets/connection_wsl/dependencies.yml b/tests/integration/targets/connection_wsl/dependencies.yml new file mode 100644 index 0000000000..fdf36f5e95 --- /dev/null +++ b/tests/integration/targets/connection_wsl/dependencies.yml @@ -0,0 +1,19 @@ +--- +# Derived from ../connection_proxmox_pct_remote/dependencies.yml Copyright (c) 2025 Nils Stein (@mietzen) +# Copyright (c) 2025 Rui Lopes (@rgl) +# Copyright (c) 2025 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- hosts: localhost + gather_facts: true + serial: 1 + tasks: + - name: Copy wsl.exe mock + copy: + src: files/wsl.exe + dest: /usr/local/bin/wsl.exe + mode: '0755' + - name: Install paramiko + pip: + name: "paramiko>=3.0.0" diff --git a/tests/integration/targets/connection_wsl/files/wsl.exe b/tests/integration/targets/connection_wsl/files/wsl.exe new file mode 100755 index 0000000000..0c6aafaf0f --- /dev/null +++ b/tests/integration/targets/connection_wsl/files/wsl.exe @@ -0,0 +1,72 @@ +#!/usr/bin/env bash +# Derived from ../../connection_proxmox_pct_remote/files/pct Copyright (c) 2025 Nils Stein (@mietzen) +# Copyright (c) 2025 Rui Lopes (@rgl) +# Copyright (c) 2025 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# Shell script to mock wsl.exe behavior + +set -euo pipefail + +function quote_args { + local quoted_args=() + for arg in "$@"; do + if [[ -z "$arg" || "$arg" =~ [^a-zA-Z0-9@%+=:,./-] ]]; then + local escaped_arg=${arg//\'/\'\\\'\'} + quoted_args+=("'$escaped_arg'") + else + quoted_args+=("$arg") + fi + done + echo -n "${quoted_args[@]}" +} + +declare -a mock_args=() +declare -a cmd_args=() +wsl_distribution="" +wsl_user="" + +while [[ $# -gt 0 ]]; do + case $1 in + --distribution|-d) + wsl_distribution="$2" + mock_args+=("$1" "$2") + shift 2 + ;; + --user|-u) + wsl_user="$2" + mock_args+=("$1" "$2") + shift 2 + ;; + --) + mock_args+=("$1") + shift + while [[ $# -gt 0 ]]; do + mock_args+=("$1") + cmd_args+=("$1") + shift + done + ;; + *) + >&2 echo "unexpected args: $@" + exit 1 + ;; + esac +done + +mock_cmd="wsl.exe $(quote_args "${mock_args[@]}")" +cmd="$(quote_args "${cmd_args[@]}")" + +>&2 echo "[INFO] MOCKING: $mock_cmd" +>&2 echo "[INFO] CMD: $cmd" + +tmp_dir="/tmp/ansible-remote/wsl/integration_test/wsl_distribution_${wsl_distribution}" + +mkdir -p "$tmp_dir" + +pushd "$tmp_dir" >/dev/null + +eval "$cmd" + +popd >/dev/null diff --git a/tests/integration/targets/connection_wsl/plugin-specific-tests.yml 
b/tests/integration/targets/connection_wsl/plugin-specific-tests.yml
new file mode 100644
index 0000000000..e7d7434b81
--- /dev/null
+++ b/tests/integration/targets/connection_wsl/plugin-specific-tests.yml
@@ -0,0 +1,31 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: "{{ target_hosts }}"
+  gather_facts: false
+  tasks:
+    - name: create file without content
+      copy:
+        content: ""
+        dest: "{{ remote_tmp }}/test_empty.txt"
+        force: false
+        mode: '0644'
+
+    - name: assert file without content exists
+      stat:
+        path: "{{ remote_tmp }}/test_empty.txt"
+      register: empty_file_stat
+
+    - name: verify file without content exists
+      assert:
+        that:
+          - empty_file_stat.stat.exists
+        fail_msg: "The file {{ remote_tmp }}/test_empty.txt does not exist."
+
+    - name: verify file without content is empty
+      assert:
+        that:
+          - empty_file_stat.stat.size == 0
+        fail_msg: "The file {{ remote_tmp }}/test_empty.txt is not empty."
diff --git a/tests/integration/targets/connection_wsl/runme.sh b/tests/integration/targets/connection_wsl/runme.sh
new file mode 100755
index 0000000000..cb1e5b9cd0
--- /dev/null
+++ b/tests/integration/targets/connection_wsl/runme.sh
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+# Derived from ../connection_proxmox_pct_remote/runme.sh Copyright (c) 2025 Nils Stein (@mietzen)
+# Copyright (c) 2025 Rui Lopes (@rgl)
+# Copyright (c) 2025 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -eux
+
+# signal the wsl connection plugin that it's running under the integration testing mode.
+# NB while running integration tests, the mock wsl.exe implementation is actually
+# running on unix, instead of on windows, so the wsl.exe command line
+# construction must use unix rules instead of windows rules.
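+#
+# (The exported variable below is only a presence marker checked by the
+# plugin; the random-looking suffix presumably just makes accidental
+# collisions with real user environments unlikely.)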
+export _ANSIBLE_TEST_WSL_CONNECTION_PLUGIN_Waeri5tepheeSha2fae8=1 + +ANSIBLE_ROLES_PATH=../ \ + ansible-playbook dependencies.yml -v "$@" + +./test.sh "$@" + +ansible-playbook plugin-specific-tests.yml -i "./test_connection.inventory" \ + -e target_hosts=wsl \ + -e action_prefix= \ + -e local_tmp=/tmp/ansible-local \ + -e remote_tmp=/tmp/ansible-remote \ + "$@" diff --git a/tests/integration/targets/connection_wsl/test.sh b/tests/integration/targets/connection_wsl/test.sh new file mode 120000 index 0000000000..70aa5dbdba --- /dev/null +++ b/tests/integration/targets/connection_wsl/test.sh @@ -0,0 +1 @@ +../connection_posix/test.sh \ No newline at end of file diff --git a/tests/integration/targets/connection_wsl/test_connection.inventory b/tests/integration/targets/connection_wsl/test_connection.inventory new file mode 100644 index 0000000000..53b9a3a031 --- /dev/null +++ b/tests/integration/targets/connection_wsl/test_connection.inventory @@ -0,0 +1,15 @@ +# Derived from ../connection_proxmox_pct_remote/test_connection.inventory Copyright (c) 2025 Nils Stein (@mietzen) +# Copyright (c) 2025 Rui Lopes (@rgl) +# Copyright (c) 2025 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +[wsl] +wsl-pipelining ansible_ssh_pipelining=true +wsl-no-pipelining ansible_ssh_pipelining=false +[wsl:vars] +ansible_host=localhost +ansible_user=root +ansible_python_interpreter="{{ ansible_playbook_python }}" +ansible_connection=community.general.wsl +wsl_distribution=test diff --git a/tests/integration/targets/consul/tasks/consul_binding_rule.yml b/tests/integration/targets/consul/tasks/consul_binding_rule.yml index 218daf982b..1ada2a330b 100644 --- a/tests/integration/targets/consul/tasks/consul_binding_rule.yml +++ b/tests/integration/targets/consul/tasks/consul_binding_rule.yml @@ -18,7 +18,7 @@ 0iT9wCS0DRTXu269V264Vf/3jvredZiKRkgwlL9xNAwxXFg0x/XFw005UWVRIkdg cKWTjpBP2dPwVZ4WWC+9aGVd+Gyn1o0CLelf4rEjGoXbAAEgAqeGUxrcIlbjXfbc mwIDAQAB - -----END PUBLIC KEY----- + -----END PUBLIC KEY----- - name: Create a binding rule community.general.consul_binding_rule: @@ -33,7 +33,7 @@ that: - result is changed - result.binding_rule.AuthMethod == 'test' - - result.binding.Description == 'test-binding: my description' + - "result.binding_rule.Description == 'test-binding: my description'" - result.operation == 'create' - name: Update a binding rule @@ -46,7 +46,7 @@ - assert: that: - result is changed - - result.binding.Description == 'test-binding: my description' + - "result.binding_rule.Description == 'test-binding: my description'" - result.operation == 'update' - name: Update a binding rule (noop) @@ -58,7 +58,7 @@ - assert: that: - result is not changed - - result.binding.Description == 'test-binding: my description' + - "result.binding_rule.Description == 'test-binding: my description'" - result.operation is not defined - name: Delete a binding rule diff --git a/tests/integration/targets/consul/tasks/consul_kv.yml b/tests/integration/targets/consul/tasks/consul_kv.yml index 6cca73137a..52b95ddd3d 100644 --- a/tests/integration/targets/consul/tasks/consul_kv.yml +++ b/tests/integration/targets/consul/tasks/consul_kv.yml @@ -15,10 +15,10 @@ - result is changed - result.data.Value == 'somevalue' -#- name: Test the lookup -# assert: -# that: -# - lookup('community.general.consul_kv', 'somekey', token=consul_management_token) == 'somevalue' +# - name: Test the lookup +# assert: +# that: +# - 
lookup('community.general.consul_kv', 'somekey', token=consul_management_token) == 'somevalue' - name: Update a key with the same data consul_kv: diff --git a/tests/integration/targets/consul/tasks/consul_role.yml b/tests/integration/targets/consul/tasks/consul_role.yml index 9b0504e0b6..57193a0b94 100644 --- a/tests/integration/targets/consul/tasks/consul_role.yml +++ b/tests/integration/targets/consul/tasks/consul_role.yml @@ -44,7 +44,7 @@ consul_role: name: foo-role-with-policy description: "Testing updating description" - check_mode: yes + check_mode: true register: result - assert: @@ -106,7 +106,7 @@ datacenters: - dc2 register: result - check_mode: yes + check_mode: true - assert: that: @@ -146,7 +146,7 @@ name: role-with-service-identity node_identities: [] register: result - check_mode: yes + check_mode: true - assert: that: diff --git a/tests/integration/targets/consul/tasks/consul_token.yml b/tests/integration/targets/consul/tasks/consul_token.yml index 9b3679ef1b..55a39950d1 100644 --- a/tests/integration/targets/consul/tasks/consul_token.yml +++ b/tests/integration/targets/consul/tasks/consul_token.yml @@ -22,7 +22,7 @@ - assert: that: - simple_create_result is changed - - simple_create_result.token.AccessorID + - simple_create_result.token.AccessorID is truthy - simple_create_result.operation == 'create' - name: Create token @@ -67,7 +67,7 @@ state: present accessor_id: 07a7de84-c9c7-448a-99cc-beaf682efd21 policies: - - id: "{{ create_result.token.Policies[-1].ID }}" + - name: foo-access2 register: result - assert: @@ -84,5 +84,5 @@ - assert: that: - result is changed - - not result.token + - result.token is falsy - result.operation == 'remove' diff --git a/tests/integration/targets/consul/tasks/main.yml b/tests/integration/targets/consul/tasks/main.yml index 0ac58fc40e..04e2d1b2b5 100644 --- a/tests/integration/targets/consul/tasks/main.yml +++ b/tests/integration/targets/consul/tasks/main.yml @@ -14,96 +14,96 @@ consul_uri: https://releases.hashicorp.com/consul/{{ consul_version }}/consul_{{ consul_version }}_{{ ansible_system | lower }}_{{ consul_arch }}.zip consul_cmd: '{{ remote_tmp_dir }}/consul' block: - - name: Install requests<2.20 (CentOS/RHEL 6) - pip: - name: requests<2.20 - extra_args: "-c {{ remote_constraints }}" - register: result - until: result is success - when: ansible_distribution_file_variety|default() == 'RedHat' and ansible_distribution_major_version is version('6', '<=') - - name: Install python-consul - pip: - name: python-consul - extra_args: "-c {{ remote_constraints }}" - register: result - until: result is success - - name: Generate privatekey - community.crypto.openssl_privatekey: - path: '{{ remote_tmp_dir }}/privatekey.pem' - - name: Generate CSR - community.crypto.openssl_csr: - path: '{{ remote_tmp_dir }}/csr.csr' - privatekey_path: '{{ remote_tmp_dir }}/privatekey.pem' - subject: - commonName: localhost - - name: Generate selfsigned certificate - register: selfsigned_certificate - community.crypto.x509_certificate: - path: '{{ remote_tmp_dir }}/cert.pem' - csr_path: '{{ remote_tmp_dir }}/csr.csr' - privatekey_path: '{{ remote_tmp_dir }}/privatekey.pem' - provider: selfsigned - selfsigned_digest: sha256 - - name: Install unzip - package: - name: unzip - register: result - until: result is success - when: ansible_distribution != "MacOSX" - - assert: - that: ansible_architecture in ['i386', 'x86_64', 'amd64'] - - set_fact: - consul_arch: '386' - when: ansible_architecture == 'i386' - - set_fact: - consul_arch: amd64 - when: 
ansible_architecture in ['x86_64', 'amd64'] - - name: Download consul binary - unarchive: - src: '{{ consul_uri }}' - dest: '{{ remote_tmp_dir }}' - remote_src: true - register: result - until: result is success - - vars: - remote_dir: '{{ echo_remote_tmp_dir.stdout }}' - block: - - command: echo {{ remote_tmp_dir }} - register: echo_remote_tmp_dir - - name: Create configuration file - template: - src: consul_config.hcl.j2 - dest: '{{ remote_tmp_dir }}/consul_config.hcl' - - name: Start Consul (dev mode enabled) - shell: nohup {{ consul_cmd }} agent -dev -config-file {{ remote_tmp_dir }}/consul_config.hcl /dev/null 2>&1 & - - name: Bootstrap ACL - consul_acl_bootstrap: - register: consul_bootstrap_result + - name: Install requests<2.20 (CentOS/RHEL 6) + pip: + name: requests<2.20 + extra_args: "-c {{ remote_constraints }}" + register: result + until: result is success + when: ansible_distribution_file_variety|default() == 'RedHat' and ansible_distribution_major_version is version('6', '<=') + - name: Install python-consul + pip: + name: python-consul + extra_args: "-c {{ remote_constraints }}" + register: result + until: result is success + - name: Generate privatekey + community.crypto.openssl_privatekey: + path: '{{ remote_tmp_dir }}/privatekey.pem' + - name: Generate CSR + community.crypto.openssl_csr: + path: '{{ remote_tmp_dir }}/csr.csr' + privatekey_path: '{{ remote_tmp_dir }}/privatekey.pem' + subject: + commonName: localhost + - name: Generate selfsigned certificate + register: selfsigned_certificate + community.crypto.x509_certificate: + path: '{{ remote_tmp_dir }}/cert.pem' + csr_path: '{{ remote_tmp_dir }}/csr.csr' + privatekey_path: '{{ remote_tmp_dir }}/privatekey.pem' + provider: selfsigned + selfsigned_digest: sha256 + - name: Install unzip + package: + name: unzip + register: result + until: result is success + when: ansible_distribution != "MacOSX" + - assert: + that: ansible_architecture in ['i386', 'x86_64', 'amd64'] - set_fact: - consul_management_token: '{{ consul_bootstrap_result.result.SecretID }}' - - name: Create some data - command: '{{ consul_cmd }} kv put -token={{consul_management_token}} data/value{{ item }} foo{{ item }}' - loop: - - 1 - - 2 - - 3 - - import_tasks: consul_general.yml - - import_tasks: consul_kv.yml + consul_arch: '386' + when: ansible_architecture == 'i386' + - set_fact: + consul_arch: amd64 + when: ansible_architecture in ['x86_64', 'amd64'] + - name: Download consul binary + unarchive: + src: '{{ consul_uri }}' + dest: '{{ remote_tmp_dir }}' + remote_src: true + register: result + until: result is success + - vars: + remote_dir: '{{ echo_remote_tmp_dir.stdout }}' + block: + - command: echo {{ remote_tmp_dir }} + register: echo_remote_tmp_dir + - name: Create configuration file + template: + src: consul_config.hcl.j2 + dest: '{{ remote_tmp_dir }}/consul_config.hcl' + - name: Start Consul (dev mode enabled) + shell: nohup {{ consul_cmd }} agent -dev -config-file {{ remote_tmp_dir }}/consul_config.hcl /dev/null 2>&1 & + - name: Bootstrap ACL + consul_acl_bootstrap: + register: consul_bootstrap_result + - set_fact: + consul_management_token: '{{ consul_bootstrap_result.result.SecretID }}' + - name: Create some data + command: '{{ consul_cmd }} kv put -token={{consul_management_token}} data/value{{ item }} foo{{ item }}' + loop: + - 1 + - 2 + - 3 + - import_tasks: consul_general.yml + - import_tasks: consul_kv.yml - - block: - - import_tasks: consul_session.yml - - import_tasks: consul_policy.yml - - import_tasks: consul_role.yml - - 
import_tasks: consul_token.yml - - import_tasks: consul_auth_method.yml - - import_tasks: consul_binding_rule.yml - - import_tasks: consul_agent_service.yml - - import_tasks: consul_agent_check.yml - module_defaults: - group/community.general.consul: - token: "{{ consul_management_token }}" + - block: + - import_tasks: consul_session.yml + - import_tasks: consul_policy.yml + - import_tasks: consul_role.yml + - import_tasks: consul_token.yml + - import_tasks: consul_auth_method.yml + - import_tasks: consul_binding_rule.yml + - import_tasks: consul_agent_service.yml + - import_tasks: consul_agent_check.yml + module_defaults: + group/community.general.consul: + token: "{{ consul_management_token }}" - always: + always: - name: Kill consul process shell: kill $(cat {{ remote_tmp_dir }}/consul.pid) - ignore_errors: true \ No newline at end of file + ignore_errors: true diff --git a/tests/integration/targets/copr/aliases b/tests/integration/targets/copr/aliases index ed3c1af00d..d333eac1a1 100644 --- a/tests/integration/targets/copr/aliases +++ b/tests/integration/targets/copr/aliases @@ -7,3 +7,4 @@ needs/root skip/macos skip/osx skip/freebsd +skip/rhel10.0 # FIXME diff --git a/tests/integration/targets/copr/tasks/main.yml b/tests/integration/targets/copr/tasks/main.yml index 0d66378112..4fc5ae5c08 100644 --- a/tests/integration/targets/copr/tasks/main.yml +++ b/tests/integration/targets/copr/tasks/main.yml @@ -11,133 +11,133 @@ or (ansible_os_family == 'RedHat' and ansible_distribution != 'Fedora' and ansible_distribution_major_version | int >= 8) block: - - debug: var=copr_chroot - - name: enable copr project - copr: - host: copr.fedorainfracloud.org - state: enabled - name: '{{ copr_fullname }}' - chroot: "{{ copr_chroot }}" - register: result + - debug: var=copr_chroot + - name: enable copr project + copr: + host: copr.fedorainfracloud.org + state: enabled + name: '{{ copr_fullname }}' + chroot: "{{ copr_chroot }}" + register: result - - name: assert that the copr project was enabled - assert: - that: - - 'result is changed' - - result.msg == 'enabled' - - result.info == 'Please note that this repository is not part of the main distribution' + - name: assert that the copr project was enabled + assert: + that: + - 'result is changed' + - result.msg == 'enabled' + - result.info == 'Please note that this repository is not part of the main distribution' - - name: enable copr project - check_mode: true - copr: - state: enabled - name: '{{ copr_fullname }}' - chroot: '{{ copr_chroot }}' - register: result + - name: enable copr project + check_mode: true + copr: + state: enabled + name: '{{ copr_fullname }}' + chroot: '{{ copr_chroot }}' + register: result - - name: assert that the copr project was enabled - assert: - that: - - result is not changed - - result.msg == 'enabled' + - name: assert that the copr project was enabled + assert: + that: + - result is not changed + - result.msg == 'enabled' - - name: Ensure the repo is installed and enabled | slurp - register: result - ansible.builtin.slurp: - src: "{{ copr_repofile }}" + - name: Ensure the repo is installed and enabled | slurp + register: result + ansible.builtin.slurp: + src: "{{ copr_repofile }}" - - name: Ensure the repo is installed and enabled - vars: - content: "{{ result.content | b64decode }}" - _baseurl: "{{ 'https://download.copr.fedorainfracloud.org/results/gotmax23/community.general.copr_integration_tests' | regex_escape }}" - baseurl: "{{ content | regex_search('baseurl=' ~ _baseurl) }}" - block: - - ansible.builtin.debug: - 
var: content - - ansible.builtin.debug: - var: baseurl - - name: Ensure the repo is installed and enabled - ansible.builtin.assert: - that: - - "'enabled=1' in content" - - baseurl | length > 0 + - name: Ensure the repo is installed and enabled + vars: + content: "{{ result.content | b64decode }}" + _baseurl: "{{ 'https://download.copr.fedorainfracloud.org/results/gotmax23/community.general.copr_integration_tests' | regex_escape }}" + baseurl: "{{ content | regex_search('baseurl=' ~ _baseurl) }}" + block: + - ansible.builtin.debug: + var: content + - ansible.builtin.debug: + var: baseurl + - name: Ensure the repo is installed and enabled + ansible.builtin.assert: + that: + - "'enabled=1' in content" + - baseurl | length > 0 - - name: Install test package from Copr - when: - # Copr does not build new packages for EOL Fedoras. - - > - not (ansible_distribution == 'Fedora' and - ansible_distribution_major_version | int < 35) - block: - - name: install test package from the copr - ansible.builtin.package: - update_cache: true - name: copr-module-integration-dummy-package + - name: Install test package from Copr + when: + # Copr does not build new packages for EOL Fedoras. + - > + not (ansible_distribution == 'Fedora' and + ansible_distribution_major_version | int < 35) + block: + - name: install test package from the copr + ansible.builtin.package: + update_cache: true + name: copr-module-integration-dummy-package - - name: uninstall test package - register: result - ansible.builtin.package: - name: copr-module-integration-dummy-package - state: absent + - name: uninstall test package + register: result + ansible.builtin.package: + name: copr-module-integration-dummy-package + state: absent - - name: check uninstall test package - ansible.builtin.assert: - that: result.changed | bool + - name: check uninstall test package + ansible.builtin.assert: + that: result.changed | bool - - name: remove copr project - copr: - state: absent - name: '{{ copr_fullname }}' - register: result + - name: remove copr project + copr: + state: absent + name: '{{ copr_fullname }}' + register: result - - name: assert that the copr project was removed - assert: - that: - - 'result is changed' - - result.msg == 'absent' + - name: assert that the copr project was removed + assert: + that: + - 'result is changed' + - result.msg == 'absent' - - name: Ensure the repo file was removed | stat - register: result - ansible.builtin.stat: - dest: "{{ copr_repofile }}" + - name: Ensure the repo file was removed | stat + register: result + ansible.builtin.stat: + dest: "{{ copr_repofile }}" - - name: Ensure the repo file was removed - ansible.builtin.assert: - that: not result.stat.exists | bool + - name: Ensure the repo file was removed + ansible.builtin.assert: + that: not result.stat.exists | bool - - name: disable copr project - copr: - state: disabled - name: '{{ copr_fullname }}' - chroot: '{{ copr_chroot }}' - register: result + - name: disable copr project + copr: + state: disabled + name: '{{ copr_fullname }}' + chroot: '{{ copr_chroot }}' + register: result - - name: assert that the copr project was disabled - assert: - that: - - 'result is changed' - - result.msg == 'disabled' + - name: assert that the copr project was disabled + assert: + that: + - 'result is changed' + - result.msg == 'disabled' - - name: Ensure the repo is installed but disabled | slurp - register: result - ansible.builtin.slurp: - src: "{{ copr_repofile }}" + - name: Ensure the repo is installed but disabled | slurp + register: result + 
ansible.builtin.slurp: + src: "{{ copr_repofile }}" - - name: Ensure the repo is installed but disabled - vars: - content: "{{ result.content | b64decode }}" - _baseurl: "{{ 'https://download.copr.fedorainfracloud.org/results/gotmax23/community.general.copr_integration_tests' | regex_escape }}" - baseurl: "{{ content | regex_search('baseurl=' ~ _baseurl) }}" - block: - - ansible.builtin.debug: - var: content - - ansible.builtin.debug: - var: baseurl - - name: Ensure the repo is installed but disabled - ansible.builtin.assert: - that: - - "'enabled=0' in content" - - baseurl | length > 0 + - name: Ensure the repo is installed but disabled + vars: + content: "{{ result.content | b64decode }}" + _baseurl: "{{ 'https://download.copr.fedorainfracloud.org/results/gotmax23/community.general.copr_integration_tests' | regex_escape }}" + baseurl: "{{ content | regex_search('baseurl=' ~ _baseurl) }}" + block: + - ansible.builtin.debug: + var: content + - ansible.builtin.debug: + var: baseurl + - name: Ensure the repo is installed but disabled + ansible.builtin.assert: + that: + - "'enabled=0' in content" + - baseurl | length > 0 always: - name: clean up diff --git a/tests/integration/targets/cronvar/tasks/main.yml b/tests/integration/targets/cronvar/tasks/main.yml index 73ec41abca..0d3ae30daf 100644 --- a/tests/integration/targets/cronvar/tasks/main.yml +++ b/tests/integration/targets/cronvar/tasks/main.yml @@ -122,3 +122,36 @@ - custom_varcheck1.stdout == '1' - custom_varcheck2.stdout == '1' - custom_varcheck3.stdout == '0' + + +- name: Add variable with empty string + community.general.cronvar: + name: EMPTY_VAR + value: "" + state: present + +- name: Assert empty var present + ansible.builtin.shell: crontab -l + register: result + changed_when: false + +- name: Assert line is quoted + ansible.builtin.assert: + that: >- + 'EMPTY_VAR=""' in result.stdout + +- name: Attempt to add cron variable to non-existent parent directory + cronvar: + name: NOPARENT_VAR + value: noparentval + cron_file: /nonexistent/foo + user: root + register: invalid_directory_cronvar_result + ignore_errors: true + +- name: Assert that the cronvar task failed due to invalid directory + ansible.builtin.assert: + that: + - invalid_directory_cronvar_result is failed + - >- + "Parent directory '/nonexistent' does not exist for cron_file: '/nonexistent/foo'" == invalid_directory_cronvar_result.msg diff --git a/tests/integration/targets/decompress/tasks/dest.yml b/tests/integration/targets/decompress/tasks/dest.yml index 9a7bbe499f..4afd39e7b3 100644 --- a/tests/integration/targets/decompress/tasks/dest.yml +++ b/tests/integration/targets/decompress/tasks/dest.yml @@ -34,7 +34,8 @@ - name: Test that file exists assert: - that: "{{ item.stat.exists }}" + that: + - item.stat.exists quiet: true loop: "{{ result_files_stat.results }}" loop_control: diff --git a/tests/integration/targets/deploy_helper/tasks/main.yml b/tests/integration/targets/deploy_helper/tasks/main.yml index 9bd5f41506..84eb1640bd 100644 --- a/tests/integration/targets/deploy_helper/tasks/main.yml +++ b/tests/integration/targets/deploy_helper/tasks/main.yml @@ -16,43 +16,43 @@ - name: Assert State=query with default parameters assert: that: - - "'project_path' in deploy_helper" - - "deploy_helper.current_path == deploy_helper.project_path ~ '/current'" - - "deploy_helper.releases_path == deploy_helper.project_path ~ '/releases'" - - "deploy_helper.shared_path == deploy_helper.project_path ~ '/shared'" - - "deploy_helper.unfinished_filename == 'DEPLOY_UNFINISHED'" - - 
"'previous_release' in deploy_helper" - - "'previous_release_path' in deploy_helper" - - "'new_release' in deploy_helper" - - "'new_release_path' in deploy_helper" - - "deploy_helper.new_release_path == deploy_helper.releases_path ~ '/' ~ deploy_helper.new_release" + - "'project_path' in deploy_helper" + - "deploy_helper.current_path == deploy_helper.project_path ~ '/current'" + - "deploy_helper.releases_path == deploy_helper.project_path ~ '/releases'" + - "deploy_helper.shared_path == deploy_helper.project_path ~ '/shared'" + - "deploy_helper.unfinished_filename == 'DEPLOY_UNFINISHED'" + - "'previous_release' in deploy_helper" + - "'previous_release_path' in deploy_helper" + - "'new_release' in deploy_helper" + - "'new_release_path' in deploy_helper" + - "deploy_helper.new_release_path == deploy_helper.releases_path ~ '/' ~ deploy_helper.new_release" - name: State=query with relative overridden paths deploy_helper: path={{ deploy_helper_test_root }} current_path=CURRENT_PATH releases_path=RELEASES_PATH shared_path=SHARED_PATH state=query - name: Assert State=query with relative overridden paths assert: that: - - "deploy_helper.current_path == deploy_helper.project_path ~ '/CURRENT_PATH'" - - "deploy_helper.releases_path == deploy_helper.project_path ~ '/RELEASES_PATH'" - - "deploy_helper.shared_path == deploy_helper.project_path ~ '/SHARED_PATH'" - - "deploy_helper.new_release_path == deploy_helper.releases_path ~ '/' ~ deploy_helper.new_release" + - "deploy_helper.current_path == deploy_helper.project_path ~ '/CURRENT_PATH'" + - "deploy_helper.releases_path == deploy_helper.project_path ~ '/RELEASES_PATH'" + - "deploy_helper.shared_path == deploy_helper.project_path ~ '/SHARED_PATH'" + - "deploy_helper.new_release_path == deploy_helper.releases_path ~ '/' ~ deploy_helper.new_release" - name: State=query with absolute overridden paths deploy_helper: path={{ deploy_helper_test_root }} current_path=/CURRENT_PATH releases_path=/RELEASES_PATH shared_path=/SHARED_PATH state=query - name: Assert State=query with absolute overridden paths assert: that: - - "deploy_helper.current_path == '/CURRENT_PATH'" - - "deploy_helper.releases_path == '/RELEASES_PATH'" - - "deploy_helper.shared_path == '/SHARED_PATH'" - - "deploy_helper.new_release_path == deploy_helper.releases_path ~ '/' ~ deploy_helper.new_release" + - "deploy_helper.current_path == '/CURRENT_PATH'" + - "deploy_helper.releases_path == '/RELEASES_PATH'" + - "deploy_helper.shared_path == '/SHARED_PATH'" + - "deploy_helper.new_release_path == deploy_helper.releases_path ~ '/' ~ deploy_helper.new_release" - name: State=query with overridden unfinished_filename deploy_helper: path={{ deploy_helper_test_root }} unfinished_filename=UNFINISHED_DEPLOY state=query - name: Assert State=query with overridden unfinished_filename assert: that: - - "'UNFINISHED_DEPLOY' == deploy_helper.unfinished_filename" + - "'UNFINISHED_DEPLOY' == deploy_helper.unfinished_filename" # Remove the root folder just in case it exists - file: path={{ deploy_helper_test_root }} state=absent @@ -66,8 +66,8 @@ - name: Assert State=present with default parameters assert: that: - - "releases_path.stat.exists" - - "shared_path.stat.exists" + - "releases_path.stat.exists" + - "shared_path.stat.exists" # Setup older releases for tests - file: path={{ deploy_helper.releases_path }}/{{ item }} state=directory @@ -88,9 +88,9 @@ - name: Assert State=finalize with default parameters assert: that: - - "current_path.stat.islnk" - - "deploy_helper.new_release_path in 
current_path.stat.lnk_source" - - "not current_path_unfinished_filename.stat.exists" + - "current_path.stat.islnk" + - "deploy_helper.new_release_path in current_path.stat.lnk_source" + - "not current_path_unfinished_filename.stat.exists" - stat: path={{ deploy_helper.releases_path }}/third register: third_release_path - shell: "ls {{ deploy_helper.releases_path }} | wc -l" @@ -98,13 +98,13 @@ - name: Assert State=finalize with default parameters (clean=true checks) assert: that: - - "not third_release_path.stat.exists" - - "releases_count.stdout|trim == '6'" + - "not third_release_path.stat.exists" + - "releases_count.stdout|trim == '6'" - deploy_helper: path={{ deploy_helper_test_root }} release={{ deploy_helper.new_release }} state=query - name: Assert State=finalize with default parameters (previous_release checks) assert: that: - - "deploy_helper.new_release == deploy_helper.previous_release" + - "deploy_helper.new_release == deploy_helper.previous_release" - name: State=absent with default parameters deploy_helper: path={{ deploy_helper_test_root }} state=absent @@ -113,7 +113,7 @@ - name: Assert State=absent with default parameters assert: that: - - "not project_path.stat.exists" + - "not project_path.stat.exists" - debug: msg="Clearing all release data and facts ---------" @@ -123,11 +123,12 @@ register: releases_path - stat: path={{ deploy_helper.shared_path }} register: shared_path + when: deploy_helper.shared_path is truthy - name: Assert State=present with shared_path set to False assert: that: - - "releases_path.stat.exists" - - "not shared_path.stat.exists" + - "releases_path.stat.exists" + - "deploy_helper.shared_path is falsy or not shared_path.stat.exists" # Setup older releases for tests - file: path={{ deploy_helper.releases_path }}/{{ item }} state=directory @@ -150,9 +151,9 @@ - name: Assert State=finalize with default parameters (clean=true checks) assert: that: - - "not third_release_path.stat.exists" - - "before_releases_count.stdout|trim == '6'" - - "releases_count.stdout|trim == '3'" + - "not third_release_path.stat.exists" + - "before_releases_count.stdout|trim == '6'" + - "releases_count.stdout|trim == '3'" # Remove the root folder - file: path={{ deploy_helper_test_root }} state=absent diff --git a/tests/integration/targets/discord/defaults/main.yml b/tests/integration/targets/discord/defaults/main.yml index ef01141ca0..e53245324c 100644 --- a/tests/integration/targets/discord/defaults/main.yml +++ b/tests/integration/targets/discord/defaults/main.yml @@ -3,5 +3,5 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -discord_id: 000 +discord_id: 0 discord_token: xxx diff --git a/tests/integration/targets/django_command/aliases b/tests/integration/targets/django_command/aliases index ae3c2623a0..c14251f85e 100644 --- a/tests/integration/targets/django_command/aliases +++ b/tests/integration/targets/django_command/aliases @@ -19,3 +19,6 @@ skip/rhel9.1 skip/rhel9.2 skip/rhel9.3 skip/rhel9.4 +skip/rhel9.5 +skip/rhel9.6 +skip/rhel10.0 diff --git a/tests/integration/targets/django_manage/aliases b/tests/integration/targets/django_manage/aliases index ae3c2623a0..c14251f85e 100644 --- a/tests/integration/targets/django_manage/aliases +++ b/tests/integration/targets/django_manage/aliases @@ -19,3 +19,6 @@ skip/rhel9.1 skip/rhel9.2 skip/rhel9.3 skip/rhel9.4 +skip/rhel9.5 +skip/rhel9.6 +skip/rhel10.0 diff --git 
a/tests/integration/targets/filesize/tasks/basics.yml b/tests/integration/targets/filesize/tasks/basics.yml index 3c06731899..d4675556a0 100644 --- a/tests/integration/targets/filesize/tasks/basics.yml +++ b/tests/integration/targets/filesize/tasks/basics.yml @@ -177,7 +177,6 @@ - filesize_stat_basic_14.stat.checksum == filesize_test_checksum - - name: Expand the file with 1 byte (57001B) (check mode) community.general.filesize: path: "{{ filesize_testfile }}" @@ -253,7 +252,6 @@ - filesize_stat_basic_24.stat.checksum != filesize_test_checksum - - name: Expand the file up to 2 MiB (2*1024*1024 bytes) (check mode) community.general.filesize: path: "{{ filesize_testfile }}" @@ -327,7 +325,6 @@ - filesize_stat_basic_34.stat.size == 2*1024**2 - - name: Truncate the file to 57kB (57000B) (check mode) community.general.filesize: path: "{{ filesize_testfile }}" @@ -404,7 +401,6 @@ - filesize_stat_basic_44.stat.checksum == filesize_test_checksum - - name: Remove test file ansible.builtin.file: path: "{{ filesize_testfile }}" diff --git a/tests/integration/targets/filesize/tasks/floats.yml b/tests/integration/targets/filesize/tasks/floats.yml index 6d1bde22c9..9c743f261d 100644 --- a/tests/integration/targets/filesize/tasks/floats.yml +++ b/tests/integration/targets/filesize/tasks/floats.yml @@ -89,7 +89,6 @@ - filesize_stat_float_04.stat.size == 512512 - - name: Create a file with a size of 512.513kB (check mode) community.general.filesize: path: "{{ filesize_testfile }}" @@ -166,7 +165,6 @@ - filesize_stat_float_14.stat.size == 512513 - - name: Create a file with a size of 4.004MB (check mode) community.general.filesize: path: "{{ filesize_testfile }}" diff --git a/tests/integration/targets/filesize/tasks/sparse.yml b/tests/integration/targets/filesize/tasks/sparse.yml index 348a1eea1b..7c1b6744b2 100644 --- a/tests/integration/targets/filesize/tasks/sparse.yml +++ b/tests/integration/targets/filesize/tasks/sparse.yml @@ -119,7 +119,6 @@ - filesize_stat_sparse_06.stat.size == 2*1000**4 - - name: Change sparse file size to 2TiB (check mode) community.general.filesize: path: "{{ filesize_testfile }}" @@ -198,7 +197,6 @@ - filesize_stat_sparse_14.stat.size == 2199023255552 - - name: Change sparse file size to 2.321TB (check mode) community.general.filesize: path: "{{ filesize_testfile }}" @@ -279,7 +277,6 @@ - filesize_stat_sparse_24.stat.size == 2321000000000 - - name: Remove test file ansible.builtin.file: path: "{{ filesize_testfile }}" diff --git a/tests/integration/targets/filesize/tasks/symlinks.yml b/tests/integration/targets/filesize/tasks/symlinks.yml index 0118896568..4f65d80c8c 100644 --- a/tests/integration/targets/filesize/tasks/symlinks.yml +++ b/tests/integration/targets/filesize/tasks/symlinks.yml @@ -13,14 +13,13 @@ - name: Create a broken symlink in the same directory ansible.builtin.file: - src: "{{ filesize_testfile | basename }}" + src: "{{ filesize_testfile | basename }}" dest: "{{ filesize_testlink }}" state: link force: true follow: false - - name: Create a file with a size of 512 kB (512000 bytes) (check mode) community.general.filesize: path: "{{ filesize_testlink }}" @@ -85,7 +84,6 @@ - filesize_test_symlink_04.path != filesize_testlink - - name: Remove test file ansible.builtin.file: path: "{{ filesize_testfile }}" diff --git a/tests/integration/targets/filesystem/tasks/main.yml b/tests/integration/targets/filesystem/tasks/main.yml index 51361079ce..55a513bf77 100644 --- a/tests/integration/targets/filesystem/tasks/main.yml +++ 
b/tests/integration/targets/filesystem/tasks/main.yml @@ -59,6 +59,8 @@ item.0.key == "reiserfs")' # reiserfs packages apparently not available with Alpine - 'not (ansible_distribution == "Alpine" and item.0.key == "reiserfs")' + # reiserfsprogs packages no longer available with Arch Linux + - 'not (ansible_distribution == "Archlinux" and item.0.key == "reiserfs")' # ocfs2 only available on Debian based distributions - 'not (item.0.key == "ocfs2" and ansible_os_family != "Debian")' # Tests use losetup which can not be used inside unprivileged container diff --git a/tests/integration/targets/filesystem/tasks/reset_fs_uuid.yml b/tests/integration/targets/filesystem/tasks/reset_fs_uuid.yml index 77dad22033..bd9aa607c4 100644 --- a/tests/integration/targets/filesystem/tasks/reset_fs_uuid.yml +++ b/tests/integration/targets/filesystem/tasks/reset_fs_uuid.yml @@ -8,52 +8,52 @@ - new_uuid | default(False) - not (ansible_system == "FreeBSD" and fstype == "xfs") block: - - name: "Create filesystem ({{ fstype }})" - community.general.filesystem: - dev: '{{ dev }}' - fstype: '{{ fstype }}' - register: fs_result + - name: "Create filesystem ({{ fstype }})" + community.general.filesystem: + dev: '{{ dev }}' + fstype: '{{ fstype }}' + register: fs_result - - name: "Get UUID of created filesystem" - ansible.builtin.shell: - cmd: "{{ get_uuid_cmd }}" - changed_when: false - register: uuid + - name: "Get UUID of created filesystem" + ansible.builtin.shell: + cmd: "{{ get_uuid_cmd }}" + changed_when: false + register: uuid - - name: "Reset filesystem ({{ fstype }}) UUID" - community.general.filesystem: - dev: '{{ dev }}' - fstype: '{{ fstype }}' - uuid: "{{ new_uuid }}" - register: fs_resetuuid_result - - - name: "Get UUID of the filesystem" - ansible.builtin.shell: - cmd: "{{ get_uuid_cmd }}" - changed_when: false - register: uuid2 - - - name: "Assert that filesystem UUID is changed" - ansible.builtin.assert: - that: - - 'fs_resetuuid_result is changed' - - 'fs_resetuuid_result is success' - - 'uuid.stdout != uuid2.stdout' - - - when: - - (grow | bool and (fstype != "vfat" or resize_vfat)) or - (fstype == "xfs" and ansible_system == "Linux" and - ansible_distribution not in ["CentOS", "Ubuntu"]) - block: - - name: "Reset filesystem ({{ fstype }}) UUID and resizefs" - ignore_errors: true + - name: "Reset filesystem ({{ fstype }}) UUID" community.general.filesystem: dev: '{{ dev }}' fstype: '{{ fstype }}' uuid: "{{ new_uuid }}" - resizefs: true - register: fs_resetuuid_and_resizefs_result + register: fs_resetuuid_result - - name: "Assert that filesystem UUID reset and resizefs failed" + - name: "Get UUID of the filesystem" + ansible.builtin.shell: + cmd: "{{ get_uuid_cmd }}" + changed_when: false + register: uuid2 + + - name: "Assert that filesystem UUID is changed" ansible.builtin.assert: - that: fs_resetuuid_and_resizefs_result is failed + that: + - 'fs_resetuuid_result is changed' + - 'fs_resetuuid_result is success' + - 'uuid.stdout != uuid2.stdout' + + - when: + - (grow | bool and (fstype != "vfat" or resize_vfat)) or + (fstype == "xfs" and ansible_system == "Linux" and + ansible_distribution not in ["CentOS", "Ubuntu"]) + block: + - name: "Reset filesystem ({{ fstype }}) UUID and resizefs" + ignore_errors: true + community.general.filesystem: + dev: '{{ dev }}' + fstype: '{{ fstype }}' + uuid: "{{ new_uuid }}" + resizefs: true + register: fs_resetuuid_and_resizefs_result + + - name: "Assert that filesystem UUID reset and resizefs failed" + ansible.builtin.assert: + that: 
fs_resetuuid_and_resizefs_result is failed diff --git a/tests/integration/targets/filesystem/tasks/set_fs_uuid_on_creation.yml b/tests/integration/targets/filesystem/tasks/set_fs_uuid_on_creation.yml index f52c44d655..9ec45e9e5c 100644 --- a/tests/integration/targets/filesystem/tasks/set_fs_uuid_on_creation.yml +++ b/tests/integration/targets/filesystem/tasks/set_fs_uuid_on_creation.yml @@ -12,33 +12,33 @@ - new_uuid | default(False) - not (ansible_system == "FreeBSD" and fstype == "xfs") block: - - name: "Create filesystem ({{ fstype }}) with UUID" - community.general.filesystem: - dev: '{{ dev }}' - fstype: '{{ fstype }}' - uuid: '{{ random_uuid }}' - register: fs_result + - name: "Create filesystem ({{ fstype }}) with UUID" + community.general.filesystem: + dev: '{{ dev }}' + fstype: '{{ fstype }}' + uuid: '{{ random_uuid }}' + register: fs_result - - name: "Get UUID of the created filesystem" - ansible.builtin.shell: - cmd: "{{ get_uuid_cmd }}" - changed_when: false - register: uuid + - name: "Get UUID of the created filesystem" + ansible.builtin.shell: + cmd: "{{ get_uuid_cmd }}" + changed_when: false + register: uuid - - name: "Assert that filesystem UUID is the random UUID set on creation" - ansible.builtin.assert: - that: (random_uuid | replace('-','')) == ( uuid.stdout | replace('-','')) + - name: "Assert that filesystem UUID is the random UUID set on creation" + ansible.builtin.assert: + that: (random_uuid | replace('-','')) == ( uuid.stdout | replace('-','')) - when: not (new_uuid | default(False)) block: - - name: "Create filesystem ({{ fstype }}) without UUID support" - ignore_errors: true - community.general.filesystem: - dev: '{{ dev }}' - fstype: '{{ fstype }}' - uuid: '{{ random_uuid }}' - register: fs_result + - name: "Create filesystem ({{ fstype }}) without UUID support" + ignore_errors: true + community.general.filesystem: + dev: '{{ dev }}' + fstype: '{{ fstype }}' + uuid: '{{ random_uuid }}' + register: fs_result - - name: "Assert that filesystem creation failed" - ansible.builtin.assert: - that: fs_result is failed + - name: "Assert that filesystem creation failed" + ansible.builtin.assert: + that: fs_result is failed diff --git a/tests/integration/targets/filesystem/tasks/set_fs_uuid_on_creation_with_opts.yml b/tests/integration/targets/filesystem/tasks/set_fs_uuid_on_creation_with_opts.yml index fc73e57ee2..e89668bad0 100644 --- a/tests/integration/targets/filesystem/tasks/set_fs_uuid_on_creation_with_opts.yml +++ b/tests/integration/targets/filesystem/tasks/set_fs_uuid_on_creation_with_opts.yml @@ -9,25 +9,25 @@ - fstype != "xfs" block: - - name: "Generate random UUIDs" - ansible.builtin.set_fact: - random_uuid: '{{ "first_random_uuid" | ansible.builtin.to_uuid }}' - random_uuid2: '{{ "second_random_uuid" | ansible.builtin.to_uuid }}' + - name: "Generate random UUIDs" + ansible.builtin.set_fact: + random_uuid: '{{ "first_random_uuid" | ansible.builtin.to_uuid }}' + random_uuid2: '{{ "second_random_uuid" | ansible.builtin.to_uuid }}' - - name: "Create filesystem ({{ fstype }}) with fix UUID as opt" - community.general.filesystem: - dev: '{{ dev }}' - fstype: '{{ fstype }}' - opts: "{{ ((fstype == 'lvm') | ansible.builtin.ternary('--norestorefile --uuid ', '-U ')) + random_uuid2 }}" - uuid: '{{ random_uuid }}' - register: fs_result2 + - name: "Create filesystem ({{ fstype }}) with fix UUID as opt" + community.general.filesystem: + dev: '{{ dev }}' + fstype: '{{ fstype }}' + opts: "{{ ((fstype == 'lvm') | ansible.builtin.ternary('--norestorefile --uuid ', '-U ')) + 
random_uuid2 }}" + uuid: '{{ random_uuid }}' + register: fs_result2 - - name: "Get UUID of the created filesystem" - ansible.builtin.shell: - cmd: "{{ get_uuid_cmd }}" - changed_when: false - register: uuid2 + - name: "Get UUID of the created filesystem" + ansible.builtin.shell: + cmd: "{{ get_uuid_cmd }}" + changed_when: false + register: uuid2 - - name: "Assert that filesystem UUID is the one set on creation with opt" - ansible.builtin.assert: - that: (random_uuid2 | replace('-','')) == ( uuid2.stdout | replace('-','')) + - name: "Assert that filesystem UUID is the one set on creation with opt" + ansible.builtin.assert: + that: (random_uuid2 | replace('-','')) == ( uuid2.stdout | replace('-','')) diff --git a/tests/integration/targets/filesystem/tasks/setup.yml b/tests/integration/targets/filesystem/tasks/setup.yml index 77c028acaf..aeda1e4a7f 100644 --- a/tests/integration/targets/filesystem/tasks/setup.yml +++ b/tests/integration/targets/filesystem/tasks/setup.yml @@ -75,7 +75,7 @@ state: present when: - ansible_system == 'Linux' - - ansible_os_family not in ['Suse', 'RedHat', 'Alpine'] + - ansible_os_family not in ['Suse', 'RedHat', 'Alpine', 'Archlinux'] - name: "Install reiserfs progs (FreeBSD)" ansible.builtin.package: diff --git a/tests/integration/targets/filter_accumulate/tasks/main.yml b/tests/integration/targets/filter_accumulate/tasks/main.yml index 8fe854228a..7f2ddfb7d7 100644 --- a/tests/integration/targets/filter_accumulate/tasks/main.yml +++ b/tests/integration/targets/filter_accumulate/tasks/main.yml @@ -30,6 +30,6 @@ assert: that: - integer_result is failed - - integer_result.msg is match('Invalid value type.*') + - integer_result.msg is search('Invalid value type') - non_uniform_list_result is failed - - non_uniform_list_result.msg is match('Unexpected templating type error.*can only concatenate str.*') + - non_uniform_list_result.msg is search('can only concatenate str') diff --git a/tests/integration/targets/filter_counter/tasks/main.yml b/tests/integration/targets/filter_counter/tasks/main.yml index 77d6b1b020..2879aaea12 100644 --- a/tests/integration/targets/filter_counter/tasks/main.yml +++ b/tests/integration/targets/filter_counter/tasks/main.yml @@ -26,7 +26,7 @@ assert: that: - res is failed - - res.msg is match('Argument for community.general.counter must be a sequence') + - res.msg is search('Argument for community.general.counter must be a sequence') - name: test fail element not hashable debug: @@ -38,4 +38,4 @@ assert: that: - res is failed - - res.msg is match('community.general.counter needs a sequence with hashable elements') + - res.msg is search('community.general.counter needs a sequence with hashable elements') diff --git a/tests/integration/targets/filter_from_csv/tasks/main.yml b/tests/integration/targets/filter_from_csv/tasks/main.yml index 5c58f85d47..ae9af332d7 100644 --- a/tests/integration/targets/filter_from_csv/tasks/main.yml +++ b/tests/integration/targets/filter_from_csv/tasks/main.yml @@ -51,4 +51,4 @@ assert: that: - _invalid_csv_strict_true is failed - - _invalid_csv_strict_true.msg is match('Unable to process file:.*') + - _invalid_csv_strict_true.msg is search('Unable to process file:.*') diff --git a/tests/integration/targets/filter_groupby_as_dict/tasks/main.yml b/tests/integration/targets/filter_groupby_as_dict/tasks/main.yml index f4047f4ac6..f420d671f2 100644 --- a/tests/integration/targets/filter_groupby_as_dict/tasks/main.yml +++ b/tests/integration/targets/filter_groupby_as_dict/tasks/main.yml @@ -16,7 +16,7 @@ - assert: 
that: - - result.msg == 'Input is not a sequence' + - result.msg is search('Input is not a sequence') - name: 'Test error: list element not a mapping' set_fact: @@ -26,7 +26,7 @@ - assert: that: - - "result.msg == 'Sequence element #0 is not a mapping'" + - "result.msg is search('Sequence element #0 is not a mapping')" - name: 'Test error: list element does not have attribute' set_fact: @@ -36,7 +36,7 @@ - assert: that: - - "result.msg == 'Attribute not contained in element #1 of sequence'" + - "result.msg is search('Attribute not contained in element #1 of sequence')" - name: 'Test error: attribute collision' set_fact: @@ -46,4 +46,4 @@ - assert: that: - - result.msg == "Multiple sequence entries have attribute value 'a'" or result.msg == "Multiple sequence entries have attribute value u'a'" + - result.msg is search("Multiple sequence entries have attribute value u?'a'") diff --git a/tests/integration/targets/filter_hashids/tasks/main.yml b/tests/integration/targets/filter_hashids/tasks/main.yml index 4a76540f6b..3cd9106aa7 100644 --- a/tests/integration/targets/filter_hashids/tasks/main.yml +++ b/tests/integration/targets/filter_hashids/tasks/main.yml @@ -31,7 +31,7 @@ - name: Register result of invalid salt debug: - var: "invalid_input | community.general.hashids_encode(salt=10)" + var: "single_int | community.general.hashids_encode(salt=10)" register: invalid_salt_message ignore_errors: true @@ -42,7 +42,7 @@ - name: Register result of invalid alphabet debug: - var: "invalid_input | community.general.hashids_encode(alphabet='abc')" + var: "single_int | community.general.hashids_encode(alphabet='abc')" register: invalid_alphabet_message ignore_errors: true @@ -53,7 +53,7 @@ - name: Register result of invalid min_length debug: - var: "invalid_input | community.general.hashids_encode(min_length='foo')" + var: "single_int | community.general.hashids_encode(min_length='foo')" register: invalid_min_length_message ignore_errors: true diff --git a/tests/integration/targets/filter_jc/aliases b/tests/integration/targets/filter_jc/aliases index a39321e96d..978a58095d 100644 --- a/tests/integration/targets/filter_jc/aliases +++ b/tests/integration/targets/filter_jc/aliases @@ -7,3 +7,5 @@ skip/python2.7 # jc only supports python3.x skip/freebsd13.3 # FIXME - ruyaml compilation fails skip/freebsd14.0 # FIXME - ruyaml compilation fails skip/freebsd14.1 # FIXME - ruyaml compilation fails +skip/freebsd14.2 # FIXME - ruyaml compilation fails +skip/freebsd14.3 # FIXME - ruyaml compilation fails diff --git a/tests/integration/targets/filter_json_patch/runme.sh b/tests/integration/targets/filter_json_patch/runme.sh new file mode 100755 index 0000000000..d591ee3289 --- /dev/null +++ b/tests/integration/targets/filter_json_patch/runme.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +set -eux + +source virtualenv.sh + +# Requirements have to be installed prior to running ansible-playbook +# because plugins and requirements are loaded before the task runs + +pip install jsonpatch + +ANSIBLE_ROLES_PATH=../ ansible-playbook runme.yml "$@" diff --git a/tests/integration/targets/filter_json_patch/runme.yml b/tests/integration/targets/filter_json_patch/runme.yml new file mode 100644 index 0000000000..f98c70f697 --- /dev/null +++ b/tests/integration/targets/filter_json_patch/runme.yml @@ -0,0 +1,8 @@ +--- +# Copyright (c) Ansible 
Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- hosts: localhost + roles: + - { role: filter_json_patch } diff --git a/tests/integration/targets/filter_json_patch/tasks/main.yml b/tests/integration/targets/filter_json_patch/tasks/main.yml new file mode 100644 index 0000000000..014133acad --- /dev/null +++ b/tests/integration/targets/filter_json_patch/tasks/main.yml @@ -0,0 +1,137 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Test json_patch + assert: + that: + - > # Insert a new element into an array at a specified index + list_input | + community.general.json_patch("add", "/1", {"baz": "qux"}) + == + [{"foo": {"one": 1}}, {"baz": "qux"}, {"bar": {"two": 2}}] + - > # Insert a new key into a dictionary + dict_input | + community.general.json_patch("add", "/bar/baz", "qux") + == + {"foo": {"one": 1}, "bar": {"baz": "qux", "two": 2}} + - > # Input is a string + '{ "foo": { "one": 1 }, "bar": { "two": 2 } }' | + community.general.json_patch("add", "/bar/baz", "qux") + == + {"foo": {"one": 1}, "bar": {"baz": "qux", "two": 2}} + - > # Existing key is replaced + dict_input | + community.general.json_patch("add", "/bar", "qux") + == + {"foo": {"one": 1}, "bar": "qux"} + - > # Escaping tilde as ~0 and slash as ~1 in the path + {} | + community.general.json_patch("add", "/~0~1", "qux") + == + {"~/": "qux"} + - > # Add at the end of the array + [1, 2, 3] | + community.general.json_patch("add", "/-", 4) + == + [1, 2, 3, 4] + - > # Remove a key + dict_input | + community.general.json_patch("remove", "/bar") + == + {"foo": {"one": 1} } + - > # Replace a value + dict_input | + community.general.json_patch("replace", "/bar", 2) + == + {"foo": {"one": 1}, "bar": 2} + - > # Copy a value + dict_input | + community.general.json_patch("copy", "/baz", from="/bar") + == + {"foo": {"one": 1}, "bar": { "two": 2 }, "baz": { "two": 2 }} + - > # Move a value + dict_input | + community.general.json_patch("move", "/baz", from="/bar") + == + {"foo": {"one": 1}, "baz": { "two": 2 }} + - > # Successful test + dict_input | + community.general.json_patch("test", "/bar/two", 2) | + ternary("OK", "Failed") + == + "OK" + - > # Unsuccessful test + dict_input | + community.general.json_patch("test", "/bar/two", 9) | + ternary("OK", "Failed") + == + "Failed" + vars: + list_input: + - foo: { one: 1 } + - bar: { two: 2 } + dict_input: + foo: { one: 1 } + bar: { two: 2 } + +- name: Test json_patch_recipe + assert: + that: + - > # List of operations + input | + community.general.json_patch_recipe(operations) + == + {"bar":[2],"bax":1,"bay":1,"baz":[10,20,30]} + vars: + input: {} + operations: + - op: 'add' + path: '/foo' + value: 1 + - op: 'add' + path: '/bar' + value: [] + - op: 'add' + path: '/bar/-' + value: 2 + - op: 'add' + path: '/bar/0' + value: 1 + - op: 'remove' + path: '/bar/0' + - op: 'move' + from: '/foo' + path: '/baz' + - op: 'copy' + from: '/baz' + path: '/bax' + - op: 'copy' + from: '/baz' + path: '/bay' + - op: 'replace' + path: '/baz' + value: [10, 20, 30] +
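+# Editorial sketch, not part of the original patch: json_patch_recipe applies its RFC 6902 operations in order, so each op sees the output of the previous one. +# Minimal chained example; assumes the jsonpatch library installed by runme.sh above. +- name: Sketch - chain add and remove in a single recipe + assert: + that: + - > + {"a": 1} | + community.general.json_patch_recipe([{"op": "add", "path": "/b", "value": 2}, {"op": "remove", "path": "/a"}]) + == + {"b": 2} +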
+- name: Test json_diff + assert: + that: # The order in the result array is not stable, sort by path + - > + input | + community.general.json_diff(target) | + sort(attribute='path') + == + [ + {"op": "add", "path": "/baq", "value": {"baz": 2}}, + {"op": "remove", "path": "/baw/1"}, + {"op": "replace", "path": "/hello", "value": "night"}, + ] + vars: + input: {"foo": 1, "bar":{"baz": 2}, "baw": [1, 2, 3], "hello": "day"} + target: {"foo": 1, "bar": {"baz": 2}, "baw": [1, 3], "baq": {"baz": 2}, "hello": "night"} diff --git a/tests/integration/targets/filter_keep_keys/tasks/tests.yml b/tests/integration/targets/filter_keep_keys/tasks/tests.yml index fa821702f0..bebfc5fcbf 100644 --- a/tests/integration/targets/filter_keep_keys/tasks/tests.yml +++ b/tests/integration/targets/filter_keep_keys/tasks/tests.yml @@ -6,7 +6,7 @@ - name: Debug ansible_version ansible.builtin.debug: var: ansible_version - when: not quite_test | d(true) | bool + when: not (quiet_test | default(true) | bool) tags: ansible_version - name: Tests @@ -19,13 +19,13 @@ fail_msg: | [ERR] result: {{ result | to_yaml }} - quiet: "{{ quiet_test | d(true) | bool }}" + quiet: "{{ quiet_test | default(true) | bool }}" loop: "{{ tests | subelements('group') }}" loop_control: loop_var: i - label: "{{ i.1.mp | d('default') }}: {{ i.1.tt }}" + label: "{{ i.1.mp | default('default') }}: {{ i.1.tt }}" vars: input: "{{ i.0.input }}" target: "{{ i.1.tt }}" - mp: "{{ i.1.mp | d('default') }}" + mp: "{{ i.1.mp | default('default') }}" result: "{{ lookup('template', i.0.template) }}" diff --git a/tests/integration/targets/filter_keep_keys/vars/main/tests.yml b/tests/integration/targets/filter_keep_keys/vars/main/tests.yml index f1abceddda..c480a675d0 100644 --- a/tests/integration/targets/filter_keep_keys/vars/main/tests.yml +++ b/tests/integration/targets/filter_keep_keys/vars/main/tests.yml @@ -11,8 +11,8 @@ tests: - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} result: - - {k0_x0: A0, k1_x1: B0} - - {k0_x0: A1, k1_x1: B1} + - {k0_x0: A0, k1_x1: B0} + - {k0_x0: A1, k1_x1: B1} - template: mp.j2 group: - {mp: equal, tt: [k0_x0, k1_x1], d: Match keys that equal any of the items in the target.} @@ -24,8 +24,8 @@ tests: - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} result: - - {k0_x0: A0, k1_x1: B0} - - {k0_x0: A1, k1_x1: B1} + - {k0_x0: A0, k1_x1: B0} + - {k0_x0: A1, k1_x1: B1} - template: mp.j2 group: - {mp: equal, tt: k0_x0, d: Match keys that equal the target.} @@ -36,5 +36,5 @@ tests: - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} result: - - {k0_x0: A0} - - {k0_x0: A1} + - {k0_x0: A0} + - {k0_x0: A1} diff --git a/tests/integration/targets/filter_lists_mergeby/tasks/lists_mergeby_2-10.yml b/tests/integration/targets/filter_lists_mergeby/tasks/lists_mergeby_2-10.yml index 62896e1b01..1e000c9753 100644 --- a/tests/integration/targets/filter_lists_mergeby/tasks/lists_mergeby_2-10.yml +++ b/tests/integration/targets/filter_lists_mergeby/tasks/lists_mergeby_2-10.yml @@ -16,7 +16,7 @@ {{ my_list|to_nice_yaml|indent(2) }} my_list|difference(result101): {{ my_list|difference(result101)|to_nice_yaml|indent(2) }} - when: debug_test|d(false)|bool + when: debug_test|default(false)|bool - name: Merge 2 lists by attribute name. list_merge='keep'. 
assert assert: that: my_list | difference(result101) | length == 0 @@ -35,7 +35,7 @@ {{ my_list|to_nice_yaml|indent(2) }} my_list|difference(result102): {{ my_list|difference(result102)|to_nice_yaml|indent(2) }} - when: debug_test|d(false)|bool + when: debug_test|default(false)|bool - name: Merge 2 lists by attribute name. list_merge='append'. assert assert: that: my_list | difference(result102) | length == 0 @@ -54,7 +54,7 @@ {{ my_list|to_nice_yaml|indent(2) }} my_list|difference(result103): {{ my_list|difference(result103)|to_nice_yaml|indent(2) }} - when: debug_test|d(false)|bool + when: debug_test|default(false)|bool - name: Merge 2 lists by attribute name. list_merge='prepend'. assert assert: that: my_list | difference(result103) | length == 0 @@ -73,7 +73,7 @@ {{ my_list|to_nice_yaml|indent(2) }} my_list|difference(result104): {{ my_list|difference(result104)|to_nice_yaml|indent(2) }} - when: debug_test|d(false)|bool + when: debug_test|default(false)|bool - name: Merge 2 lists by attribute name. list_merge='append_rp'. assert assert: that: my_list | difference(result104) | length == 0 @@ -92,7 +92,7 @@ {{ my_list|to_nice_yaml|indent(2) }} my_list|difference(result105): {{ my_list|difference(result105)|to_nice_yaml|indent(2) }} - when: debug_test|d(false)|bool + when: debug_test|default(false)|bool - name: Merge 2 lists by attribute name. list_merge='prepend_rp'. assert assert: that: my_list | difference(result105) | length == 0 @@ -115,7 +115,7 @@ {{ my_list|to_nice_yaml|indent(2) }} my_list|difference(result200): {{ my_list|difference(result200)|to_nice_yaml|indent(2) }} - when: debug_test|d(false)|bool + when: debug_test|default(false)|bool - name: Merge by name. recursive=True list_merge='append_rp'. assert assert: that: my_list | difference(result200) | length == 0 @@ -136,7 +136,7 @@ {{ my_list|to_nice_yaml|indent(2) }} my_list|difference(result201): {{ my_list|difference(result201)|to_nice_yaml|indent(2) }} - when: debug_test|d(false)|bool + when: debug_test|default(false)|bool - name: Merge by name. recursive=False list_merge='append_rp'. assert assert: that: my_list | difference(result201) | length == 0 diff --git a/tests/integration/targets/filter_lists_mergeby/tasks/lists_mergeby_default.yml b/tests/integration/targets/filter_lists_mergeby/tasks/lists_mergeby_default.yml index 93917c97cc..b0484af2f2 100644 --- a/tests/integration/targets/filter_lists_mergeby/tasks/lists_mergeby_default.yml +++ b/tests/integration/targets/filter_lists_mergeby/tasks/lists_mergeby_default.yml @@ -6,7 +6,7 @@ - name: Debug ansible_version debug: var: ansible_version - when: debug_test|d(false)|bool + when: debug_test|default(false)|bool tags: t0 - name: 1. 
Test lists merged by attribute name @@ -14,7 +14,7 @@ - name: Test lists merged by attribute name debug debug: msg: "{{ list1 | community.general.lists_mergeby(list2, 'name') }}" - when: debug_test|d(false)|bool + when: debug_test|default(false)|bool - name: Test lists merged by attribute name assert assert: that: @@ -27,7 +27,7 @@ - name: Test list1 empty debug debug: msg: "{{ [] | community.general.lists_mergeby(list2, 'name') }}" - when: debug_test|d(false)|bool + when: debug_test|default(false)|bool - name: Test list1 empty assert assert: that: @@ -40,7 +40,7 @@ - name: Test all lists empty debug debug: msg: "{{ [] | community.general.lists_mergeby([], 'name') }}" - when: debug_test|d(false)|bool + when: debug_test|default(false)|bool - name: Test all lists empty assert assert: that: @@ -58,7 +58,7 @@ - name: First argument must be list debug debug: var: my_list - when: debug_test|d(false)|bool + when: debug_test|default(false)|bool - name: First argument must be list assert assert: that: @@ -76,7 +76,7 @@ - name: Second argument must be list set debug debug: var: my_list - when: debug_test|d(false)|bool + when: debug_test|default(false)|bool - name: Second argument must be list set assert assert: that: @@ -94,7 +94,7 @@ - name: First arguments after the lists must be string debug debug: var: my_list - when: debug_test|d(false)|bool + when: debug_test|default(false)|bool - name: First arguments after the lists must be string assert assert: that: @@ -112,7 +112,7 @@ - name: Elements of list must be dictionaries debug debug: var: my_list - when: debug_test|d(false)|bool + when: debug_test|default(false)|bool - name: Elements of list must be dictionaries assert assert: that: @@ -128,7 +128,7 @@ - name: Merge 3 lists by attribute name. 1 list in params. debug debug: var: my_list - when: debug_test|d(false)|bool + when: debug_test|default(false)|bool - name: Merge 3 lists by attribute name. 1 list in params. assert assert: that: my_list | difference(result1) | length == 0 @@ -142,7 +142,7 @@ - name: Merge 3 lists by attribute name. No list in the params. debug debug: var: my_list - when: debug_test|d(false)|bool + when: debug_test|default(false)|bool - name: Merge 3 lists by attribute name. No list in the params. assert assert: that: my_list | difference(result1) | length == 0 @@ -162,7 +162,7 @@ {{ my_list|to_nice_yaml|indent(2) }} my_list|difference(result100): {{ my_list|difference(result100)|to_nice_yaml|indent(2) }} - when: debug_test|d(false)|bool + when: debug_test|default(false)|bool
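+# Editorial sketch, not part of the original change: lists_mergeby joins lists of dicts on a key attribute; list_merge controls how colliding list values combine. +# Example: [{'name': 'a', 'x': [1]}] | community.general.lists_mergeby([{'name': 'a', 'x': [2]}], 'name', list_merge='append') evaluates to [{'name': 'a', 'x': [1, 2]}].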
- name: Merge 2 lists by attribute name. list_merge='replace'. assert assert: that: my_list | difference(result100) | length == 0 diff --git a/tests/integration/targets/filter_lists_mergeby/tasks/main.yml b/tests/integration/targets/filter_lists_mergeby/tasks/main.yml index d0bda368cd..f599c2d93e 100644 --- a/tests/integration/targets/filter_lists_mergeby/tasks/main.yml +++ b/tests/integration/targets/filter_lists_mergeby/tasks/main.yml @@ -6,6 +6,5 @@ - name: Test list_merge default options import_tasks: lists_mergeby_default.yml -- name: Test list_merge non-default options in Ansible 2.10 and higher +- name: Test list_merge non-default options import_tasks: lists_mergeby_2-10.yml - when: ansible_version.full is version('2.10', '>=') diff --git a/tests/integration/targets/filter_random_mac/tasks/main.yml b/tests/integration/targets/filter_random_mac/tasks/main.yml index 230f9776d5..583ef498a3 100644 --- a/tests/integration/targets/filter_random_mac/tasks/main.yml +++ b/tests/integration/targets/filter_random_mac/tasks/main.yml @@ -19,7 +19,7 @@ assert: that: - _bad_random_mac_filter is failed - - "_bad_random_mac_filter.msg is match('Invalid value type (.*int.*) for random_mac .*')" + - "_bad_random_mac_filter.msg is search('Invalid value type (.*int.*) for random_mac .*')" - name: Test random_mac filter bad argument value debug: @@ -31,7 +31,7 @@ assert: that: - _bad_random_mac_filter is failed - - "_bad_random_mac_filter.msg is match('Invalid value (.*) for random_mac: .* not hexa byte')" + - "_bad_random_mac_filter.msg is search('Invalid value (.*) for random_mac: .* not hexa byte')" - name: Test random_mac filter prefix too big debug: @@ -43,17 +43,17 @@ assert: that: - _bad_random_mac_filter is failed - - "_bad_random_mac_filter.msg is match('Invalid value (.*) for random_mac: 5 colon.* separated items max')" + - "_bad_random_mac_filter.msg is search('Invalid value (.*) for random_mac: 5 colon.* separated items max')" -- name: Verify random_mac filter +- name: Verify random_mac filter assert: that: - - "'00' | community.general.random_mac is match('^00:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]$')" - - "'00:00' | community.general.random_mac is match('^00:00:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]$')" - - "'00:00:00' | community.general.random_mac is match('^00:00:00:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]$')" - - "'00:00:00:00' | community.general.random_mac is match('^00:00:00:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]$')" - - "'00:00:00:00:00' | community.general.random_mac is match('^00:00:00:00:00:[a-f0-9][a-f0-9]$')" - - "'00:00:00' | community.general.random_mac != '00:00:00' | community.general.random_mac" + - "'00' | community.general.random_mac is match('^00:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]$')" + - "'00:00' | community.general.random_mac is match('^00:00:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]$')" + - "'00:00:00' | community.general.random_mac is match('^00:00:00:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]$')" + - "'00:00:00:00' | community.general.random_mac is match('^00:00:00:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]$')" + - "'00:00:00:00:00' | community.general.random_mac is match('^00:00:00:00:00:[a-f0-9][a-f0-9]$')" + - "'00:00:00' | community.general.random_mac != '00:00:00' | community.general.random_mac" - name: Verify random_mac filter with seed assert: diff --git a/tests/integration/targets/filter_remove_keys/tasks/tests.yml 
b/tests/integration/targets/filter_remove_keys/tasks/tests.yml index fa821702f0..bebfc5fcbf 100644 --- a/tests/integration/targets/filter_remove_keys/tasks/tests.yml +++ b/tests/integration/targets/filter_remove_keys/tasks/tests.yml @@ -6,7 +6,7 @@ - name: Debug ansible_version ansible.builtin.debug: var: ansible_version - when: not quite_test | d(true) | bool + when: not (quiet_test | default(true) | bool) tags: ansible_version - name: Tests @@ -19,13 +19,13 @@ fail_msg: | [ERR] result: {{ result | to_yaml }} - quiet: "{{ quiet_test | d(true) | bool }}" + quiet: "{{ quiet_test | default(true) | bool }}" loop: "{{ tests | subelements('group') }}" loop_control: loop_var: i - label: "{{ i.1.mp | d('default') }}: {{ i.1.tt }}" + label: "{{ i.1.mp | default('default') }}: {{ i.1.tt }}" vars: input: "{{ i.0.input }}" target: "{{ i.1.tt }}" - mp: "{{ i.1.mp | d('default') }}" + mp: "{{ i.1.mp | default('default') }}" result: "{{ lookup('template', i.0.template) }}" diff --git a/tests/integration/targets/filter_remove_keys/vars/main/tests.yml b/tests/integration/targets/filter_remove_keys/vars/main/tests.yml index a4767ea799..45b89ba62d 100644 --- a/tests/integration/targets/filter_remove_keys/vars/main/tests.yml +++ b/tests/integration/targets/filter_remove_keys/vars/main/tests.yml @@ -37,4 +37,4 @@ tests: - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} result: - {k1_x1: B0, k2_x2: [C0], k3_x3: foo} - - {k1_x1: B1, k2_x2: [C1], k3_x3: bar} + - {k1_x1: B1, k2_x2: [C1], k3_x3: bar} diff --git a/tests/integration/targets/filter_replace_keys/tasks/tests.yml b/tests/integration/targets/filter_replace_keys/tasks/tests.yml index fa821702f0..bebfc5fcbf 100644 --- a/tests/integration/targets/filter_replace_keys/tasks/tests.yml +++ b/tests/integration/targets/filter_replace_keys/tasks/tests.yml @@ -6,7 +6,7 @@ - name: Debug ansible_version ansible.builtin.debug: var: ansible_version - when: not quite_test | d(true) | bool + when: not (quiet_test | default(true) | bool) tags: ansible_version - name: Tests @@ -19,13 +19,13 @@ fail_msg: | [ERR] result: {{ result | to_yaml }} - quiet: "{{ quiet_test | d(true) | bool }}" + quiet: "{{ quiet_test | default(true) | bool }}" loop: "{{ tests | subelements('group') }}" loop_control: loop_var: i - label: "{{ i.1.mp | d('default') }}: {{ i.1.tt }}" + label: "{{ i.1.mp | default('default') }}: {{ i.1.tt }}" vars: input: "{{ i.0.input }}" target: "{{ i.1.tt }}" - mp: "{{ i.1.mp | d('default') }}" + mp: "{{ i.1.mp | default('default') }}" result: "{{ lookup('template', i.0.template) }}" diff --git a/tests/integration/targets/filter_replace_keys/vars/main/tests.yml b/tests/integration/targets/filter_replace_keys/vars/main/tests.yml index ca906a770b..a6e04f3b2e 100644 --- a/tests/integration/targets/filter_replace_keys/vars/main/tests.yml +++ b/tests/integration/targets/filter_replace_keys/vars/main/tests.yml @@ -14,8 +14,8 @@ tests: - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} result: - - {a0: A0, a1: B0, k2_x2: [C0], k3_x3: foo} - - {a0: A1, a1: B1, k2_x2: [C1], k3_x3: bar} + - {a0: A0, a1: B0, k2_x2: [C0], k3_x3: foo} + - {a0: A1, a1: B1, k2_x2: [C1], k3_x3: bar} - template: mp.j2 group: - d: Replace keys that starts with any of the attributes before. 
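Note (editorial sketch, not part of the patch): in these test vectors, mp feeds the filter's matching_parameter option and tt its target. A standalone call in the default 'equal' mode, reusing the key names from the vectors above, could look like: - name: Rename k0_x0/k1_x1 to a0/a1 (sketch) ansible.builtin.debug: msg: "{{ [{'k0_x0': 'A0', 'k1_x1': 'B0'}] | community.general.replace_keys(target=[{'before': 'k0_x0', 'after': 'a0'}, {'before': 'k1_x1', 'after': 'a1'}]) }}" which prints [{'a0': 'A0', 'a1': 'B0'}].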
@@ -37,8 +37,8 @@ tests: - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} result: - - {a0: A0, a1: B0, k2_x2: [C0], k3_x3: foo} - - {a0: A1, a1: B1, k2_x2: [C1], k3_x3: bar} + - {a0: A0, a1: B0, k2_x2: [C0], k3_x3: foo} + - {a0: A1, a1: B1, k2_x2: [C1], k3_x3: bar} - template: mp.j2 group: - d: If more keys match the same attribute before the last one will be used. @@ -54,8 +54,8 @@ tests: - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} result: - - X: foo - - X: bar + - X: foo + - X: bar - template: mp.j2 group: - d: If there are more matches for a key the first one will be used. diff --git a/tests/integration/targets/filter_reveal_ansible_type/tasks/tasks.yml b/tests/integration/targets/filter_reveal_ansible_type/tasks/tasks.yml index 37d3abcb71..48a819f62d 100644 --- a/tests/integration/targets/filter_reveal_ansible_type/tasks/tasks.yml +++ b/tests/integration/targets/filter_reveal_ansible_type/tasks/tasks.yml @@ -2,53 +2,60 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -# Substitution converts str to AnsibleUnicode -# ------------------------------------------- +# Substitution converts str to AnsibleUnicode/_AnsibleTaggedStr +# ------------------------------------------------------------- -- name: String. AnsibleUnicode. +- name: String. AnsibleUnicode/_AnsibleTaggedStr. assert: - that: result == dtype - success_msg: '"abc" is {{ dtype }}' - fail_msg: '"abc" is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + that: result in dtype + success_msg: '"abc" is one of {{ dtype }}' + fail_msg: '"abc" is {{ result }}, not one of {{ dtype }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: data: "abc" result: '{{ data | community.general.reveal_ansible_type }}' - dtype: 'AnsibleUnicode' + dtype: + - 'AnsibleUnicode' + - '_AnsibleTaggedStr' -- name: String. AnsibleUnicode alias str. +- name: String. AnsibleUnicode/_AnsibleTaggedStr alias str. assert: - that: result == dtype - success_msg: '"abc" is {{ dtype }}' - fail_msg: '"abc" is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + that: result in dtype + success_msg: '"abc" is one of {{ dtype }}' + fail_msg: '"abc" is {{ result }}, not one of {{ dtype }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: - alias: {"AnsibleUnicode": "str"} + alias: {"AnsibleUnicode": "str", "_AnsibleTaggedStr": "str"} data: "abc" result: '{{ data | community.general.reveal_ansible_type(alias) }}' - dtype: 'str' + dtype: + - 'str' -- name: List. All items are AnsibleUnicode. +- name: List. All items are AnsibleUnicode/_AnsibleTaggedStr. assert: - that: result == dtype - success_msg: '["a", "b", "c"] is {{ dtype }}' - fail_msg: '["a", "b", "c"] is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + that: result in dtype + success_msg: '["a", "b", "c"] is one of {{ dtype }}' + fail_msg: '["a", "b", "c"] is {{ result }}, not one of {{ dtype }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: data: ["a", "b", "c"] result: '{{ data | community.general.reveal_ansible_type }}' - dtype: 'list[AnsibleUnicode]' + dtype: + - 'list[AnsibleUnicode]' + - 'list[_AnsibleTaggedStr]' -- name: Dictionary. All keys are AnsibleUnicode. All values are AnsibleUnicode. +- name: Dictionary. All keys and values are AnsibleUnicode/_AnsibleTaggedStr. 
assert: - that: result == dtype - success_msg: '{"a": "foo", "b": "bar", "c": "baz"} is {{ dtype }}' - fail_msg: '{"a": "foo", "b": "bar", "c": "baz"} is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + that: result in dtype + success_msg: '{"a": "foo", "b": "bar", "c": "baz"} is one of {{ dtype }}' + fail_msg: '{"a": "foo", "b": "bar", "c": "baz"} is {{ result }}, not one of {{ dtype }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: data: {"a": "foo", "b": "bar", "c": "baz"} result: '{{ data | community.general.reveal_ansible_type }}' - dtype: 'dict[AnsibleUnicode, AnsibleUnicode]' + dtype: + - 'dict[AnsibleUnicode, AnsibleUnicode]' + - 'dict[_AnsibleTaggedStr, _AnsibleTaggedStr]' # No substitution and no alias. Type of strings is str # ---------------------------------------------------- @@ -57,8 +64,8 @@ assert: that: result == dtype success_msg: '"abc" is {{ dtype }}' - fail_msg: '"abc" is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + fail_msg: '"abc" is {{ result }}, not {{ dtype }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: result: '{{ "abc" | community.general.reveal_ansible_type }}' dtype: str @@ -67,8 +74,8 @@ assert: that: result == dtype success_msg: '123 is {{ dtype }}' - fail_msg: '123 is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + fail_msg: '123 is {{ result }}, not {{ dtype }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: result: '{{ 123 | community.general.reveal_ansible_type }}' dtype: int @@ -77,8 +84,8 @@ assert: that: result == dtype success_msg: '123.45 is {{ dtype }}' - fail_msg: '123.45 is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + fail_msg: '123.45 is {{ result }}, not {{ dtype }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: result: '{{ 123.45 | community.general.reveal_ansible_type }}' dtype: float @@ -87,8 +94,8 @@ assert: that: result == dtype success_msg: 'true is {{ dtype }}' - fail_msg: 'true is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + fail_msg: 'true is {{ result }}, not {{ dtype }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: result: '{{ true | community.general.reveal_ansible_type }}' dtype: bool @@ -97,8 +104,8 @@ assert: that: result == dtype success_msg: '["a", "b", "c"] is {{ dtype }}' - fail_msg: '["a", "b", "c"] is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + fail_msg: '["a", "b", "c"] is {{ result }}, not {{ dtype }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: result: '{{ ["a", "b", "c"] | community.general.reveal_ansible_type }}' dtype: list[str] @@ -107,8 +114,8 @@ assert: that: result == dtype success_msg: '[{"a": 1}, {"b": 2}] is {{ dtype }}' - fail_msg: '[{"a": 1}, {"b": 2}] is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + fail_msg: '[{"a": 1}, {"b": 2}] is {{ result }}, not {{ dtype }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: result: '{{ [{"a": 1}, {"b": 2}] | community.general.reveal_ansible_type }}' dtype: list[dict] @@ -117,8 +124,8 @@ assert: that: result == dtype success_msg: '{"a": 1} is {{ dtype }}' - fail_msg: '{"a": 1} is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + fail_msg: '{"a": 1} is {{ result }}, not {{ dtype }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: result: '{{ {"a": 1} | community.general.reveal_ansible_type }}' dtype: dict[str, int] @@ -127,23 +134,23 @@ assert: that: result == dtype success_msg: '{"a": 1, "b": 2} is {{ dtype }}' - fail_msg: '{"a": 1, "b": 2} is {{ result }}' - quiet: '{{ 
quiet_test | d(true) | bool }}' + fail_msg: '{"a": 1, "b": 2} is {{ result }}, not {{ dtype }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: result: '{{ {"a": 1, "b": 2} | community.general.reveal_ansible_type }}' dtype: dict[str, int] -# Type of strings is AnsibleUnicode or str -# ---------------------------------------- +# Type of strings is AnsibleUnicode/_AnsibleTaggedStr or str +# ---------------------------------------------------------- - name: Dictionary. The keys are integers or strings. All values are strings. assert: that: result == dtype success_msg: 'data is {{ dtype }}' - fail_msg: 'data is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + fail_msg: 'data is {{ result }}, not {{ dtype }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: - alias: {"AnsibleUnicode": "str"} + alias: {"AnsibleUnicode": "str", "_AnsibleTaggedStr": "str", "_AnsibleTaggedInt": "int"} data: {1: 'a', 'b': 'b'} result: '{{ data | community.general.reveal_ansible_type(alias) }}' dtype: dict[int|str, str] @@ -152,10 +159,10 @@ assert: that: result == dtype success_msg: 'data is {{ dtype }}' - fail_msg: 'data is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + fail_msg: 'data is {{ result }}, not {{ dtype }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: - alias: {"AnsibleUnicode": "str"} + alias: {"AnsibleUnicode": "str", "_AnsibleTaggedStr": "str", "_AnsibleTaggedInt": "int"} data: {1: 'a', 2: 'b'} result: '{{ data | community.general.reveal_ansible_type(alias) }}' dtype: dict[int, str] @@ -164,11 +171,11 @@ assert: that: result == dtype success_msg: 'data is {{ dtype }}' - fail_msg: 'data is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + fail_msg: 'data is {{ result }}, not {{ dtype }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: - alias: {"AnsibleUnicode": "str"} - data: {'a': 1, 'b': 1.1, 'c': 'abc', 'd': True, 'e': ['x', 'y', 'z'], 'f': {'x': 1, 'y': 2}} + alias: {"AnsibleUnicode": "str", "_AnsibleTaggedStr": "str", "_AnsibleTaggedInt": "int", "_AnsibleTaggedFloat": "float"} + data: {'a': 1, 'b': 1.1, 'c': 'abc', 'd': true, 'e': ['x', 'y', 'z'], 'f': {'x': 1, 'y': 2}} result: '{{ data | community.general.reveal_ansible_type(alias) }}' dtype: dict[str, bool|dict|float|int|list|str] @@ -176,10 +183,10 @@ assert: that: result == dtype success_msg: 'data is {{ dtype }}' - fail_msg: 'data is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + fail_msg: 'data is {{ result }}, not {{ dtype }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: - alias: {"AnsibleUnicode": "str"} - data: [1, 2, 1.1, 'abc', True, ['x', 'y', 'z'], {'x': 1, 'y': 2}] + alias: {"AnsibleUnicode": "str", "_AnsibleTaggedStr": "str", "_AnsibleTaggedInt": "int", "_AnsibleTaggedFloat": "float"} + data: [1, 2, 1.1, 'abc', true, ['x', 'y', 'z'], {'x': 1, 'y': 2}] result: '{{ data | community.general.reveal_ansible_type(alias) }}' dtype: list[bool|dict|float|int|list|str] diff --git a/tests/integration/targets/filter_to_prettytable/aliases b/tests/integration/targets/filter_to_prettytable/aliases new file mode 100644 index 0000000000..afda346c4e --- /dev/null +++ b/tests/integration/targets/filter_to_prettytable/aliases @@ -0,0 +1,5 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +azp/posix/1 diff --git a/tests/integration/targets/filter_to_prettytable/tasks/main.yml 
b/tests/integration/targets/filter_to_prettytable/tasks/main.yml new file mode 100644 index 0000000000..95d4118e27 --- /dev/null +++ b/tests/integration/targets/filter_to_prettytable/tasks/main.yml @@ -0,0 +1,658 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Copyright (c) 2025, Timur Gadiev (tgadiev@gmail.com) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Install required libs + pip: + name: prettytable + state: present + delegate_to: localhost + become: false + +- name: Set test data + set_fact: + test_data: + - name: Alice + age: 25 + role: admin + - name: Bob + age: 30 + role: user + data_for_align: + - date: 2023-01-01 + description: Office supplies + amount: 123.45 + +# Test basic functionality +- name: Test basic table creation + set_fact: + basic_table: '{{ test_data | community.general.to_prettytable }}' + expected_basic_table: |- + +-------+-----+-------+ + | name | age | role | + +-------+-----+-------+ + | Alice | 25 | admin | + | Bob | 30 | user | + +-------+-----+-------+ + +- name: Verify basic table output + assert: + that: + - basic_table == expected_basic_table + +# Test column ordering +- name: Test column ordering + set_fact: + ordered_table: "{{ test_data | community.general.to_prettytable(column_order=['role', 'name', 'age']) }}" + expected_ordered_table: |- + +-------+-------+-----+ + | role | name | age | + +-------+-------+-----+ + | admin | Alice | 25 | + | user | Bob | 30 | + +-------+-------+-----+ + +- name: Verify ordered table output + assert: + that: + - ordered_table == expected_ordered_table + +# Test selective column ordering (subset of keys) +- name: Test selective column ordering + set_fact: + selective_ordered_table: "{{ test_data | community.general.to_prettytable(column_order=['name', 'role']) }}" + expected_selective_table: |- + +-------+-------+ + | name | role | + +-------+-------+ + | Alice | admin | + | Bob | user | + +-------+-------+ + +- name: Verify selective column ordering + assert: + that: + - selective_ordered_table == expected_selective_table + +# Test custom headers +- name: Test custom headers + set_fact: + headers_table: "{{ test_data | community.general.to_prettytable(header_names=['User Name', 'User Age', 'User Role']) }}" + expected_headers_table: |- + +-----------+----------+-----------+ + | User Name | User Age | User Role | + +-----------+----------+-----------+ + | Alice | 25 | admin | + | Bob | 30 | user | + +-----------+----------+-----------+ + +- name: Verify custom headers output + assert: + that: + - headers_table == expected_headers_table + +# Test selective column ordering with custom headers (subset of keys) +- name: Test selective column ordering with custom headers + set_fact: + selective_ordered_headers_table: "{{ test_data | community.general.to_prettytable(column_order=['name', 'role'], header_names=['User Name', 'User Role']) }}" + expected_selective_headers_table: |- + +-----------+-----------+ + | User Name | User Role | + +-----------+-----------+ + | Alice | admin | + | Bob | user | + +-----------+-----------+ + +- name: Verify selective column ordering with custom headers + assert: + that: + - selective_ordered_headers_table == 
expected_selective_headers_table + +# Test alignments +- name: Test column alignments + set_fact: + aligned_table: "{{ data_for_align | community.general.to_prettytable(column_alignments={'amount': 'right', 'description': 'left', 'date': 'center'}) }}" + expected_aligned_table: |- + +------------+-----------------+--------+ + | date | description | amount | + +------------+-----------------+--------+ + | 2023-01-01 | Office supplies | 123.45 | + +------------+-----------------+--------+ + +- name: Verify aligned table output + assert: + that: + - aligned_table == expected_aligned_table + +# Test combined options +- name: Test combined options + set_fact: + combined_table: "{{ test_data | community.general.to_prettytable( + column_order=['role', 'name', 'age'], + header_names=['Position', 'Full Name', 'Years'], + column_alignments={'role': 'left', 'name': 'center', 'age': 'right'}) }}" + expected_combined_table: |- + +----------+-----------+-------+ + | Position | Full Name | Years | + +----------+-----------+-------+ + | admin | Alice | 25 | + | user | Bob | 30 | + +----------+-----------+-------+ + +- name: Verify combined table output + assert: + that: + - combined_table == expected_combined_table + +# Test empty data +- name: Test empty data list with no parameters + set_fact: + empty_table: "{{ [] | community.general.to_prettytable }}" + expected_empty_table: |- + ++ + ++ + +- name: Verify empty table output + assert: + that: + - empty_table == expected_empty_table + +# Test empty data with column_order +- name: Test empty data list with column_order + set_fact: + empty_with_columns: "{{ [] | community.general.to_prettytable(column_order=['name', 'age', 'role']) }}" + expected_empty_with_columns: |- + +------+-----+------+ + | name | age | role | + +------+-----+------+ + +------+-----+------+ + +- name: Verify empty table with column_order + assert: + that: + - empty_with_columns == expected_empty_with_columns + +# Test empty data with header_names +- name: Test empty data list with header_names + set_fact: + empty_with_headers: "{{ [] | community.general.to_prettytable(header_names=['User Name', 'User Age', 'User Role']) }}" + expected_empty_with_headers: |- + +-----------+----------+-----------+ + | User Name | User Age | User Role | + +-----------+----------+-----------+ + +-----------+----------+-----------+ + +- name: Verify empty table with header_names + assert: + that: + - empty_with_headers == expected_empty_with_headers + +# Test empty data with combined parameters +- name: Test empty data with combined parameters + set_fact: + empty_combined: "{{ [] | community.general.to_prettytable( + column_order=['role', 'name', 'age'], + header_names=['Position', 'Full Name', 'Years'], + column_alignments={'role': 'left', 'name': 'center', 'age': 'right'}) }}" + expected_empty_combined: |- + +----------+-----------+-------+ + | Position | Full Name | Years | + +----------+-----------+-------+ + +----------+-----------+-------+ + +- name: Verify empty table with combined parameters + assert: + that: + - empty_combined == expected_empty_combined + +# Test validation with empty data +- name: Test empty data with non-list column_order (expect failure) + block: + - set_fact: + invalid_table: "{{ [] | community.general.to_prettytable(column_order=123) }}" + register: failure_result + ignore_errors: true + - name: Verify error message for empty data with invalid column_order + assert: + that: + - failure_result is failed + - > + "Expected a list of column names, got a int" in 
failure_result.msg + +- name: Test empty data with non-list header_names (expect failure) + block: + - set_fact: + invalid_table: "{{ [] | community.general.to_prettytable(header_names='invalid_headers') }}" + register: failure_result + ignore_errors: true + - name: Verify error message for empty data with invalid header_names + assert: + that: + - failure_result is failed + - > + "Expected a list of header names, got a string" in failure_result.msg + +- name: Test empty data with non-dictionary column_alignments (expect failure) + block: + - set_fact: + invalid_table: "{{ [] | community.general.to_prettytable(column_alignments='invalid') }}" + register: failure_result + ignore_errors: true + - name: Verify error message for empty data with invalid column_alignments + assert: + that: + - failure_result is failed + - > + "Expected a dictionary for column_alignments, got a string" in failure_result.msg + +- name: Test empty data with non-string values in column_alignments (expect failure) + block: + - set_fact: + invalid_table: "{{ [] | community.general.to_prettytable(column_alignments={'name': 123}) }}" + register: failure_result + ignore_errors: true + - name: Verify error message for empty data with non-string values in column_alignments + assert: + that: + - failure_result is failed + - > + "Expected a string for column_alignments value, got a int" in failure_result.msg + +- name: Test empty data with invalid alignment value in column_alignments (expect failure) + block: + - set_fact: + invalid_table: "{{ [] | community.general.to_prettytable(column_alignments={'name': 'invalid'}) }}" + register: failure_result + ignore_errors: true + - name: Verify error message for empty data with invalid alignment value + assert: + that: + - failure_result is failed + - > + "Invalid alignment 'invalid' in 'column_alignments'" in failure_result.msg + - > + "Valid alignments are" in failure_result.msg + +- name: Test empty data with mismatched column_order and header_names (expect failure) + block: + - set_fact: + invalid_table: "{{ [] | community.general.to_prettytable(column_order=['a', 'b', 'c'], header_names=['X', 'Y']) }}" + register: failure_result + ignore_errors: true + - name: Verify error message for empty data with mismatched lengths + assert: + that: + - failure_result is failed + - > + "'column_order' and 'header_names' must have the same number of elements" in failure_result.msg + +# Test error conditions +- name: Test non-list input (expect failure) + block: + - set_fact: + invalid_table: "{{ 'not_a_list' | community.general.to_prettytable }}" + register: failure_result + ignore_errors: true + - name: Verify error message for non-list input + assert: + that: + - failure_result is failed + - > + "Expected a list of dictionaries, got a string" in failure_result.msg + +- name: Test list with non-dictionary items (expect failure) + block: + - set_fact: + invalid_table: "{{ ['not_a_dict', 'also_not_a_dict'] | community.general.to_prettytable }}" + register: failure_result + ignore_errors: true + - name: Verify error message for non-dictionary items + assert: + that: + - failure_result is failed + - > + "Expected all items in the list to be dictionaries, got a string" in failure_result.msg + +- name: Test non-list column_order (expect failure) + block: + - set_fact: + invalid_table: "{{ test_data | community.general.to_prettytable(column_order=123) }}" + register: failure_result + ignore_errors: true + - name: Verify error message for non-list column_order + assert: + that: + - failure_result 
is failed + - > + "Expected a list of column names, got a int" in failure_result.msg + +- name: Test non-list header_names (expect failure) + block: + - set_fact: + invalid_table: "{{ test_data | community.general.to_prettytable(header_names='invalid_headers') }}" + register: failure_result + ignore_errors: true + - name: Verify error message for non-list header_names + assert: + that: + - failure_result is failed + - > + "Expected a list of header names, got a string" in failure_result.msg + +- name: Test unknown parameters (expect failure) + block: + - set_fact: + invalid_table: "{{ test_data | community.general.to_prettytable(unknown_param='value') }}" + register: failure_result + ignore_errors: true + - name: Verify error message for unknown parameters + assert: + that: + - failure_result is failed + - > + "Unknown parameter(s) for to_prettytable filter: unknown_param" in failure_result.msg + +- name: Test both positional args and column_order (expect failure) + block: + - set_fact: + invalid_table: "{{ test_data | community.general.to_prettytable('role', 'name', column_order=['name', 'age', 'role']) }}" + register: failure_result + ignore_errors: true + - name: Verify error message for using both positional args and column_order + assert: + that: + - failure_result is failed + - > + "Cannot use both positional arguments and the 'column_order' keyword argument" in failure_result.msg + +- name: Test non-string values in positional args (expect failure) + block: + - set_fact: + invalid_table: "{{ test_data | community.general.to_prettytable('name', 123, 'role') }}" + register: failure_result + ignore_errors: true + - name: Verify error message for non-string values in positional args + assert: + that: + - failure_result is failed + - > + "Expected a string for column name, got a int" in failure_result.msg + +- name: Test non-string values in column_order (expect failure) + block: + - set_fact: + invalid_table: "{{ test_data | community.general.to_prettytable(column_order=['name', 123, 'role']) }}" + register: failure_result + ignore_errors: true + - name: Verify error message for non-string values in column_order + assert: + that: + - failure_result is failed + - > + "Expected a string for column name, got a int" in failure_result.msg + +- name: Test non-string values in header_names (expect failure) + block: + - set_fact: + invalid_table: "{{ test_data | community.general.to_prettytable(header_names=['User Name', 456, 'User Role']) }}" + register: failure_result + ignore_errors: true + - name: Verify error message for non-string values in header_names + assert: + that: + - failure_result is failed + - > + "Expected a string for header name, got a int" in failure_result.msg + +- name: Test mismatched sizes of column_order and header_names (expect failure) + block: + - set_fact: + invalid_table: "{{ test_data | community.general.to_prettytable(column_order=['name', 'age', 'role'], header_names=['User Name', 'User Age']) }}" + register: failure_result + ignore_errors: true + - name: Verify error message for mismatched sizes + assert: + that: + - failure_result is failed + - > + "'column_order' and 'header_names' must have the same number of elements" in failure_result.msg + +- name: Test column_order with more elements than available fields (expect failure) + block: + - set_fact: + invalid_table: "{{ test_data | community.general.to_prettytable(column_order=['name', 'age', 'role', 'extra_field', 'another_extra']) }}" + register: failure_result + ignore_errors: true + - name: Verify error 
message for column_order with too many elements + assert: + that: + - failure_result is failed + - > + "'column_order' has more elements" in failure_result.msg + +- name: Test header_names with more elements than available fields (expect failure) + block: + - set_fact: + invalid_table: "{{ test_data | community.general.to_prettytable(header_names=['User Name', 'User Age', 'User Role', 'Extra Field', 'Another Extra']) }}" + register: failure_result + ignore_errors: true + - name: Verify error message for header_names with too many elements + assert: + that: + - failure_result is failed + - > + "'header_names' has more elements" in failure_result.msg + +- name: Test column_alignments with more elements than available fields (expect failure) + block: + - set_fact: + invalid_table: "{{ test_data | community.general.to_prettytable(column_alignments={'name': 'center', 'age': 'right', 'role': 'left', 'extra': 'center', 'another': 'left'}) }}" + register: failure_result + ignore_errors: true + - name: Verify error message for column_alignments with too many elements + assert: + that: + - failure_result is failed + - > + "'column_alignments' has more elements" in failure_result.msg + +- name: Test non-dictionary column_alignments (expect failure) + block: + - set_fact: + invalid_table: "{{ test_data | community.general.to_prettytable(column_alignments='invalid') }}" + register: failure_result + ignore_errors: true + - name: Verify error message for non-dictionary column_alignments + assert: + that: + - failure_result is failed + - > + "Expected a dictionary for column_alignments, got a string" in failure_result.msg + +- name: Test non-string keys in column_alignments (expect failure) + block: + - set_fact: + invalid_table: "{{ test_data | community.general.to_prettytable(column_alignments={123: 'center'}) }}" + register: failure_result + ignore_errors: true + - name: Verify error message for non-string keys in column_alignments + assert: + that: + - failure_result is failed + - > + "Expected a string for column_alignments key, got a int" in failure_result.msg + +- name: Test non-string values in column_alignments (expect failure) + block: + - set_fact: + invalid_table: "{{ test_data | community.general.to_prettytable(column_alignments={'name': 123}) }}" + register: failure_result + ignore_errors: true + - name: Verify error message for non-string values in column_alignments + assert: + that: + - failure_result is failed + - > + "Expected a string for column_alignments value, got a int" in failure_result.msg + +- name: Test invalid alignment value in column_alignments (expect failure) + block: + - set_fact: + invalid_table: "{{ test_data | community.general.to_prettytable(column_alignments={'name': 'invalid'}) }}" + register: failure_result + ignore_errors: true + - name: Verify error message for invalid alignment value in column_alignments + assert: + that: + - failure_result is failed + - > + "Invalid alignment 'invalid' in 'column_alignments'" in failure_result.msg + - > + "Valid alignments are" in failure_result.msg + +# Test dictionaries with mixed key types (numeric keys alongside string keys) +- name: Create test data with numeric keys + set_fact: + mixed_key_data: + - name: Alice + role: admin + 1: ID001 + - name: Bob + role: user + 1: ID002 + +- name: Test prettytable with mixed key types + set_fact: + mixed_key_table: "{{ mixed_key_data | community.general.to_prettytable }}" + expected_mixed_key_table: |- + +-------+-------+-------+ + | name | role | 1 | + +-------+-------+-------+ + | 
Alice | admin | ID001 | + | Bob | user | ID002 | + +-------+-------+-------+ + +- name: Verify mixed key types were handled correctly + assert: + that: + - mixed_key_table == expected_mixed_key_table + +# Test column ordering with numeric keys +- name: Test column ordering with numeric keys + set_fact: + mixed_ordered_table: "{{ mixed_key_data | community.general.to_prettytable(column_order=['1', 'name', 'role']) }}" + expected_ordered_numeric_table: |- + +-------+-------+-------+ + | 1 | name | role | + +-------+-------+-------+ + | ID001 | Alice | admin | + | ID002 | Bob | user | + +-------+-------+-------+ + +- name: Verify column ordering with numeric keys + assert: + that: + - mixed_ordered_table == expected_ordered_numeric_table + +# Test custom headers with numeric keys +- name: Test custom headers with numeric keys + set_fact: + mixed_headers_table: "{{ mixed_key_data | community.general.to_prettytable(header_names=['Name', 'Role', 'ID']) }}" + expected_headers_numeric_table: |- + +-------+-------+-------+ + | Name | Role | ID | + +-------+-------+-------+ + | Alice | admin | ID001 | + | Bob | user | ID002 | + +-------+-------+-------+ + +- name: Verify custom headers with numeric keys + assert: + that: + - mixed_headers_table == expected_headers_numeric_table + +# Test column alignments with numeric keys +- name: Test column alignments with numeric keys + set_fact: + mixed_aligned_table: "{{ mixed_key_data | community.general.to_prettytable(column_alignments={'1': 'right', 'name': 'left', 'role': 'center'}) }}" + expected_aligned_numeric_table: |- + +-------+-------+-------+ + | name | role | 1 | + +-------+-------+-------+ + | Alice | admin | ID001 | + | Bob | user | ID002 | + +-------+-------+-------+ + +- name: Verify column alignments with numeric keys + assert: + that: + - mixed_aligned_table == expected_aligned_numeric_table + +# Test with boolean-like string keys +- name: Create test data with boolean-like string keys + set_fact: + boolean_data: + - name: Alice + role: admin + true: 'Yes' + false: 'No' + - name: Bob + role: user + true: 'No' + false: 'Yes' + +- name: Test prettytable with boolean-like string keys + set_fact: + bool_table: "{{ boolean_data | community.general.to_prettytable }}" + expected_bool_table: |- + +-------+-------+------+-------+ + | name | role | True | False | + +-------+-------+------+-------+ + | Alice | admin | Yes | No | + | Bob | user | No | Yes | + +-------+-------+------+-------+ + +- name: Verify boolean-like keys were handled correctly + assert: + that: + - bool_table == expected_bool_table + +# Test that column_order with capitalized boolean names works via case-insensitive matching +- name: Test column ordering with capitalized boolean names + set_fact: + bool_ordered_table: "{{ boolean_data | community.general.to_prettytable(column_order=['True', 'False', 'name', 'role']) }}" + expected_bool_ordered_table: |- + +------+-------+-------+-------+ + | True | False | name | role | + +------+-------+-------+-------+ + | Yes | No | Alice | admin | + | No | Yes | Bob | user | + +------+-------+-------+-------+ + +- name: Verify that 'True' in column_order works with 'true' keys + assert: + that: + - bool_ordered_table == expected_bool_ordered_table + +# Test column alignments with boolean-like string keys +- name: Test column alignments with boolean-like string keys + set_fact: + bool_aligned_table: "{{ boolean_data | community.general.to_prettytable(column_alignments={'true': 'right', 'false': 'center', 'name': 'left'}) }}" + 
expected_bool_aligned_table: |- + +-------+-------+------+-------+ + | name | role | True | False | + +-------+-------+------+-------+ + | Alice | admin | Yes | No | + | Bob | user | No | Yes | + +-------+-------+------+-------+ + +- name: Verify column alignments with boolean-like string keys + assert: + that: + - bool_aligned_table == expected_bool_aligned_table diff --git a/tests/integration/targets/filter_version_sort/tasks/main.yml b/tests/integration/targets/filter_version_sort/tasks/main.yml index 08985d1bae..e7a7e3757c 100644 --- a/tests/integration/targets/filter_version_sort/tasks/main.yml +++ b/tests/integration/targets/filter_version_sort/tasks/main.yml @@ -8,7 +8,7 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -- name: validate that versions are properly sorted in a stable way +- name: validate that versions are properly sorted in a stable way assert: that: - "['a-1.9.rpm', 'a-1.10-1.rpm', 'a-1.09.rpm', 'b-1.01.rpm', 'a-2.1-0.rpm', 'a-1.10-0.rpm'] | community.general.version_sort == ['a-1.9.rpm', 'a-1.09.rpm', 'a-1.10-0.rpm', 'a-1.10-1.rpm', 'a-2.1-0.rpm', 'b-1.01.rpm']" diff --git a/tests/integration/targets/flatpak/tasks/main.yml b/tests/integration/targets/flatpak/tasks/main.yml index deaf354e8a..e05e2a168e 100644 --- a/tests/integration/targets/flatpak/tasks/main.yml +++ b/tests/integration/targets/flatpak/tasks/main.yml @@ -11,53 +11,53 @@ - block: - - import_tasks: setup.yml - become: true + - import_tasks: setup.yml + become: true - # executable override + # executable override - - name: Test executable override - flatpak: - name: com.dummy.App1 - remote: dummy-remote - state: present - executable: nothing-that-exists - ignore_errors: true - register: executable_override_result + - name: Test executable override + flatpak: + name: com.dummy.App1 + remote: dummy-remote + state: present + executable: nothing-that-exists + ignore_errors: true + register: executable_override_result - - name: Verify executable override test result - assert: - that: - - executable_override_result is failed - - executable_override_result is not changed - msg: "Specifying non-existing executable shall fail module execution" + - name: Verify executable override test result + assert: + that: + - executable_override_result is failed + - executable_override_result is not changed + msg: "Specifying non-existing executable shall fail module execution" - - import_tasks: check_mode.yml - become: false + - import_tasks: check_mode.yml + become: false - - import_tasks: test.yml - become: false - vars: - method: user + - import_tasks: test.yml + become: false + vars: + method: user - - import_tasks: test.yml - become: true - vars: - method: system + - import_tasks: test.yml + become: true + vars: + method: system always: - - name: Check HTTP server status - async_status: - jid: "{{ webserver_status.ansible_job_id }}" - ignore_errors: true + - name: Check HTTP server status + async_status: + jid: "{{ webserver_status.ansible_job_id }}" + ignore_errors: true - - name: List processes - command: ps aux + - name: List processes + command: ps aux - - name: Stop HTTP server - command: >- - pkill -f -- '{{ remote_tmp_dir }}/serve.py' + - name: Stop HTTP server + command: >- + pkill -f -- '{{ remote_tmp_dir }}/serve.py' when: | ansible_distribution == 'Fedora' or diff --git a/tests/integration/targets/flatpak/tasks/setup.yml b/tests/integration/targets/flatpak/tasks/setup.yml index 
4dfdd68cb9..041c736624 100644 --- a/tests/integration/targets/flatpak/tasks/setup.yml +++ b/tests/integration/targets/flatpak/tasks/setup.yml @@ -11,17 +11,17 @@ when: ansible_distribution == 'Fedora' - block: - - name: Activate flatpak ppa on Ubuntu - apt_repository: - repo: ppa:alexlarsson/flatpak - state: present - mode: '0644' - when: ansible_lsb.major_release | int < 18 + - name: Activate flatpak ppa on Ubuntu + apt_repository: + repo: ppa:alexlarsson/flatpak + state: present + mode: '0644' + when: ansible_lsb.major_release | int < 18 - - name: Install flatpak package on Ubuntu - apt: - name: flatpak - state: present + - name: Install flatpak package on Ubuntu + apt: + name: flatpak + state: present when: ansible_distribution == 'Ubuntu' diff --git a/tests/integration/targets/flatpak/tasks/test.yml b/tests/integration/targets/flatpak/tasks/test.yml index 658f7b1168..1c580b6fbf 100644 --- a/tests/integration/targets/flatpak/tasks/test.yml +++ b/tests/integration/targets/flatpak/tasks/test.yml @@ -164,25 +164,25 @@ - when: url_removal_result is not failed block: - - name: Verify removal test result - {{ method }} - assert: - that: - - url_removal_result is changed - msg: "state=absent with url as name shall remove flatpak when present" + - name: Verify removal test result - {{ method }} + assert: + that: + - url_removal_result is changed + msg: "state=absent with url as name shall remove flatpak when present" - - name: Test idempotency of removal with url - {{ method }} - flatpak: - name: http://127.0.0.1:8000/repo/com.dummy.App1.flatpakref - state: absent - method: "{{ method }}" - no_dependencies: true - register: double_url_removal_result + - name: Test idempotency of removal with url - {{ method }} + flatpak: + name: http://127.0.0.1:8000/repo/com.dummy.App1.flatpakref + state: absent + method: "{{ method }}" + no_dependencies: true + register: double_url_removal_result - - name: Verify idempotency of removal with url test result - {{ method }} - assert: - that: - - double_url_removal_result is not changed - msg: "state=absent with url as name shall not do anything when flatpak is not present" + - name: Verify idempotency of removal with url test result - {{ method }} + assert: + that: + - double_url_removal_result is not changed + msg: "state=absent with url as name shall not do anything when flatpak is not present" - name: Make sure flatpak is really gone - {{ method }} flatpak: diff --git a/tests/integration/targets/flatpak_remote/tasks/main.yml b/tests/integration/targets/flatpak_remote/tasks/main.yml index 1c50912328..951ab5aefb 100644 --- a/tests/integration/targets/flatpak_remote/tasks/main.yml +++ b/tests/integration/targets/flatpak_remote/tasks/main.yml @@ -11,39 +11,39 @@ - block: - - import_tasks: setup.yml - become: true + - import_tasks: setup.yml + become: true - # executable override + # executable override - - name: Test executable override - flatpak_remote: - name: irrelevant - remote: irrelevant - state: present - executable: nothing-that-exists - ignore_errors: true - register: executable_override_result + - name: Test executable override + flatpak_remote: + name: irrelevant + remote: irrelevant + state: present + executable: nothing-that-exists + ignore_errors: true + register: executable_override_result - - name: Verify executable override test result - assert: - that: - - executable_override_result is failed - - executable_override_result is not changed - msg: "Specifying non-existing executable shall fail module execution" + - name: Verify executable override 
test result + assert: + that: + - executable_override_result is failed + - executable_override_result is not changed + msg: "Specifying non-existing executable shall fail module execution" - - import_tasks: check_mode.yml - become: false + - import_tasks: check_mode.yml + become: false - - import_tasks: test.yml - become: false - vars: - method: user + - import_tasks: test.yml + become: false + vars: + method: user - - import_tasks: test.yml - become: true - vars: - method: system + - import_tasks: test.yml + become: true + vars: + method: system when: | ansible_distribution == 'Fedora' or diff --git a/tests/integration/targets/flatpak_remote/tasks/setup.yml b/tests/integration/targets/flatpak_remote/tasks/setup.yml index 55a14c9724..9fbf4cbe15 100644 --- a/tests/integration/targets/flatpak_remote/tasks/setup.yml +++ b/tests/integration/targets/flatpak_remote/tasks/setup.yml @@ -9,16 +9,16 @@ state: present when: ansible_distribution == 'Fedora' - block: - - name: Activate flatpak ppa on Ubuntu versions older than 18.04/bionic - apt_repository: - repo: ppa:alexlarsson/flatpak - state: present - mode: '0644' - when: ansible_lsb.major_release | int < 18 - - name: Install flatpak package on Ubuntu - apt: - name: flatpak - state: present + - name: Activate flatpak ppa on Ubuntu versions older than 18.04/bionic + apt_repository: + repo: ppa:alexlarsson/flatpak + state: present + mode: '0644' + when: ansible_lsb.major_release | int < 18 + - name: Install flatpak package on Ubuntu + apt: + name: flatpak + state: present when: ansible_distribution == 'Ubuntu' - name: Install flatpak remote for testing check mode flatpak_remote: diff --git a/tests/integration/targets/gandi_livedns/defaults/main.yml b/tests/integration/targets/gandi_livedns/defaults/main.yml index ec1808d8b5..7acd5c0cab 100644 --- a/tests/integration/targets/gandi_livedns/defaults/main.yml +++ b/tests/integration/targets/gandi_livedns/defaults/main.yml @@ -6,32 +6,32 @@ gandi_livedns_domain_name: "ansible-tests.org" gandi_livedns_record_items: -# Single A record -- record: test-www - type: A - values: + # Single A record + - record: test-www + type: A + values: - 10.10.10.10 - ttl: 400 - update_values: + ttl: 400 + update_values: - 10.10.10.11 - update_ttl: 800 + update_ttl: 800 -# Multiple A records -- record: test-www-multiple - type: A - ttl: 3600 - values: + # Multiple A records + - record: test-www-multiple + type: A + ttl: 3600 + values: - 10.10.11.10 - 10.10.11.10 - update_values: + update_values: - 10.10.11.11 - 10.10.11.13 -# CNAME -- record: test-cname - type: CNAME - ttl: 10800 - values: + # CNAME + - record: test-cname + type: CNAME + ttl: 10800 + values: - test-www2 - update_values: + update_values: - test-www diff --git a/tests/integration/targets/gandi_livedns/tasks/create_record.yml b/tests/integration/targets/gandi_livedns/tasks/create_record.yml index 87056aa865..708fa02715 100644 --- a/tests/integration/targets/gandi_livedns/tasks/create_record.yml +++ b/tests/integration/targets/gandi_livedns/tasks/create_record.yml @@ -15,7 +15,7 @@ - name: verify test absent dns record assert: that: - - result is successful + - result is successful - name: test create a dns record in check mode community.general.gandi_livedns: @@ -30,7 +30,7 @@ - name: verify test create a dns record in check mode assert: that: - - result is changed + - result is changed - name: test create a dns record community.general.gandi_livedns: @@ -44,11 +44,11 @@ - name: verify test create a dns record assert: that: - - result is changed - - 
result.record['values'] == item['values'] - - result.record.record == item.record - - result.record.type == item.type - - result.record.ttl == item.ttl + - result is changed + - result.record['values'] == item['values'] + - result.record.record == item.record + - result.record.type == item.type + - result.record.ttl == item.ttl - name: test create a dns record idempotence community.general.gandi_livedns: @@ -62,11 +62,11 @@ - name: verify test create a dns record idempotence assert: that: - - result is not changed - - result.record['values'] == item['values'] - - result.record.record == item.record - - result.record.type == item.type - - result.record.ttl == item.ttl + - result is not changed + - result.record['values'] == item['values'] + - result.record.record == item.record + - result.record.type == item.type + - result.record.ttl == item.ttl - name: test create a DNS record with personal access token community.general.gandi_livedns: diff --git a/tests/integration/targets/gandi_livedns/tasks/remove_record.yml b/tests/integration/targets/gandi_livedns/tasks/remove_record.yml index c4b937fd5a..1e46ac32f8 100644 --- a/tests/integration/targets/gandi_livedns/tasks/remove_record.yml +++ b/tests/integration/targets/gandi_livedns/tasks/remove_record.yml @@ -16,7 +16,7 @@ - name: verify test remove a dns record in check mode assert: that: - - result is changed + - result is changed - name: test remove a dns record community.general.gandi_livedns: @@ -30,7 +30,7 @@ - name: verify test remove a dns record assert: that: - - result is changed + - result is changed - name: test remove a dns record idempotence community.general.gandi_livedns: @@ -44,7 +44,7 @@ - name: verify test remove a dns record idempotence assert: that: - - result is not changed + - result is not changed - name: test remove second dns record idempotence community.general.gandi_livedns: @@ -58,4 +58,4 @@ - name: verify test remove a dns record idempotence assert: that: - - result is not changed + - result is not changed diff --git a/tests/integration/targets/gandi_livedns/tasks/update_record.yml b/tests/integration/targets/gandi_livedns/tasks/update_record.yml index 5f19bfa244..1bcd82fb3f 100644 --- a/tests/integration/targets/gandi_livedns/tasks/update_record.yml +++ b/tests/integration/targets/gandi_livedns/tasks/update_record.yml @@ -16,11 +16,11 @@ - name: verify test update in check mode assert: that: - - result is changed - - result.record['values'] == (item.update_values | default(item['values'])) - - result.record.record == item.record - - result.record.type == item.type - - result.record.ttl == (item.update_ttl | default(item.ttl)) + - result is changed + - result.record['values'] == (item.update_values | default(item['values'])) + - result.record.record == item.record + - result.record.type == item.type + - result.record.ttl == (item.update_ttl | default(item.ttl)) - name: test update or add another dns record community.general.gandi_livedns: @@ -34,11 +34,11 @@ - name: verify test update a dns record assert: that: - - result is changed - - result.record['values'] == (item.update_values | default(item['values'])) - - result.record.record == item.record - - result.record.ttl == (item.update_ttl | default(item.ttl)) - - result.record.type == item.type + - result is changed + - result.record['values'] == (item.update_values | default(item['values'])) + - result.record.record == item.record + - result.record.ttl == (item.update_ttl | default(item.ttl)) + - result.record.type == item.type - name: test update or add another 
dns record idempotence community.general.gandi_livedns: @@ -52,8 +52,8 @@ - name: verify test update a dns record idempotence assert: that: - - result is not changed - - result.record['values'] == (item.update_values | default(item['values'])) - - result.record.record == item.record - - result.record.ttl == (item.update_ttl | default(item.ttl)) - - result.record.type == item.type + - result is not changed + - result.record['values'] == (item.update_values | default(item['values'])) + - result.record.record == item.record + - result.record.ttl == (item.update_ttl | default(item.ttl)) + - result.record.type == item.type diff --git a/tests/integration/targets/gem/tasks/main.yml b/tests/integration/targets/gem/tasks/main.yml index 2d615304f8..0c85e56489 100644 --- a/tests/integration/targets/gem/tasks/main.yml +++ b/tests/integration/targets/gem/tasks/main.yml @@ -13,201 +13,202 @@ - not (ansible_os_family == 'Alpine') # TODO block: - - include_vars: '{{ item }}' - with_first_found: - - files: - - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml' - - '{{ ansible_distribution }}-{{ ansible_distribution_version }}.yml' - - '{{ ansible_os_family }}.yml' - - 'default.yml' - paths: '../vars' + - include_vars: '{{ item }}' + with_first_found: + - files: + - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml' + - '{{ ansible_distribution }}-{{ ansible_distribution_version }}.yml' + - '{{ ansible_distribution }}.yml' + - '{{ ansible_os_family }}.yml' + - 'default.yml' + paths: '../vars' - - name: Install dependencies for test - package: - name: "{{ item }}" - state: present - loop: "{{ test_packages }}" - when: ansible_distribution != "MacOSX" + - name: Install dependencies for test + package: + name: "{{ item }}" + state: present + loop: "{{ test_packages }}" + when: ansible_distribution != "MacOSX" - - name: Install a gem - gem: - name: gist - state: present - register: install_gem_result - ignore_errors: true - - # when running as root on Fedora, '--install-dir' is set in the os defaults which is - # incompatible with '--user-install', we ignore this error for this case only - - name: fail if failed to install gem - fail: - msg: "failed to install gem: {{ install_gem_result.msg }}" - when: - - install_gem_result is failed - - not (ansible_user_uid == 0 and "User --install-dir or --user-install but not both" not in install_gem_result.msg) - - - block: - - name: List gems - command: gem list - register: current_gems - - - name: Ensure gem was installed - assert: - that: - - install_gem_result is changed - - current_gems.stdout is search('gist\s+\([0-9.]+\)') - - - name: Remove a gem + - name: Install a gem gem: name: gist - state: absent - register: remove_gem_results + state: present + register: install_gem_result + ignore_errors: true - - name: List gems - command: gem list - register: current_gems + # when running as root on Fedora, '--install-dir' is set in the os defaults which is + # incompatible with '--user-install', we ignore this error for this case only + - name: fail if failed to install gem + fail: + msg: "failed to install gem: {{ install_gem_result.msg }}" + when: + - install_gem_result is failed + - not (ansible_user_uid == 0 and "User --install-dir or --user-install but not both" not in install_gem_result.msg) - - name: Verify gem is not installed + - block: + - name: List gems + command: gem list + register: current_gems + + - name: Ensure gem was installed + assert: + that: + - install_gem_result is changed + - current_gems.stdout is 
search('gist\s+\([0-9.]+\)') + + - name: Remove a gem + gem: + name: gist + state: absent + register: remove_gem_results + + - name: List gems + command: gem list + register: current_gems + + - name: Verify gem is not installed + assert: + that: + - remove_gem_results is changed + - current_gems.stdout is not search('gist\s+\([0-9.]+\)') + when: not install_gem_result is failed + + # install gem in --no-user-install + - block: + - name: Install a gem with --no-user-install + gem: + name: gist + state: present + user_install: false + register: install_gem_result + + - name: List gems + command: gem list + register: current_gems + + - name: Ensure gem was installed + assert: + that: + - install_gem_result is changed + - current_gems.stdout is search('gist\s+\([0-9.]+\)') + + - name: Remove a gem + gem: + name: gist + state: absent + register: remove_gem_results + + - name: List gems + command: gem list + register: current_gems + + - name: Verify gem is not installed + assert: + that: + - remove_gem_results is changed + - current_gems.stdout is not search('gist\s+\([0-9.]+\)') + when: ansible_user_uid == 0 + + # Check custom gem directory + - name: Install gem in a custom directory with incorrect options + gem: + name: gist + state: present + install_dir: "{{ remote_tmp_dir }}/gems" + ignore_errors: true + register: install_gem_fail_result + + - debug: + var: install_gem_fail_result + tags: debug + + - name: Ensure previous task failed assert: that: - - remove_gem_results is changed - - current_gems.stdout is not search('gist\s+\([0-9.]+\)') - when: not install_gem_result is failed + - install_gem_fail_result is failed + - install_gem_fail_result.msg == 'install_dir requires user_install=false' - # install gem in --no-user-install - - block: - - name: Install a gem with --no-user-install + - name: Install a gem in a custom directory gem: name: gist state: present user_install: false + install_dir: "{{ remote_tmp_dir }}/gems" register: install_gem_result - - name: List gems - command: gem list - register: current_gems + - name: Find gems in custom directory + find: + paths: "{{ remote_tmp_dir }}/gems/gems" + file_type: directory + contains: gist + register: gem_search - - name: Ensure gem was installed + - name: Ensure gem was installed in custom directory assert: that: - install_gem_result is changed - - current_gems.stdout is search('gist\s+\([0-9.]+\)') + - gem_search.files[0].path is search('gist-[0-9.]+') + ignore_errors: true - - name: Remove a gem + - name: Remove a gem in a custom directory gem: name: gist state: absent - register: remove_gem_results + user_install: false + install_dir: "{{ remote_tmp_dir }}/gems" + register: install_gem_result - - name: List gems - command: gem list - register: current_gems + - name: Find gems in custom directory + find: + paths: "{{ remote_tmp_dir }}/gems/gems" + file_type: directory + contains: gist + register: gem_search - - name: Verify gem is not installed + - name: Ensure gem was removed in custom directory assert: that: - - remove_gem_results is changed - - current_gems.stdout is not search('gist\s+\([0-9.]+\)') - when: ansible_user_uid == 0 + - install_gem_result is changed + - gem_search.files | length == 0 - # Check custom gem directory - - name: Install gem in a custom directory with incorrect options - gem: - name: gist - state: present - install_dir: "{{ remote_tmp_dir }}/gems" - ignore_errors: true - register: install_gem_fail_result + # Custom directory for executables (--bindir) + - name: Install gem with custom bindir + gem: + name: 
gist + state: present + bindir: "{{ remote_tmp_dir }}/custom_bindir" + norc: true + user_install: false # Avoid conflicts between --install-dir and --user-install when running as root on CentOS / Fedora / RHEL + register: install_gem_result - - debug: - var: install_gem_fail_result - tags: debug + - name: Get stats of gem executable + stat: + path: "{{ remote_tmp_dir }}/custom_bindir/gist" + register: gem_bindir_stat - - name: Ensure previous task failed - assert: - that: - - install_gem_fail_result is failed - - install_gem_fail_result.msg == 'install_dir requires user_install=false' + - name: Ensure gem executable was installed in custom directory + assert: + that: + - install_gem_result is changed + - gem_bindir_stat.stat.exists and gem_bindir_stat.stat.isreg - - name: Install a gem in a custom directory - gem: - name: gist - state: present - user_install: false - install_dir: "{{ remote_tmp_dir }}/gems" - register: install_gem_result + - name: Remove gem with custom bindir + gem: + name: gist + state: absent + bindir: "{{ remote_tmp_dir }}/custom_bindir" + norc: true + user_install: false # Avoid conflicts between --install-dir and --user-install when running as root on CentOS / Fedora / RHEL + register: install_gem_result - - name: Find gems in custom directory - find: - paths: "{{ remote_tmp_dir }}/gems/gems" - file_type: directory - contains: gist - register: gem_search + - name: Get stats of gem executable + stat: + path: "{{ remote_tmp_dir }}/custom_bindir/gist" + register: gem_bindir_stat - - name: Ensure gem was installed in custom directory - assert: - that: - - install_gem_result is changed - - gem_search.files[0].path is search('gist-[0-9.]+') - ignore_errors: true - - - name: Remove a gem in a custom directory - gem: - name: gist - state: absent - user_install: false - install_dir: "{{ remote_tmp_dir }}/gems" - register: install_gem_result - - - name: Find gems in custom directory - find: - paths: "{{ remote_tmp_dir }}/gems/gems" - file_type: directory - contains: gist - register: gem_search - - - name: Ensure gem was removed in custom directory - assert: - that: - - install_gem_result is changed - - gem_search.files | length == 0 - - # Custom directory for executables (--bindir) - - name: Install gem with custom bindir - gem: - name: gist - state: present - bindir: "{{ remote_tmp_dir }}/custom_bindir" - norc: true - user_install: false # Avoid conflicts between --install-dir and --user-install when running as root on CentOS / Fedora / RHEL - register: install_gem_result - - - name: Get stats of gem executable - stat: - path: "{{ remote_tmp_dir }}/custom_bindir/gist" - register: gem_bindir_stat - - - name: Ensure gem executable was installed in custom directory - assert: - that: - - install_gem_result is changed - - gem_bindir_stat.stat.exists and gem_bindir_stat.stat.isreg - - - name: Remove gem with custom bindir - gem: - name: gist - state: absent - bindir: "{{ remote_tmp_dir }}/custom_bindir" - norc: true - user_install: false # Avoid conflicts between --install-dir and --user-install when running as root on CentOS / Fedora / RHEL - register: install_gem_result - - - name: Get stats of gem executable - stat: - path: "{{ remote_tmp_dir }}/custom_bindir/gist" - register: gem_bindir_stat - - - name: Ensure gem executable was removed from custom directory - assert: - that: - - install_gem_result is changed - - not gem_bindir_stat.stat.exists + - name: Ensure gem executable was removed from custom directory + assert: + that: + - install_gem_result is changed + - not 
gem_bindir_stat.stat.exists diff --git a/tests/integration/targets/gem/vars/Ubuntu.yml b/tests/integration/targets/gem/vars/Ubuntu.yml new file mode 100644 index 0000000000..5f81e7e487 --- /dev/null +++ b/tests/integration/targets/gem/vars/Ubuntu.yml @@ -0,0 +1,7 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +test_packages: + - "ruby" diff --git a/tests/integration/targets/git_config/tasks/exclusion_state_list-all.yml b/tests/integration/targets/git_config/tasks/exclusion_state_list-all.yml deleted file mode 100644 index e294a83fb5..0000000000 --- a/tests/integration/targets/git_config/tasks/exclusion_state_list-all.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -- import_tasks: setup_no_value.yml - -- name: testing exclusion between state and list_all parameters - git_config: - list_all: true - state: absent - register: result - ignore_errors: true - -- name: assert git_config failed - assert: - that: - - result is failed - - "result.msg == 'parameters are mutually exclusive: list_all|state'" -... diff --git a/tests/integration/targets/git_config/tasks/get_set_no_state.yml b/tests/integration/targets/git_config/tasks/get_set_no_state.yml index 4e41bf4e9d..6963e679d4 100644 --- a/tests/integration/targets/git_config/tasks/get_set_no_state.yml +++ b/tests/integration/targets/git_config/tasks/get_set_no_state.yml @@ -13,7 +13,7 @@ register: set_result - name: getting value without state - git_config: + git_config_info: name: "{{ option_name }}" scope: "{{ option_scope }}" register: get_result @@ -24,6 +24,5 @@ - set_result is changed - set_result.diff.before == "\n" - set_result.diff.after == option_value + "\n" - - get_result is not changed - get_result.config_value == option_value ... diff --git a/tests/integration/targets/git_config/tasks/get_set_state_present.yml b/tests/integration/targets/git_config/tasks/get_set_state_present.yml index cfc3bbe78d..28f031aeb1 100644 --- a/tests/integration/targets/git_config/tasks/get_set_state_present.yml +++ b/tests/integration/targets/git_config/tasks/get_set_state_present.yml @@ -14,10 +14,9 @@ register: result - name: getting value with state=present - git_config: + git_config_info: name: "{{ option_name }}" scope: "{{ option_scope }}" - state: present register: get_result - name: assert set changed and value is correct with state=present @@ -26,6 +25,5 @@ - set_result is changed - set_result.diff.before == "\n" - set_result.diff.after == option_value + "\n" - - get_result is not changed - get_result.config_value == option_value ... 
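The git_config changes above all follow one pattern: read-only lookups move from the `git_config` module to the dedicated `git_config_info` module, which never reports a change, so the `state: present` argument and the `get_result is not changed` assertions are dropped. A minimal sketch of the resulting lookup pattern, reusing the `option_*` variables these tasks already define:

```yaml
# Read a value with the read-only info module instead of git_config;
# since no change status is reported, only config_value is asserted.
- name: getting value
  community.general.git_config_info:
    name: "{{ option_name }}"
    scope: "{{ option_scope }}"
  register: get_result

- name: assert value is correct
  assert:
    that:
      - get_result.config_value == option_value
```

For file scope, the info module takes `path` instead of `file`, as the get_set_state_present_file.yml change below shows.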
diff --git a/tests/integration/targets/git_config/tasks/get_set_state_present_file.yml b/tests/integration/targets/git_config/tasks/get_set_state_present_file.yml index c410bfe189..f36b3f3aed 100644 --- a/tests/integration/targets/git_config/tasks/get_set_state_present_file.yml +++ b/tests/integration/targets/git_config/tasks/get_set_state_present_file.yml @@ -15,11 +15,10 @@ register: result - name: getting value with state=present - git_config: + git_config_info: name: "{{ option_name }}" scope: "file" - file: "{{ remote_tmp_dir }}/gitconfig_file" - state: present + path: "{{ remote_tmp_dir }}/gitconfig_file" register: get_result - name: assert set changed and value is correct with state=present @@ -28,6 +27,5 @@ - set_result is changed - set_result.diff.before == "\n" - set_result.diff.after == option_value + "\n" - - get_result is not changed - get_result.config_value == option_value ... \ No newline at end of file diff --git a/tests/integration/targets/git_config/tasks/main.yml b/tests/integration/targets/git_config/tasks/main.yml index 5fddaf7649..48e411cc22 100644 --- a/tests/integration/targets/git_config/tasks/main.yml +++ b/tests/integration/targets/git_config/tasks/main.yml @@ -14,8 +14,6 @@ - block: - import_tasks: set_value.yml - # testing parameters exclusion: state and list_all - - import_tasks: exclusion_state_list-all.yml # testing get/set option without state - import_tasks: get_set_no_state.yml # testing get/set option with state=present diff --git a/tests/integration/targets/git_config/tasks/precedence_between_unset_and_value.yml b/tests/integration/targets/git_config/tasks/precedence_between_unset_and_value.yml index a76fbab9cd..ebcd2e8b89 100644 --- a/tests/integration/targets/git_config/tasks/precedence_between_unset_and_value.yml +++ b/tests/integration/targets/git_config/tasks/precedence_between_unset_and_value.yml @@ -14,7 +14,7 @@ register: unset_result - name: getting value - git_config: + git_config_info: name: "{{ option_name }}" scope: "{{ option_scope }}" register: get_result diff --git a/tests/integration/targets/git_config/tasks/set_multi_value.yml b/tests/integration/targets/git_config/tasks/set_multi_value.yml index 8d2710b761..94edf94df4 100644 --- a/tests/integration/targets/git_config/tasks/set_multi_value.yml +++ b/tests/integration/targets/git_config/tasks/set_multi_value.yml @@ -31,17 +31,11 @@ - 'merge_request.target=foobar' register: set_result2 -- name: getting the multi-value - git_config: - name: push.pushoption - scope: global - register: get_single_result - - name: getting all values for the single option git_config_info: name: push.pushoption scope: global - register: get_all_result + register: get_result - name: replace-all values git_config: @@ -62,8 +56,8 @@ - set_result2.results[1] is not changed - set_result2.results[2] is not changed - set_result3 is changed - - get_single_result.config_value == 'merge_request.create' - - 'get_all_result.config_values == {"push.pushoption": ["merge_request.create", "merge_request.draft", "merge_request.target=foobar"]}' + - get_result.config_value == 'merge_request.create' + - 'get_result.config_values == {"push.pushoption": ["merge_request.create", "merge_request.draft", "merge_request.target=foobar"]}' - name: assert the diffs are also right assert: diff --git a/tests/integration/targets/git_config/tasks/set_value.yml b/tests/integration/targets/git_config/tasks/set_value.yml index 774e3136a5..54505438cb 100644 --- a/tests/integration/targets/git_config/tasks/set_value.yml +++ 
b/tests/integration/targets/git_config/tasks/set_value.yml @@ -20,7 +20,7 @@ register: set_result2 - name: getting value - git_config: + git_config_info: name: core.name scope: global register: get_result @@ -30,7 +30,6 @@ that: - set_result1 is changed - set_result2 is changed - - get_result is not changed - get_result.config_value == 'bar' - set_result1.diff.before == "\n" - set_result1.diff.after == "foo\n" diff --git a/tests/integration/targets/git_config/tasks/set_value_with_tilde.yml b/tests/integration/targets/git_config/tasks/set_value_with_tilde.yml index 3ca9023aad..e4b1195194 100644 --- a/tests/integration/targets/git_config/tasks/set_value_with_tilde.yml +++ b/tests/integration/targets/git_config/tasks/set_value_with_tilde.yml @@ -3,7 +3,7 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -#- import_tasks: setup_no_value.yml +# - import_tasks: setup_no_value.yml - name: setting value git_config: @@ -22,7 +22,7 @@ register: set_result2 - name: getting value - git_config: + git_config_info: name: core.hooksPath scope: global register: get_result @@ -32,6 +32,5 @@ that: - set_result1 is changed - set_result2 is not changed - - get_result is not changed - get_result.config_value == '~/foo/bar' ... diff --git a/tests/integration/targets/git_config/tasks/unset_check_mode.yml b/tests/integration/targets/git_config/tasks/unset_check_mode.yml index 39bce33790..dc73b07a52 100644 --- a/tests/integration/targets/git_config/tasks/unset_check_mode.yml +++ b/tests/integration/targets/git_config/tasks/unset_check_mode.yml @@ -14,7 +14,7 @@ register: unset_result - name: getting value - git_config: + git_config_info: name: "{{ option_name }}" scope: "{{ option_scope }}" register: get_result diff --git a/tests/integration/targets/git_config/tasks/unset_no_value.yml b/tests/integration/targets/git_config/tasks/unset_no_value.yml index 394276cad7..7c10a474d9 100644 --- a/tests/integration/targets/git_config/tasks/unset_no_value.yml +++ b/tests/integration/targets/git_config/tasks/unset_no_value.yml @@ -13,7 +13,7 @@ register: unset_result - name: getting value - git_config: + git_config_info: name: "{{ option_name }}" scope: "{{ option_scope }}" register: get_result diff --git a/tests/integration/targets/git_config/tasks/unset_value.yml b/tests/integration/targets/git_config/tasks/unset_value.yml index 5f8c52c96f..dce0818b96 100644 --- a/tests/integration/targets/git_config/tasks/unset_value.yml +++ b/tests/integration/targets/git_config/tasks/unset_value.yml @@ -13,7 +13,7 @@ register: unset_result - name: getting value - git_config: + git_config_info: name: "{{ option_name }}" scope: "{{ option_scope }}" register: get_result @@ -37,7 +37,7 @@ register: unset_result - name: getting value - git_config: + git_config_info: name: "{{ option_name }}" scope: "{{ option_scope }}" register: get_result diff --git a/tests/integration/targets/git_config_info/tasks/main.yml b/tests/integration/targets/git_config_info/tasks/main.yml index 993238805e..20042e7427 100644 --- a/tests/integration/targets/git_config_info/tasks/main.yml +++ b/tests/integration/targets/git_config_info/tasks/main.yml @@ -15,18 +15,27 @@ - block: - include_tasks: get_simple_value.yml loop: - - { import_file: setup_global.yml, git_scope: 'global' } - - { import_file: setup_file.yml, git_scope: 'file', git_file: "{{ remote_tmp_dir }}/gitconfig_file" } + - import_file: setup_global.yml + git_scope: 'global' + - import_file: 
setup_file.yml + git_scope: 'file' + git_file: "{{ remote_tmp_dir }}/gitconfig_file" - include_tasks: get_multi_value.yml loop: - - { import_file: setup_global.yml, git_scope: 'global' } - - { import_file: setup_file.yml, git_scope: 'file', git_file: "{{ remote_tmp_dir }}/gitconfig_file" } + - import_file: setup_global.yml + git_scope: 'global' + - import_file: setup_file.yml + git_scope: 'file' + git_file: "{{ remote_tmp_dir }}/gitconfig_file" - include_tasks: get_all_values.yml loop: - - { import_file: setup_global.yml, git_scope: 'global' } - - { import_file: setup_file.yml, git_scope: 'file', git_file: "{{ remote_tmp_dir }}/gitconfig_file" } + - import_file: setup_global.yml + git_scope: 'global' + - import_file: setup_file.yml + git_scope: 'file' + git_file: "{{ remote_tmp_dir }}/gitconfig_file" - include_tasks: error_handling.yml when: git_installed is succeeded and git_version.stdout is version(git_version_supporting_includes, ">=") diff --git a/tests/integration/targets/github_app_access_token/tasks/main.yml b/tests/integration/targets/github_app_access_token/tasks/main.yml index 9b7ba5d2c1..dbaa61d230 100644 --- a/tests/integration/targets/github_app_access_token/tasks/main.yml +++ b/tests/integration/targets/github_app_access_token/tasks/main.yml @@ -12,7 +12,7 @@ - name: Install JWT ansible.builtin.pip: name: - - jwt + - jwt - name: Read file ansible.builtin.set_fact: @@ -26,5 +26,5 @@ - assert: that: - - github_app_access_token is failed - - '"Github return error" in github_app_access_token.msg' + - github_app_access_token is failed + - '"Github return error" in github_app_access_token.msg' diff --git a/tests/integration/targets/proxmox_template/aliases b/tests/integration/targets/github_key/aliases similarity index 92% rename from tests/integration/targets/proxmox_template/aliases rename to tests/integration/targets/github_key/aliases index 5d9af81016..9ee6676643 100644 --- a/tests/integration/targets/proxmox_template/aliases +++ b/tests/integration/targets/github_key/aliases @@ -3,4 +3,4 @@ # SPDX-License-Identifier: GPL-3.0-or-later unsupported -proxmox_template +destructive diff --git a/tests/integration/targets/github_key/tasks/main.yml b/tests/integration/targets/github_key/tasks/main.yml new file mode 100644 index 0000000000..d9bbf9d229 --- /dev/null +++ b/tests/integration/targets/github_key/tasks/main.yml @@ -0,0 +1,58 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Test code for the github_key module. 
+# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Test api_url parameter with GitHub.com + community.general.github_key: + token: "{{ fake_token }}" + name: "{{ test_key_name }}" + pubkey: "{{ test_pubkey }}" + state: present + api_url: "{{ github_api_url }}" + register: github_api_result + ignore_errors: true + +- name: Assert api_url parameter works with GitHub.com + assert: + that: + - github_api_result is failed + - '"Unauthorized" in github_api_result.msg or "401" in github_api_result.msg' + +- name: Test api_url parameter with GitHub Enterprise + community.general.github_key: + token: "{{ fake_token }}" + name: "{{ test_key_name }}" + pubkey: "{{ test_pubkey }}" + state: present + api_url: "{{ enterprise_api_url }}" + register: enterprise_api_result + ignore_errors: true + +- name: Assert api_url parameter works with GitHub Enterprise + assert: + that: + - enterprise_api_result is failed + - '"github.company.com" in enterprise_api_result.msg' + +- name: Test api_url with trailing slash + community.general.github_key: + token: "{{ fake_token }}" + name: "{{ test_key_name }}" + pubkey: "{{ test_pubkey }}" + state: present + api_url: "{{ enterprise_api_url_trailing }}" + register: trailing_slash_result + ignore_errors: true + +- name: Assert trailing slash is handled correctly + assert: + that: + - trailing_slash_result is failed + - '"github.company.com" in trailing_slash_result.msg' diff --git a/tests/integration/targets/github_key/vars/main.yml b/tests/integration/targets/github_key/vars/main.yml new file mode 100644 index 0000000000..23ac841f98 --- /dev/null +++ b/tests/integration/targets/github_key/vars/main.yml @@ -0,0 +1,11 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +fake_token: "fake_token_for_testing" +test_key_name: "ansible-test-key" +github_api_url: "https://api.github.com" +enterprise_api_url: "https://github.company.com/api/v3" +enterprise_api_url_trailing: "https://github.company.com/api/v3/" +test_pubkey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDTgvwjlRHZ8E1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ test@example.com" diff --git a/tests/integration/targets/gitlab_branch/tasks/main.yml b/tests/integration/targets/gitlab_branch/tasks/main.yml index 19d90e15cf..ee39f452fc 100644 --- a/tests/integration/targets/gitlab_branch/tasks/main.yml +++ b/tests/integration/targets/gitlab_branch/tasks/main.yml @@ -22,7 +22,7 @@ initialize_with_readme: true state: present -- name: Create branch {{ gitlab_branch }} +- name: Create branch {{ gitlab_branch }} community.general.gitlab_branch: api_url: https://gitlab.com api_token: secret_access_token @@ -54,12 +54,12 @@ branch: "{{ gitlab_branch }}" state: absent register: delete_branch - + - name: Test module is idempotent assert: that: - delete_branch is changed - + - name: Clean up {{ gitlab_project_name }} gitlab_project: server_url: "{{ gitlab_host }}" diff --git a/tests/integration/targets/gitlab_group_access_token/defaults/main.yml b/tests/integration/targets/gitlab_group_access_token/defaults/main.yml index 1b0dab2892..16d4208e08 100644 --- a/tests/integration/targets/gitlab_group_access_token/defaults/main.yml +++ b/tests/integration/targets/gitlab_group_access_token/defaults/main.yml @@ -8,8 +8,8 @@ # 
GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -gitlab_api_token: -gitlab_api_url: +gitlab_api_token: +gitlab_api_url: gitlab_validate_certs: false -gitlab_group_name: -gitlab_token_name: +gitlab_group_name: +gitlab_token_name: diff --git a/tests/integration/targets/gitlab_group_access_token/tasks/main.yml b/tests/integration/targets/gitlab_group_access_token/tasks/main.yml index 4e6234238e..da2c9698f7 100644 --- a/tests/integration/targets/gitlab_group_access_token/tasks/main.yml +++ b/tests/integration/targets/gitlab_group_access_token/tasks/main.yml @@ -25,8 +25,8 @@ expires_at: '2025-01-01' access_level: developer scopes: - - api - - read_api + - api + - read_api register: create_pfail_token_status always: - name: Assert that token creation in nonexisting group failed @@ -47,8 +47,8 @@ expires_at: '2025-13-01' access_level: developer scopes: - - api - - read_api + - api + - read_api register: create_efail_token_status always: - name: Assert that token creation with invalid expires_at failed @@ -68,8 +68,8 @@ expires_at: '2024-12-31' access_level: developer scopes: - - api - - read_api + - api + - read_api register: create_token_status - name: Assert that token creation with valid arguments is successfull assert: @@ -88,8 +88,8 @@ expires_at: '2024-12-31' access_level: developer scopes: - - api - - read_api + - api + - read_api register: check_token_status - name: Assert that token creation without changes and recreate=never succeeds with status not changed assert: @@ -108,8 +108,8 @@ expires_at: '2024-12-31' access_level: developer scopes: - - api - - read_api + - api + - read_api recreate: state_change register: check_recreate_token_status - name: Assert that token creation without changes and recreate=state_change succeeds with status not changed @@ -130,8 +130,8 @@ expires_at: '2025-01-01' access_level: developer scopes: - - api - - read_api + - api + - read_api register: change_token_status always: - name: Assert that token change with recreate=never fails @@ -151,8 +151,8 @@ expires_at: '2025-01-01' access_level: developer scopes: - - api - - read_api + - api + - read_api recreate: state_change register: change_recreate_token_status - name: Assert that token change with recreate=state_change succeeds @@ -172,8 +172,8 @@ expires_at: '2025-01-01' access_level: developer scopes: - - api - - read_api + - api + - read_api recreate: always register: change_recreate1_token_status - name: Assert that token change with recreate=always succeeds @@ -193,8 +193,8 @@ expires_at: '2024-12-31' access_level: developer scopes: - - api - - read_api + - api + - read_api register: revoke_token_status - name: Assert that token revocation succeeds assert: @@ -212,8 +212,8 @@ expires_at: '2024-12-31' access_level: developer scopes: - - api - - read_api + - api + - read_api register: revoke_token_status - name: Assert that token revocation succeeds with status not changed assert: diff --git a/tests/integration/targets/gitlab_group_variable/tasks/main.yml b/tests/integration/targets/gitlab_group_variable/tasks/main.yml index 39a3a5df8d..2627080e38 100644 --- a/tests/integration/targets/gitlab_group_variable/tasks/main.yml +++ b/tests/integration/targets/gitlab_group_variable/tasks/main.yml @@ -242,7 +242,7 @@ - gitlab_group_variable_state is changed when: gitlab_premium_tests is defined -- name: apply again the environment scope change +- name: apply again the environment scope change 
gitlab_group_variable: api_url: "{{ gitlab_host }}" api_token: "{{ gitlab_login_token }}" diff --git a/tests/integration/targets/gitlab_instance_variable/tasks/main.yml b/tests/integration/targets/gitlab_instance_variable/tasks/main.yml index 94a81698bc..36079e3957 100644 --- a/tests/integration/targets/gitlab_instance_variable/tasks/main.yml +++ b/tests/integration/targets/gitlab_instance_variable/tasks/main.yml @@ -438,7 +438,7 @@ - gitlab_instance_variable_state.instance_variable.removed|length == 0 - gitlab_instance_variable_state.instance_variable.updated|length == 0 # VALUE_SPECIFIED_IN_NO_LOG_PARAMETER - #- gitlab_instance_variable_state.instance_variable.added[0] == "my_test_var" + # - gitlab_instance_variable_state.instance_variable.added[0] == "my_test_var" - name: change variable_type attribute gitlab_instance_variable: diff --git a/tests/integration/targets/gitlab_issue/tasks/main.yml b/tests/integration/targets/gitlab_issue/tasks/main.yml index af1416c3dd..5667851f19 100644 --- a/tests/integration/targets/gitlab_issue/tasks/main.yml +++ b/tests/integration/targets/gitlab_issue/tasks/main.yml @@ -14,137 +14,137 @@ state: present - block: - - name: Create {{ gitlab_project_name }} project - gitlab_project: - api_url: "{{ gitlab_host }}" - validate_certs: true - api_token: "{{ gitlab_api_token }}" - name: "{{ gitlab_project_name }}" - group: "{{ gitlab_project_group }}" - default_branch: "{{ gitlab_branch }}" - initialize_with_readme: true - state: present + - name: Create {{ gitlab_project_name }} project + gitlab_project: + api_url: "{{ gitlab_host }}" + validate_certs: true + api_token: "{{ gitlab_api_token }}" + name: "{{ gitlab_project_name }}" + group: "{{ gitlab_project_group }}" + default_branch: "{{ gitlab_branch }}" + initialize_with_readme: true + state: present - - name: Create Issue - gitlab_issue: - api_token: "{{ gitlab_api_token }}" - api_url: "{{ gitlab_host }}" - description: "Test description" - project: "{{ gitlab_project_group }}/{{ gitlab_project_name }}" - state: present - title: "Ansible test issue" - register: gitlab_issue_create + - name: Create Issue + gitlab_issue: + api_token: "{{ gitlab_api_token }}" + api_url: "{{ gitlab_host }}" + description: "Test description" + project: "{{ gitlab_project_group }}/{{ gitlab_project_name }}" + state: present + title: "Ansible test issue" + register: gitlab_issue_create - - name: Test Issue Created - assert: - that: - - gitlab_issue_create is changed + - name: Test Issue Created + assert: + that: + - gitlab_issue_create is changed - - name: Create Issue ( Idempotency test ) - gitlab_issue: - api_token: "{{ gitlab_api_token }}" - api_url: "{{ gitlab_host }}" - description: "Test description" - project: "{{ gitlab_project_group }}/{{ gitlab_project_name }}" - state: present - title: "Ansible test issue" - register: gitlab_issue_create_idempotence + - name: Create Issue ( Idempotency test ) + gitlab_issue: + api_token: "{{ gitlab_api_token }}" + api_url: "{{ gitlab_host }}" + description: "Test description" + project: "{{ gitlab_project_group }}/{{ gitlab_project_name }}" + state: present + title: "Ansible test issue" + register: gitlab_issue_create_idempotence - - name: Test Create Issue is Idempotent - assert: - that: - - gitlab_issue_create_idempotence is not changed + - name: Test Create Issue is Idempotent + assert: + that: + - gitlab_issue_create_idempotence is not changed - - name: Update Issue Test ( Additions ) - gitlab_issue: - api_token: "{{ gitlab_api_token }}" - api_url: "{{ gitlab_host }}" - 
assignee_ids: "{{ gitlab_assignee_ids }}" - description_path: "{{ gitlab_description_path }}" - labels: "{{ gitlab_labels }}" - milestone_search: "{{ gitlab_milestone_search }}" - milestone_group_id: "{{ gitlab_milestone_group_id }}" - project: "{{ gitlab_project_group }}/{{ gitlab_project_name }}" - state: present - title: "Ansible test issue" - register: gitlab_issue_update_additions + - name: Update Issue Test ( Additions ) + gitlab_issue: + api_token: "{{ gitlab_api_token }}" + api_url: "{{ gitlab_host }}" + assignee_ids: "{{ gitlab_assignee_ids }}" + description_path: "{{ gitlab_description_path }}" + labels: "{{ gitlab_labels }}" + milestone_search: "{{ gitlab_milestone_search }}" + milestone_group_id: "{{ gitlab_milestone_group_id }}" + project: "{{ gitlab_project_group }}/{{ gitlab_project_name }}" + state: present + title: "Ansible test issue" + register: gitlab_issue_update_additions - - name: Test Issue Updated ( Additions ) - assert: - that: - - gitlab_issue_update_additions.issue.labels[0] == "{{ gitlab_labels[0] }}" - - gitlab_issue_update_additions.issue.assignees[0].username == "{{ gitlab_assignee_ids[0] }}" - - "'### Description\n\nIssue test description' in gitlab_issue_update_additions.issue.description" - - gitlab_issue_update_additions.issue.milestone.title == "{{ gitlab_milestone_search }}" + - name: Test Issue Updated ( Additions ) + assert: + that: + - gitlab_issue_update_additions.issue.labels[0] == "{{ gitlab_labels[0] }}" + - gitlab_issue_update_additions.issue.assignees[0].username == "{{ gitlab_assignee_ids[0] }}" + - "'### Description\n\nIssue test description' in gitlab_issue_update_additions.issue.description" + - gitlab_issue_update_additions.issue.milestone.title == "{{ gitlab_milestone_search }}" - - name: Update Issue Test ( Persistence ) - gitlab_issue: - api_token: "{{ gitlab_api_token }}" - api_url: "{{ gitlab_host }}" - description_path: "{{ gitlab_description_path }}" - milestone_search: "{{ gitlab_milestone_search }}" - milestone_group_id: "{{ gitlab_milestone_group_id }}" - project: "{{ gitlab_project_group }}/{{ gitlab_project_name }}" - state: present - title: "Ansible test issue" - register: gitlab_issue_update_persistence + - name: Update Issue Test ( Persistence ) + gitlab_issue: + api_token: "{{ gitlab_api_token }}" + api_url: "{{ gitlab_host }}" + description_path: "{{ gitlab_description_path }}" + milestone_search: "{{ gitlab_milestone_search }}" + milestone_group_id: "{{ gitlab_milestone_group_id }}" + project: "{{ gitlab_project_group }}/{{ gitlab_project_name }}" + state: present + title: "Ansible test issue" + register: gitlab_issue_update_persistence - - name: Test issue Not Updated ( Persistence ) - assert: - that: - - gitlab_issue_update_persistence.issue.labels[0] == "{{ gitlab_labels[0] }}" - - gitlab_issue_update_persistence.issue.assignees[0].username == "{{ gitlab_assignee_ids[0] }}" + - name: Test issue Not Updated ( Persistence ) + assert: + that: + - gitlab_issue_update_persistence.issue.labels[0] == "{{ gitlab_labels[0] }}" + - gitlab_issue_update_persistence.issue.assignees[0].username == "{{ gitlab_assignee_ids[0] }}" - - name: Update Issue Test ( Removals ) - gitlab_issue: - api_token: "{{ gitlab_api_token }}" - api_url: "{{ gitlab_host }}" - assignee_ids: [] - description_path: "{{ gitlab_description_path }}" - labels: [] - milestone_search: "" - milestone_group_id: "" - project: "{{ gitlab_project_group }}/{{ gitlab_project_name }}" - state: present - title: "Ansible test issue" - register: gitlab_issue_update_removal 
+ - name: Update Issue Test ( Removals ) + gitlab_issue: + api_token: "{{ gitlab_api_token }}" + api_url: "{{ gitlab_host }}" + assignee_ids: [] + description_path: "{{ gitlab_description_path }}" + labels: [] + milestone_search: "" + milestone_group_id: "" + project: "{{ gitlab_project_group }}/{{ gitlab_project_name }}" + state: present + title: "Ansible test issue" + register: gitlab_issue_update_removal - - name: Test issue updated - assert: - that: - - gitlab_issue_update_removal.issue.labels == [] - - gitlab_issue_update_removal.issue.assignees == [] - - gitlab_issue_update_removal.issue.milestone == None + - name: Test issue updated + assert: + that: + - gitlab_issue_update_removal.issue.labels == [] + - gitlab_issue_update_removal.issue.assignees == [] + - gitlab_issue_update_removal.issue.milestone == None - - name: Delete Issue - gitlab_issue: - api_url: "{{ gitlab_host }}" - api_token: "{{ gitlab_api_token }}" - project: "{{ gitlab_project_group }}/{{ gitlab_project_name }}" - title: "Ansible test issue" - state: absent - register: gitlab_issue_delete + - name: Delete Issue + gitlab_issue: + api_url: "{{ gitlab_host }}" + api_token: "{{ gitlab_api_token }}" + project: "{{ gitlab_project_group }}/{{ gitlab_project_name }}" + title: "Ansible test issue" + state: absent + register: gitlab_issue_delete - - name: Test issue is deleted - assert: - that: - - gitlab_issue_delete is changed + - name: Test issue is deleted + assert: + that: + - gitlab_issue_delete is changed always: - - name: Delete Issue - gitlab_issue: - api_url: "{{ gitlab_host }}" - api_token: "{{ gitlab_api_token }}" - project: "{{ gitlab_project_group }}/{{ gitlab_project_name }}" - title: "Ansible test issue" - state_filter: "opened" - state: absent - register: gitlab_issue_delete - name: Clean up {{ gitlab_project_name }} - gitlab_project: - api_url: "{{ gitlab_host }}" - validate_certs: false - api_token: "{{ gitlab_api_token }}" - name: "{{ gitlab_project_name }}" - group: "{{ gitlab_project_group }}" - state: absent + - name: Delete Issue + gitlab_issue: + api_url: "{{ gitlab_host }}" + api_token: "{{ gitlab_api_token }}" + project: "{{ gitlab_project_group }}/{{ gitlab_project_name }}" + title: "Ansible test issue" + state_filter: "opened" + state: absent + register: gitlab_issue_delete + - name: Clean up {{ gitlab_project_name }} + gitlab_project: + api_url: "{{ gitlab_host }}" + validate_certs: false + api_token: "{{ gitlab_api_token }}" + name: "{{ gitlab_project_name }}" + group: "{{ gitlab_project_group }}" + state: absent diff --git a/tests/integration/targets/gitlab_label/README.md b/tests/integration/targets/gitlab_label/README.md index e27cb74c8c..06e662749f 100644 --- a/tests/integration/targets/gitlab_label/README.md +++ b/tests/integration/targets/gitlab_label/README.md @@ -1,4 +1,4 @@ - + diff --git a/tests/integration/targets/jenkins_credential/README.md b/tests/integration/targets/jenkins_credential/README.md new file mode 100644 --- /dev/null +++ b/tests/integration/targets/jenkins_credential/README.md +The integration test can be performed as follows: + +``` +# 1. Start docker-compose: +docker-compose -f tests/integration/targets/jenkins_credential/docker-compose.yml down +docker-compose -f tests/integration/targets/jenkins_credential/docker-compose.yml up -d + +# 2. 
Run the integration tests: +ansible-test integration jenkins_credential --allow-unsupported -v +``` diff --git a/tests/integration/targets/jenkins_credential/aliases b/tests/integration/targets/jenkins_credential/aliases new file mode 100644 index 0000000000..d2086eecf8 --- /dev/null +++ b/tests/integration/targets/jenkins_credential/aliases @@ -0,0 +1,5 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +unsupported \ No newline at end of file diff --git a/tests/integration/targets/jenkins_credential/docker-compose.yml b/tests/integration/targets/jenkins_credential/docker-compose.yml new file mode 100644 index 0000000000..c99c9ed575 --- /dev/null +++ b/tests/integration/targets/jenkins_credential/docker-compose.yml @@ -0,0 +1,21 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +version: "3.8" + +services: + jenkins: + image: bitnami/jenkins + container_name: jenkins-test + ports: + - "8080:8080" + environment: + JENKINS_USERNAME: "FishLegs" + JENKINS_PASSWORD: "MeatLug" + JENKINS_PLUGINS: "credentials,cloudbees-folder,plain-credentials,github-branch-source,github-api,scm-api,workflow-step-api" + healthcheck: + test: curl -s http://localhost:8080/login || exit 1 + interval: 10s + timeout: 10s + retries: 10 diff --git a/tests/integration/targets/jenkins_credential/tasks/add.yml b/tests/integration/targets/jenkins_credential/tasks/add.yml new file mode 100644 index 0000000000..c956773454 --- /dev/null +++ b/tests/integration/targets/jenkins_credential/tasks/add.yml @@ -0,0 +1,169 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Add CUSTOM scope (run {{ run_number }}) + community.general.jenkins_credential: + id: "CUSTOM" + type: "scope" + jenkins_user: "{{ jenkins_username }}" + token: "{{ token }}" + description: "Custom scope credential" + inc_path: + - "include/path" + - "include/path2" + exc_path: + - "exclude/path" + - "exclude/path2" + inc_hostname: + - "included-hostname" + - "included-hostname2" + exc_hostname: + - "excluded-hostname" + - "excluded-hostname2" + schemes: + - "http" + - "https" + inc_hostname_port: + - "included-hostname:7000" + - "included-hostname2:7000" + exc_hostname_port: + - "excluded-hostname:7000" + - "excluded-hostname2:7000" + register: custom_scope + +- name: Assert CUSTOM scope changed value + assert: + that: + - custom_scope.changed == (run_number == 1) + fail_msg: "CUSTOM scope changed status incorrect on run {{ run_number }}" + success_msg: "CUSTOM scope behaved correctly on run {{ run_number }}" + +- name: Add user_and_pass credential (run {{ run_number }}) + community.general.jenkins_credential: + id: "userpass-id" + type: "user_and_pass" + jenkins_user: "{{ jenkins_username }}" + token: "{{ token }}" + description: "User and password credential" + username: "user1" + password: "pass1" + register: userpass_cred + +- name: Assert user_and_pass changed value + assert: + that: + - userpass_cred.changed == (run_number == 1) + fail_msg: "user_and_pass credential changed status incorrect on run {{ run_number }}" + success_msg: "user_and_pass credential behaved correctly on run {{ run_number }}" + +- name: Add 
file credential to custom scope (run {{ run_number }}) + community.general.jenkins_credential: + id: "file-id" + type: "file" + jenkins_user: "{{ jenkins_username }}" + token: "{{ token }}" + scope: "CUSTOM" + description: "File credential" + file_path: "{{ output_dir }}/my-secret.pem" + register: file_cred + +- name: Assert file credential changed value + assert: + that: + - file_cred.changed == (run_number == 1) + fail_msg: "file credential changed status incorrect on run {{ run_number }}" + success_msg: "file credential behaved correctly on run {{ run_number }}" + +- name: Add text credential to folder (run {{ run_number }}) + community.general.jenkins_credential: + id: "text-id" + type: "text" + jenkins_user: "{{ jenkins_username }}" + token: "{{ token }}" + description: "Text credential" + secret: "mysecrettext" + location: "folder" + url: "http://localhost:8080/job/test" + register: text_cred + +- name: Assert text credential changed value + assert: + that: + - text_cred.changed == (run_number == 1) + fail_msg: "text credential changed status incorrect on run {{ run_number }}" + success_msg: "text credential behaved correctly on run {{ run_number }}" + +- name: Add githubApp credential (run {{ run_number }}) + community.general.jenkins_credential: + id: "githubapp-id" + type: "github_app" + jenkins_user: "{{ jenkins_username }}" + token: "{{ token }}" + description: "GitHub app credential" + appID: "12345" + private_key_path: "{{ output_dir }}/github.pem" + owner: "github_owner" + register: githubapp_cred + +- name: Assert githubApp credential changed value + assert: + that: + - githubapp_cred.changed == (run_number == 1) + fail_msg: "githubApp credential changed status incorrect on run {{ run_number }}" + success_msg: "githubApp credential behaved correctly on run {{ run_number }}" + +- name: Add sshKey credential (run {{ run_number }}) + community.general.jenkins_credential: + id: "sshkey-id" + type: "ssh_key" + jenkins_user: "{{ jenkins_username }}" + token: "{{ token }}" + description: "SSH key credential" + username: "sshuser" + private_key_path: "{{ output_dir }}/ssh_key" + passphrase: 1234 + register: sshkey_cred + +- name: Assert sshKey credential changed value + assert: + that: + - sshkey_cred.changed == (run_number == 1) + fail_msg: "sshKey credential changed status incorrect on run {{ run_number }}" + success_msg: "sshKey credential behaved correctly on run {{ run_number }}" + +- name: Add certificate (p12) credential (run {{ run_number }}) + community.general.jenkins_credential: + id: "certificate-id" + type: "certificate" + jenkins_user: "{{ jenkins_username }}" + token: "{{ token }}" + description: "Certificate credential" + password: "12345678901234" + file_path: "{{ output_dir }}/certificate.p12" + register: cert_p12_cred + +- name: Assert certificate (p12) credential changed value + assert: + that: + - cert_p12_cred.changed == (run_number == 1) + fail_msg: "certificate (p12) credential changed status incorrect on run {{ run_number }}" + success_msg: "certificate (p12) credential behaved correctly on run {{ run_number }}" + +- name: Add certificate (pem) credential (run {{ run_number }}) + community.general.jenkins_credential: + id: "certificate-id-pem" + type: "certificate" + jenkins_user: "{{ jenkins_username }}" + token: "{{ token }}" + description: "Certificate credential (pem)" + file_path: "{{ output_dir }}/cert.pem" + private_key_path: "{{ output_dir }}/private.key" + register: cert_pem_cred + +- name: Assert certificate (pem) credential changed value + assert: 
+    that:
+      - cert_pem_cred.changed == (run_number == 1)
+    fail_msg: "certificate (pem) credential changed status incorrect on run {{ run_number }}"
+    success_msg: "certificate (pem) credential behaved correctly on run {{ run_number }}"
diff --git a/tests/integration/targets/jenkins_credential/tasks/del.yml b/tests/integration/targets/jenkins_credential/tasks/del.yml
new file mode 100644
index 0000000000..036b65d3a1
--- /dev/null
+++ b/tests/integration/targets/jenkins_credential/tasks/del.yml
@@ -0,0 +1,128 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Delete user_and_pass credential (run {{ run_number }})
+  community.general.jenkins_credential:
+    id: "userpass-id"
+    jenkins_user: "{{ jenkins_username }}"
+    token: "{{ token }}"
+    state: "absent"
+  register: userpass_cred
+
+- name: Assert user_and_pass changed value
+  assert:
+    that:
+      - userpass_cred.changed == (run_number == 1)
+    fail_msg: "user_and_pass credential changed status incorrect on run {{ run_number }}"
+    success_msg: "user_and_pass credential behaved correctly on run {{ run_number }}"
+
+- name: Delete file credential from custom scope (run {{ run_number }})
+  community.general.jenkins_credential:
+    id: "file-id"
+    jenkins_user: "{{ jenkins_username }}"
+    token: "{{ token }}"
+    scope: "CUSTOM"
+    state: "absent"
+  register: file_cred
+
+- name: Assert file credential changed value
+  assert:
+    that:
+      - file_cred.changed == (run_number == 1)
+    fail_msg: "file credential changed status incorrect on run {{ run_number }}"
+    success_msg: "file credential behaved correctly on run {{ run_number }}"
+
+- name: Delete CUSTOM scope credential (run {{ run_number }})
+  community.general.jenkins_credential:
+    id: "CUSTOM"
+    type: "scope"
+    jenkins_user: "{{ jenkins_username }}"
+    token: "{{ token }}"
+    state: "absent"
+  register: custom_scope
+
+- name: Assert CUSTOM scope changed value
+  assert:
+    that:
+      - custom_scope.changed == (run_number == 1)
+    fail_msg: "CUSTOM scope changed status incorrect on run {{ run_number }}"
+    success_msg: "CUSTOM scope behaved correctly on run {{ run_number }}"
+
+- name: Delete text credential from folder (run {{ run_number }})
+  community.general.jenkins_credential:
+    id: "text-id"
+    jenkins_user: "{{ jenkins_username }}"
+    token: "{{ token }}"
+    state: "absent"
+    location: "folder"
+    url: "http://localhost:8080/job/test"
+  register: text_cred
+
+- name: Assert text credential changed value
+  assert:
+    that:
+      - text_cred.changed == (run_number == 1)
+    fail_msg: "text credential changed status incorrect on run {{ run_number }}"
+    success_msg: "text credential behaved correctly on run {{ run_number }}"
+
+- name: Delete githubApp credential (run {{ run_number }})
+  community.general.jenkins_credential:
+    id: "githubapp-id"
+    jenkins_user: "{{ jenkins_username }}"
+    token: "{{ token }}"
+    state: "absent"
+  register: githubapp_cred
+
+- name: Assert githubApp credential changed value
+  assert:
+    that:
+      - githubapp_cred.changed == (run_number == 1)
+    fail_msg: "githubApp credential changed status incorrect on run {{ run_number }}"
+    success_msg: "githubApp credential behaved correctly on run {{ run_number }}"
+
+- name: Delete sshKey credential (run {{ run_number }})
+  community.general.jenkins_credential:
+    id: "sshkey-id"
+    jenkins_user: "{{ jenkins_username }}"
+    token: "{{ token }}"
+    description: "SSH key credential"
+    state: "absent"
+  register: sshkey_cred
+
+- name: Assert sshKey credential changed value
+  assert:
+    that:
+      - sshkey_cred.changed == (run_number == 1)
+    fail_msg: "sshKey credential changed status incorrect on run {{ run_number }}"
+    success_msg: "sshKey credential behaved correctly on run {{ run_number }}"
+
+- name: Delete certificate credential (p12) (run {{ run_number }})
+  community.general.jenkins_credential:
+    id: "certificate-id"
+    jenkins_user: "{{ jenkins_username }}"
+    token: "{{ token }}"
+    state: "absent"
+  register: cert_p12_cred
+
+- name: Assert certificate (p12) credential changed value
+  assert:
+    that:
+      - cert_p12_cred.changed == (run_number == 1)
+    fail_msg: "certificate (p12) credential changed status incorrect on run {{ run_number }}"
+    success_msg: "certificate (p12) credential behaved correctly on run {{ run_number }}"
+
+- name: Delete certificate credential (pem) (run {{ run_number }})
+  community.general.jenkins_credential:
+    id: "certificate-id-pem"
+    jenkins_user: "{{ jenkins_username }}"
+    token: "{{ token }}"
+    state: "absent"
+  register: cert_pem_cred
+
+- name: Assert certificate (pem) credential changed value
+  assert:
+    that:
+      - cert_pem_cred.changed == (run_number == 1)
+    fail_msg: "certificate (pem) credential changed status incorrect on run {{ run_number }}"
+    success_msg: "certificate (pem) credential behaved correctly on run {{ run_number }}"
diff --git a/tests/integration/targets/jenkins_credential/tasks/edit.yml b/tests/integration/targets/jenkins_credential/tasks/edit.yml
new file mode 100644
index 0000000000..bd8d1eff7b
--- /dev/null
+++ b/tests/integration/targets/jenkins_credential/tasks/edit.yml
@@ -0,0 +1,192 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Generate token
+  community.general.jenkins_credential:
+    id: "{{ tokenUuid }}"
+    name: "test-token-2"
+    jenkins_user: "{{ jenkins_username }}"
+    jenkins_password: "{{ jenkins_password }}"
+    type: "token"
+    force: true
+  register: token_result
+
+- name: Set token in vars
+  set_fact:
+    token: "{{ token_result.token }}"
+    tokenUuid: "{{ token_result.token_uuid }}"
+
+- name: Edit CUSTOM scope credential
+  community.general.jenkins_credential:
+    id: "CUSTOM"
+    type: "scope"
+    jenkins_user: "{{ jenkins_username }}"
+    token: "{{ token }}"
+    description: "New custom scope credential"
+    inc_path:
+      - "new_include/path"
+      - "new_include/path2"
+    exc_path:
+      - "new_exclude/path"
+      - "new_exclude/path2"
+    inc_hostname:
+      - "new_included-hostname"
+      - "new_included-hostname2"
+    exc_hostname:
+      - "new_excluded-hostname"
+      - "new_excluded-hostname2"
+    schemes:
+      - "new_http"
+      - "new_https"
+    inc_hostname_port:
+      - "new_included-hostname:7000"
+      - "new_included-hostname2:7000"
+    exc_hostname_port:
+      - "new_excluded-hostname:7000"
+      - "new_excluded-hostname2:7000"
+    force: true
+  register: custom_scope
+
+- name: Assert CUSTOM scope changed value
+  assert:
+    that:
+      - custom_scope is changed
+    fail_msg: "CUSTOM scope did not report a change when it should have"
+    success_msg: "CUSTOM scope behaved correctly"
+
+- name: Edit user_and_pass credential
+  community.general.jenkins_credential:
+    id: "userpass-id"
+    type: "user_and_pass"
+    jenkins_user: "{{ jenkins_username }}"
+    token: "{{ token }}"
+    description: "new user and password credential"
+    username: "user2"
+    password: "pass2"
+    force: true
+  register: userpass_cred
+
+- name: Assert user_and_pass changed value
+  assert:
+    that:
+      - userpass_cred is changed
+    fail_msg: "user_and_pass credential changed status incorrect"
+    success_msg: "user_and_pass credential behaved correctly"
+
+- name: Edit file credential in custom scope
+  community.general.jenkins_credential:
+    id: "file-id"
+    type: "file"
+    jenkins_user: "{{ jenkins_username }}"
+    token: "{{ token }}"
+    scope: "CUSTOM"
+    description: "New file credential"
+    file_path: "{{ output_dir }}/my-secret.pem"
+    force: true
+  register: file_cred
+
+- name: Assert file credential changed value
+  assert:
+    that:
+      - file_cred is changed
+    fail_msg: "file credential changed status incorrect"
+    success_msg: "file credential behaved correctly"
+
+- name: Edit text credential in folder
+  community.general.jenkins_credential:
+    id: "text-id"
+    type: "text"
+    jenkins_user: "{{ jenkins_username }}"
+    token: "{{ token }}"
+    description: "New text credential"
+    secret: "mynewsecrettext"
+    location: "folder"
+    url: "http://localhost:8080/job/test"
+    force: true
+  register: text_cred
+
+- name: Assert text credential changed value
+  assert:
+    that:
+      - text_cred is changed
+    fail_msg: "text credential changed status incorrect"
+    success_msg: "text credential behaved correctly"
+
+- name: Edit githubApp credential
+  community.general.jenkins_credential:
+    id: "githubapp-id"
+    type: "github_app"
+    jenkins_user: "{{ jenkins_username }}"
+    token: "{{ token }}"
+    description: "New GitHub app credential"
+    appID: "12345678"
+    private_key_path: "{{ output_dir }}/github.pem"
+    owner: "new_github_owner"
+    force: true
+  register: githubapp_cred
+
+- name: Assert githubApp credential changed value
+  assert:
+    that:
+      - githubapp_cred is changed
+    fail_msg: "githubApp credential changed status incorrect"
+    success_msg: "githubApp credential behaved correctly"
+
+- name: Edit sshKey credential
+  community.general.jenkins_credential:
+    id: "sshkey-id"
+    type: "ssh_key"
+    jenkins_user: "{{ jenkins_username }}"
+    token: "{{ token }}"
+    description: "New SSH key credential"
+    username: "new_sshuser"
+    private_key_path: "{{ output_dir }}/ssh_key"
+    passphrase: 1234
+    force: true
+  register: sshkey_cred
+
+- name: Assert sshKey credential changed value
+  assert:
+    that:
+      - sshkey_cred is changed
+    fail_msg: "sshKey credential changed status incorrect"
+    success_msg: "sshKey credential behaved correctly"
+
+- name: Edit certificate credential (p12)
+  community.general.jenkins_credential:
+    id: "certificate-id"
+    type: "certificate"
+    jenkins_user: "{{ jenkins_username }}"
+    token: "{{ token }}"
+    description: "New certificate credential"
+    password: "12345678901234"
+    file_path: "{{ output_dir }}/certificate.p12"
+    force: true
+  register: cert_p12_cred
+
+- name: Assert certificate (p12) credential changed value
+  assert:
+    that:
+      - cert_p12_cred is changed
+    fail_msg: "certificate (p12) credential changed status incorrect"
+    success_msg: "certificate (p12) credential behaved correctly"
+
+- name: Edit certificate credential (pem)
+  community.general.jenkins_credential:
+    id: "certificate-id-pem"
+    type: "certificate"
+    jenkins_user: "{{ jenkins_username }}"
+    token: "{{ token }}"
+    description: "New certificate credential (pem)"
+    file_path: "{{ output_dir }}/cert.pem"
+    private_key_path: "{{ output_dir }}/private.key"
+    force: true
+  register: cert_pem_cred
+
+- name: Assert certificate (pem) credential changed value
+  assert:
+    that:
+      - cert_pem_cred is changed
+    fail_msg: "certificate (pem) credential changed status incorrect"
+    success_msg: "certificate (pem) credential behaved correctly"
diff --git a/tests/integration/targets/jenkins_credential/tasks/main.yml b/tests/integration/targets/jenkins_credential/tasks/main.yml
new file mode 100644
index 0000000000..88ee0693cf
--- /dev/null
+++ b/tests/integration/targets/jenkins_credential/tasks/main.yml
@@ -0,0 +1,79 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Prepare the test environment
+  include_tasks: pre.yml
+  vars:
+    output_dir: "{{ playbook_dir }}/generated"
+
+- name: Generate token
+  community.general.jenkins_credential:
+    name: "test-token"
+    jenkins_user: "{{ jenkins_username }}"
+    jenkins_password: "{{ jenkins_password }}"
+    type: "token"
+  no_log: true
+  register: token_result
+
+- name: Assert token and tokenUuid are returned
+  assert:
+    that:
+      - token_result.token is defined
+      - token_result.token_uuid is defined
+    fail_msg: "Token generation failed"
+    success_msg: "Token and tokenUuid successfully returned"
+
+- name: Set token facts
+  set_fact:
+    token: "{{ token_result.token }}"
+    tokenUuid: "{{ token_result.token_uuid }}"
+
+- name: Test adding new credentials and scopes
+  include_tasks: add.yml
+  vars:
+    run_number: 1
+    output_dir: "{{ playbook_dir }}/generated"
+
+- name: Test adding credentials and scopes when they already exist
+  include_tasks: add.yml
+  vars:
+    run_number: 2
+    output_dir: "{{ playbook_dir }}/generated"
+
+- name: Test editing credentials
+  include_tasks: edit.yml
+  vars:
+    output_dir: "{{ playbook_dir }}/generated"
+
+- name: Test deleting credentials and scopes
+  include_tasks: del.yml
+  vars:
+    run_number: 1
+
+- name: Test deleting credentials and scopes when they don't exist
+  include_tasks: del.yml
+  vars:
+    run_number: 2
+
+- name: Delete token
+  community.general.jenkins_credential:
+    id: "{{ tokenUuid }}"
+    name: "test-token-2"
+    jenkins_user: "{{ jenkins_username }}"
+    jenkins_password: "{{ jenkins_password }}"
+    state: "absent"
+    type: "token"
+  register: delete_token_result
+
+- name: Assert token deletion
+  assert:
+    that:
+      - delete_token_result is changed
+    fail_msg: "Token deletion failed"
+    success_msg: "Token successfully deleted"
+
+- name: Remove generated test files
+  ansible.builtin.file:
+    path: "{{ playbook_dir }}/generated"
+    state: absent
diff --git a/tests/integration/targets/jenkins_credential/tasks/pre.yml b/tests/integration/targets/jenkins_credential/tasks/pre.yml
new file mode 100644
index 0000000000..abb649ae1e
--- /dev/null
+++ b/tests/integration/targets/jenkins_credential/tasks/pre.yml
@@ -0,0 +1,92 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Include Jenkins user variables
+  include_vars: "{{ role_path }}/vars/credentials.yml"
+
+- name: Make sure Jenkins is ready
+  uri:
+    url: http://localhost:8080/login
+    status_code: 200
+    return_content: false
+    timeout: 30
+  register: result
+  retries: 10
+  delay: 5
+  until: result.status == 200
+
+- name: Get Jenkins crumb and save cookie
+  shell: |
+    curl -s -c cookies.txt -u {{ jenkins_username }}:{{ jenkins_password }} http://localhost:8080/crumbIssuer/api/json > crumb.json
+  args:
+    executable: /bin/bash
+
+- name: Read crumb value
+  set_fact:
+    crumb_data: "{{ lookup('file', 'crumb.json') | from_json }}"
+
+- name: Create Jenkins folder 'test'
+  shell: |
+    curl -b cookies.txt -u {{ jenkins_username }}:{{ jenkins_password }} \
+      -H "{{ crumb_data.crumbRequestField }}: {{ crumb_data.crumb }}" \
+      -H "Content-Type: application/xml" \
+      --data-binary @- http://localhost:8080/createItem?name=test <<EOF
+    <com.cloudbees.hudson.plugins.folder.Folder>
+      <description>Test Folder</description>
+      <properties/>
+    </com.cloudbees.hudson.plugins.folder.Folder>
+    EOF
+  args:
+    executable: /bin/bash
+
+- name: Create output directory
+  ansible.builtin.file:
+    path: "{{ output_dir }}"
+    state: directory
+    mode: "0755"
+
+- name: Generate private key
+  community.crypto.openssl_privatekey:
+    path: "{{ output_dir }}/private.key"
+    size: 2048
+    type: RSA
+
+- name: Generate CSR (certificate signing request)
+  community.crypto.openssl_csr:
+    path: "{{ output_dir }}/request.csr"
+    privatekey_path: "{{ output_dir }}/private.key"
+    common_name: "dummy.local"
+
+- name: Generate self-signed certificate
+  community.crypto.x509_certificate:
+    path: "{{ output_dir }}/cert.pem"
+    privatekey_path: "{{ output_dir }}/private.key"
+    csr_path: "{{ output_dir }}/request.csr"
+    provider: selfsigned
+
+- name: Create PKCS#12 (.p12) file
+  community.crypto.openssl_pkcs12:
+    path: "{{ output_dir }}/certificate.p12"
+    privatekey_path: "{{ output_dir }}/private.key"
+    certificate_path: "{{ output_dir }}/cert.pem"
+    friendly_name: "dummy-cert"
+    passphrase: "12345678901234"
+
+- name: Copy cert.pem to github.pem
+  ansible.builtin.copy:
+    src: "{{ output_dir }}/cert.pem"
+    dest: "{{ output_dir }}/github.pem"
+    remote_src: true
+
+- name: Copy private.key to my-secret.pem
+  ansible.builtin.copy:
+    src: "{{ output_dir }}/private.key"
+    dest: "{{ output_dir }}/my-secret.pem"
+    remote_src: true
+
+- name: Generate dummy SSH key
+  community.crypto.openssh_keypair:
+    path: "{{ output_dir }}/ssh_key"
+    type: rsa
+    size: 2048
diff --git a/tests/integration/targets/jenkins_credential/vars/credentials.yml b/tests/integration/targets/jenkins_credential/vars/credentials.yml
new file mode 100644
index 0000000000..27df98700b
--- /dev/null
+++ b/tests/integration/targets/jenkins_credential/vars/credentials.yml
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+jenkins_username: FishLegs
+jenkins_password: MeatLug
diff --git a/tests/integration/targets/kdeconfig/tasks/main.yml b/tests/integration/targets/kdeconfig/tasks/main.yml
index 790bb378dc..f2656c5c98 100644
--- a/tests/integration/targets/kdeconfig/tasks/main.yml
+++ b/tests/integration/targets/kdeconfig/tasks/main.yml
@@ -17,7 +17,7 @@
   copy:
     dest: "{{ kwriteconf_fake }}"
     src: kwriteconf_fake
-    mode: 0755
+    mode: "0755"

 - name: Simple test
   kdeconfig:
@@ -182,7 +182,7 @@
     values:
       - group: test
        key: test1
-        bool_value: on
+        bool_value: true
    kwriteconfig_path: "{{ kwriteconf_fake }}"
  register: result_bool_idem
  ignore_errors: true
@@ -207,7 +207,7 @@
        value: test2
      - groups: [testx, testy]
        key: testz
-        bool_value: on
+        bool_value: true
    kwriteconfig_path: "{{ kwriteconf_fake }}"
  register: result_checkmode
  ignore_errors: true
@@ -236,7 +236,7 @@
        value: test2
      - groups: [testx, testy]
        key: testz
-        bool_value: on
+        bool_value: true
    kwriteconfig_path: "{{ kwriteconf_fake }}"
  register: result_checkmode_apply
  ignore_errors: true
@@ -260,7 +260,7 @@
        value: test2
      - groups: [testx, testy]
        key: testz
-        bool_value: on
+        bool_value: true
    kwriteconfig_path: "{{ kwriteconf_fake }}"
  register: result_checkmode2
  ignore_errors: true
diff --git a/tests/integration/targets/kernel_blacklist/handlers/main.yml
b/tests/integration/targets/kernel_blacklist/handlers/main.yml index 814c9c51a8..ca97688005 100644 --- a/tests/integration/targets/kernel_blacklist/handlers/main.yml +++ b/tests/integration/targets/kernel_blacklist/handlers/main.yml @@ -7,4 +7,3 @@ ansible.builtin.file: path: /etc/modprobe.d state: absent - \ No newline at end of file diff --git a/tests/integration/targets/kernel_blacklist/tasks/main.yml b/tests/integration/targets/kernel_blacklist/tasks/main.yml index 48cd38a937..aecc9b68d5 100644 --- a/tests/integration/targets/kernel_blacklist/tasks/main.yml +++ b/tests/integration/targets/kernel_blacklist/tasks/main.yml @@ -51,7 +51,7 @@ - orig_stat.stat.size == stat_test_1.stat.size - orig_stat.stat.checksum == stat_test_1.stat.checksum - orig_stat.stat.mtime == stat_test_1.stat.mtime - - stat_test_1.stat.checksum == expected_content | checksum + - stat_test_1.stat.checksum == (expected_content | trim + '\n') | checksum vars: expected_content: | # Copyright (c) Ansible Project @@ -65,7 +65,7 @@ - name: test deprecation assert: that: - - "'deprecations' not in bl_test_1 or (ansible_version.major == 2 and ansible_version.minor == 12)" + - "'deprecations' not in bl_test_1" - name: add new item to list community.general.kernel_blacklist: @@ -83,7 +83,7 @@ assert: that: - bl_test_2 is changed - - slurp_test_2.content|b64decode == content + - slurp_test_2.content|b64decode == (content | trim + '\n') vars: content: | # Copyright (c) Ansible Project @@ -111,7 +111,7 @@ assert: that: - bl_test_3 is changed - - slurp_test_3.content|b64decode == content + - slurp_test_3.content|b64decode == (content | trim + '\n') vars: content: | # Copyright (c) Ansible Project diff --git a/tests/integration/targets/keycloak_authentication/README.md b/tests/integration/targets/keycloak_authentication/README.md new file mode 100644 index 0000000000..03ca31b199 --- /dev/null +++ b/tests/integration/targets/keycloak_authentication/README.md @@ -0,0 +1,10 @@ + +# Running keycloak_authentication module integration test + +Run integration tests: + + ansible-test integration -v keycloak_authentication --allow-unsupported --docker fedora35 --docker-network host \ No newline at end of file diff --git a/tests/integration/targets/proxmox_pool/aliases b/tests/integration/targets/keycloak_authentication/aliases similarity index 86% rename from tests/integration/targets/proxmox_pool/aliases rename to tests/integration/targets/keycloak_authentication/aliases index 525dcd332b..bd1f024441 100644 --- a/tests/integration/targets/proxmox_pool/aliases +++ b/tests/integration/targets/keycloak_authentication/aliases @@ -3,5 +3,3 @@ # SPDX-License-Identifier: GPL-3.0-or-later unsupported -proxmox_pool -proxmox_pool_member diff --git a/tests/integration/targets/keycloak_authentication/tasks/access_token.yml b/tests/integration/targets/keycloak_authentication/tasks/access_token.yml new file mode 100644 index 0000000000..145f4708bc --- /dev/null +++ b/tests/integration/targets/keycloak_authentication/tasks/access_token.yml @@ -0,0 +1,25 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +--- +- name: Get access token + ansible.builtin.uri: + url: "{{ url }}/realms/{{ admin_realm }}/protocol/openid-connect/token" + method: POST + status_code: 200 + headers: + Accept: application/json + User-agent: Ansible + body_format: form-urlencoded + body: + grant_type: "password" + client_id: "admin-cli" + 
username: "{{ admin_user }}" + password: "{{ admin_password }}" + register: token_response + no_log: true + +- name: Extract access token + ansible.builtin.set_fact: + access_token: "{{ token_response.json['access_token'] }}" + no_log: true diff --git a/tests/integration/targets/keycloak_authentication/tasks/main.yml b/tests/integration/targets/keycloak_authentication/tasks/main.yml new file mode 100644 index 0000000000..cfb193848c --- /dev/null +++ b/tests/integration/targets/keycloak_authentication/tasks/main.yml @@ -0,0 +1,185 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +- name: Install required packages + pip: + name: + - jmespath + - requests + register: result + until: result is success + +- name: Start container + community.docker.docker_container: + name: mykeycloak + image: "quay.io/keycloak/keycloak:{{ keycloak_version }}" + command: start-dev + env: + KC_HTTP_RELATIVE_PATH: /auth + KEYCLOAK_ADMIN: admin + KEYCLOAK_ADMIN_PASSWORD: password + ports: + - "{{ keycloak_port }}:8080" + detach: true + auto_remove: true + memory: 2200M + +- name: Wait for Keycloak + uri: + url: "{{ url }}/admin/" + status_code: 200 + validate_certs: false + register: result + until: result.status == 200 + retries: 10 + delay: 10 + +- name: Delete realm if exists + community.general.keycloak_realm: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + state: absent + +- name: Create realm + community.general.keycloak_realm: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + id: "{{ realm }}" + realm: "{{ realm }}" + state: present + +- name: Create an authentication flow from first broker login and add an execution to it. 
+  community.general.keycloak_authentication:
+    auth_keycloak_url: "{{ url }}"
+    auth_realm: "{{ admin_realm }}"
+    auth_username: "{{ admin_user }}"
+    auth_password: "{{ admin_password }}"
+    realm: "{{ realm }}"
+    alias: "Test first broker login"
+    copyFrom: "first broker login"
+    authenticationExecutions:
+      - providerId: "idp-review-profile"
+        requirement: "REQUIRED"
+        authenticationConfig:
+          alias: "Test review profile config"
+          config:
+            update.profile.on.first.login: "missing"
+
+- name: Create auth flow
+  community.general.keycloak_authentication:
+    auth_keycloak_url: "{{ url }}"
+    auth_realm: "{{ admin_realm }}"
+    auth_username: "{{ admin_user }}"
+    auth_password: "{{ admin_password }}"
+    realm: "{{ realm }}"
+    alias: "My conditional browser otp"
+    description: "browser based authentication with otp"
+    providerId: "basic-flow"
+    authenticationExecutions:
+      - displayName: Cookie
+        providerId: auth-cookie
+        requirement: ALTERNATIVE
+      - displayName: Kerberos
+        providerId: auth-spnego
+        requirement: DISABLED
+      - displayName: Identity Provider Redirector
+        providerId: identity-provider-redirector
+        requirement: ALTERNATIVE
+      - displayName: My browser otp forms
+        requirement: ALTERNATIVE
+      - displayName: Username Password Form
+        flowAlias: My browser otp forms
+        providerId: auth-username-password-form
+        requirement: REQUIRED
+      - displayName: My browser otp Browser - Conditional OTP
+        flowAlias: My browser otp forms
+        requirement: REQUIRED
+        providerId: "auth-conditional-otp-form"
+        authenticationConfig:
+          alias: my-conditional-otp-config
+          config:
+            defaultOtpOutcome: "force"
+            noOtpRequiredForHeaderPattern: "{{ keycloak_no_otp_required_pattern_original }}"
+    state: present
+
+- name: Modify auth flow with new config
+  community.general.keycloak_authentication:
+    auth_keycloak_url: "{{ url }}"
+    auth_realm: "{{ admin_realm }}"
+    auth_username: "{{ admin_user }}"
+    auth_password: "{{ admin_password }}"
+    realm: "{{ realm }}"
+    alias: "My conditional browser otp"
+    description: "browser based authentication with otp"
+    providerId: "basic-flow"
+    authenticationExecutions:
+      - displayName: Cookie
+        providerId: auth-cookie
+        requirement: ALTERNATIVE
+      - displayName: Kerberos
+        providerId: auth-spnego
+        requirement: DISABLED
+      - displayName: Identity Provider Redirector
+        providerId: identity-provider-redirector
+        requirement: ALTERNATIVE
+      - displayName: My browser otp forms
+        requirement: ALTERNATIVE
+      - displayName: Username Password Form
+        flowAlias: My browser otp forms
+        providerId: auth-username-password-form
+        requirement: REQUIRED
+      - displayName: My browser otp Browser - Conditional OTP
+        flowAlias: My browser otp forms
+        requirement: REQUIRED
+        providerId: "auth-conditional-otp-form"
+        authenticationConfig:
+          alias: my-conditional-otp-config
+          config:
+            defaultOtpOutcome: "force"
+            noOtpRequiredForHeaderPattern: "{{ keycloak_no_otp_required_pattern_modified }}"
+    state: present
+  register: result
+
+- name: Retrieve access token
+  ansible.builtin.include_tasks:
+    file: access_token.yml
+
+- name: Export realm
+  ansible.builtin.uri:
+    url: "{{ url }}/admin/realms/{{ realm }}/partial-export?exportClients=false&exportGroupsAndRoles=false"
+    method: POST
+    headers:
+      Accept: application/json
+      User-agent: Ansible
+      Authorization: "Bearer {{ access_token }}"
+    body_format: form-urlencoded
+    body: {}
+  register: exported_realm
+  no_log: true
+
+- name: Assert `my-conditional-otp-config` exists only once
+  ansible.builtin.assert:
+    that:
+      - exported_realm.json | community.general.json_query('authenticatorConfig[?alias==`my-conditional-otp-config`]') | length == 1
+
+- name: Delete auth flow
+  community.general.keycloak_authentication:
+    auth_keycloak_url: "{{ url }}"
+    auth_realm: "{{ admin_realm }}"
+    auth_username: "{{ admin_user }}"
+    auth_password: "{{ admin_password }}"
+    realm: "{{ realm }}"
+    alias: "My conditional browser otp"
+    state: absent
+  register: result
+
+- name: Remove container
+  community.docker.docker_container:
+    name: mykeycloak
+    state: absent
diff --git a/tests/integration/targets/keycloak_authentication/vars/main.yml b/tests/integration/targets/keycloak_authentication/vars/main.yml
new file mode 100644
index 0000000000..03244e18c0
--- /dev/null
+++ b/tests/integration/targets/keycloak_authentication/vars/main.yml
@@ -0,0 +1,16 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+keycloak_version: latest
+keycloak_port: 8080
+
+url: "http://localhost:{{ keycloak_port }}/auth"
+admin_realm: master
+admin_user: admin
+admin_password: password
+realm: myrealm
+
+
+keycloak_no_otp_required_pattern_original: "X-Forwarded-For: 10\\.[0-9\\.:]+"
+keycloak_no_otp_required_pattern_modified: "X-Original-Forwarded-For: 10\\.[0-9\\.:]+"
\ No newline at end of file
diff --git a/tests/integration/targets/keycloak_authz_custom_policy/tasks/main.yml b/tests/integration/targets/keycloak_authz_custom_policy/tasks/main.yml
index b22d751215..39500fc86d 100644
--- a/tests/integration/targets/keycloak_authz_custom_policy/tasks/main.yml
+++ b/tests/integration/targets/keycloak_authz_custom_policy/tasks/main.yml
@@ -93,7 +93,7 @@
     - result.end_state.type == "script-policy-2.js"
     - result.msg == 'Custom policy FirstCustomPolicy already exists'

-# Ensure that we can create multiple instances of the custom policy 
+# Ensure that we can create multiple instances of the custom policy
 - name: Create second instance of the custom policy
   community.general.keycloak_authz_custom_policy:
     auth_keycloak_url: "{{ url }}"
diff --git a/tests/integration/targets/keycloak_authz_permission/tasks/main.yml b/tests/integration/targets/keycloak_authz_permission/tasks/main.yml
index 16cb6806f2..889e59c506 100644
--- a/tests/integration/targets/keycloak_authz_permission/tasks/main.yml
+++ b/tests/integration/targets/keycloak_authz_permission/tasks/main.yml
@@ -93,8 +93,8 @@
     name: "ScopePermission"
     description: "Scope permission"
     resources:
-    - "Default Resource"
-    - "Other Resource"
+      - "Default Resource"
+      - "Other Resource"
     permission_type: scope
     scopes:
       - "file:delete"
diff --git a/tests/integration/targets/keycloak_client/tasks/main.yml b/tests/integration/targets/keycloak_client/tasks/main.yml
index e1a7d2ebfb..e22544ddd4 100644
--- a/tests/integration/targets/keycloak_client/tasks/main.yml
+++ b/tests/integration/targets/keycloak_client/tasks/main.yml
@@ -6,7 +6,7 @@
   uri:
     url: "{{ url }}/admin/"
     status_code: 200
-    validate_certs: no
+    validate_certs: false
   register: result
   until: result.status == 200
   retries: 10
@@ -72,7 +72,7 @@
     redirect_uris: '{{redirect_uris1}}'
     attributes: '{{client_attributes1}}'
     protocol_mappers: '{{protocol_mappers1}}'
-    authorization_services_enabled: False
+    authorization_services_enabled: false
   check_mode: true
   register: check_client_when_present_and_same

@@ -94,8 +94,8 @@
     redirect_uris: '{{redirect_uris1}}'
     attributes: '{{client_attributes1}}'
     protocol_mappers: '{{protocol_mappers1}}'
-
authorization_services_enabled: False - service_accounts_enabled: True + authorization_services_enabled: false + service_accounts_enabled: true check_mode: true register: check_client_when_present_and_changed diff --git a/tests/integration/targets/keycloak_client/vars/main.yml b/tests/integration/targets/keycloak_client/vars/main.yml index 498f93e709..93f0d4d73e 100644 --- a/tests/integration/targets/keycloak_client/vars/main.yml +++ b/tests/integration/targets/keycloak_client/vars/main.yml @@ -34,9 +34,9 @@ protocol_mappers1: "claim.name": "email" "user.attribute": "email" "jsonType.label": "String" - "id.token.claim": "true" - "access.token.claim": "true" - "userinfo.token.claim": "true" + "id.token.claim": true + "access.token.claim": true + "userinfo.token.claim": true - name: 'email_verified' protocol: 'openid-connect' @@ -45,9 +45,9 @@ protocol_mappers1: "claim.name": "email_verified" "user.attribute": "emailVerified" "jsonType.label": "boolean" - "id.token.claim": "true" - "access.token.claim": "true" - "userinfo.token.claim": "true" + "id.token.claim": true + "access.token.claim": true + "userinfo.token.claim": true - name: 'family_name' protocol: 'openid-connect' diff --git a/tests/integration/targets/keycloak_client_rolescope/tasks/main.yml b/tests/integration/targets/keycloak_client_rolescope/tasks/main.yml index 8675c9548d..d4c60d3f2e 100644 --- a/tests/integration/targets/keycloak_client_rolescope/tasks/main.yml +++ b/tests/integration/targets/keycloak_client_rolescope/tasks/main.yml @@ -6,7 +6,7 @@ uri: url: "{{ url }}/admin/" status_code: 200 - validate_certs: no + validate_certs: false register: result until: result.status == 200 retries: 10 @@ -39,9 +39,9 @@ auth_password: "{{ admin_password }}" name: "{{ item }}" realm: "{{ realm }}" - with_items: - - "{{ realm_role_admin }}" - - "{{ realm_role_user }}" + with_items: + - "{{ realm_role_admin }}" + - "{{ realm_role_user }}" - name: Client private community.general.keycloak_client: @@ -53,10 +53,10 @@ client_id: "{{ client_name_private }}" state: present redirect_uris: - - "https://my-backend-api.c.org/" - fullScopeAllowed: True + - "https://my-backend-api.c.org/" + fullScopeAllowed: true attributes: '{{client_attributes1}}' - public_client: False + public_client: false - name: Create a Keycloak client role community.general.keycloak_role: @@ -67,9 +67,9 @@ name: "{{ item }}" realm: "{{ realm }}" client_id: "{{ client_name_private }}" - with_items: - - "{{ client_role_admin }}" - - "{{ client_role_user }}" + with_items: + - "{{ client_role_admin }}" + - "{{ client_role_user }}" - name: Client public community.general.keycloak_client: @@ -80,10 +80,10 @@ realm: "{{ realm }}" client_id: "{{ client_name_public }}" redirect_uris: - - "https://my-onepage-app-frontend.c.org/" + - "https://my-onepage-app-frontend.c.org/" attributes: '{{client_attributes1}}' - full_scope_allowed: False - public_client: True + full_scope_allowed: false + public_client: true - name: Map roles to public client @@ -96,15 +96,15 @@ client_id: "{{ client_name_public }}" client_scope_id: "{{ client_name_private }}" role_names: - - "{{ client_role_admin }}" - - "{{ client_role_user }}" + - "{{ client_role_admin }}" + - "{{ client_role_user }}" register: result - name: Assert mapping created assert: that: - - result is changed - - result.end_state | length == 2 + - result is changed + - result.end_state | length == 2 - name: remap role user to public client community.general.keycloak_client_rolescope: @@ -116,15 +116,15 @@ client_id: "{{ client_name_public 
}}" client_scope_id: "{{ client_name_private }}" role_names: - - "{{ client_role_user }}" + - "{{ client_role_user }}" register: result - name: Assert mapping created assert: that: - - result is not changed - - result.end_state | length == 2 - + - result is not changed + - result.end_state | length == 2 + - name: Remove Map role admin to public client community.general.keycloak_client_rolescope: auth_keycloak_url: "{{ url }}" @@ -135,16 +135,16 @@ client_id: "{{ client_name_public }}" client_scope_id: "{{ client_name_private }}" role_names: - - "{{ client_role_admin }}" + - "{{ client_role_admin }}" state: absent register: result - name: Assert mapping deleted assert: that: - - result is changed - - result.end_state | length == 1 - - result.end_state[0].name == client_role_user + - result is changed + - result.end_state | length == 1 + - result.end_state[0].name == client_role_user - name: Map missing roles to public client community.general.keycloak_client_rolescope: @@ -156,15 +156,15 @@ client_id: "{{ client_name_public }}" client_scope_id: "{{ client_name_private }}" role_names: - - "{{ client_role_admin }}" - - "{{ client_role_not_exists }}" + - "{{ client_role_admin }}" + - "{{ client_role_not_exists }}" ignore_errors: true register: result - name: Assert failed mapping missing role assert: that: - - result is failed + - result is failed - name: Map roles duplicate community.general.keycloak_client_rolescope: @@ -176,15 +176,15 @@ client_id: "{{ client_name_public }}" client_scope_id: "{{ client_name_private }}" role_names: - - "{{ client_role_admin }}" - - "{{ client_role_admin }}" + - "{{ client_role_admin }}" + - "{{ client_role_admin }}" register: result - name: Assert result assert: that: - - result is changed - - result.end_state | length == 2 + - result is changed + - result.end_state | length == 2 - name: Map roles to private client community.general.keycloak_client_rolescope: @@ -195,14 +195,14 @@ realm: "{{ realm }}" client_id: "{{ client_name_private }}" role_names: - - "{{ realm_role_admin }}" + - "{{ realm_role_admin }}" ignore_errors: true register: result - name: Assert failed mapping role to full scope client assert: that: - - result is failed + - result is failed - name: Map realm role to public client community.general.keycloak_client_rolescope: @@ -213,14 +213,14 @@ realm: "{{ realm }}" client_id: "{{ client_name_public }}" role_names: - - "{{ realm_role_admin }}" + - "{{ realm_role_admin }}" register: result - name: Assert result assert: that: - - result is changed - - result.end_state | length == 1 + - result is changed + - result.end_state | length == 1 - name: Map two realm roles to public client community.general.keycloak_client_rolescope: @@ -231,15 +231,15 @@ realm: "{{ realm }}" client_id: "{{ client_name_public }}" role_names: - - "{{ realm_role_admin }}" - - "{{ realm_role_user }}" + - "{{ realm_role_admin }}" + - "{{ realm_role_user }}" register: result - name: Assert result assert: that: - - result is changed - - result.end_state | length == 2 + - result is changed + - result.end_state | length == 2 - name: Unmap all realm roles to public client community.general.keycloak_client_rolescope: @@ -250,16 +250,16 @@ realm: "{{ realm }}" client_id: "{{ client_name_public }}" role_names: - - "{{ realm_role_admin }}" - - "{{ realm_role_user }}" + - "{{ realm_role_admin }}" + - "{{ realm_role_user }}" state: absent register: result - name: Assert result assert: that: - - result is changed - - result.end_state | length == 0 + - result is changed + - 
result.end_state | length == 0 - name: Map missing realm role to public client community.general.keycloak_client_rolescope: @@ -270,14 +270,14 @@ realm: "{{ realm }}" client_id: "{{ client_name_public }}" role_names: - - "{{ realm_role_not_exists }}" + - "{{ realm_role_not_exists }}" ignore_errors: true register: result - name: Assert failed mapping missing realm role assert: that: - - result is failed + - result is failed - name: Check-mode try to Map realm roles to public client community.general.keycloak_client_rolescope: @@ -288,17 +288,17 @@ realm: "{{ realm }}" client_id: "{{ client_name_public }}" role_names: - - "{{ realm_role_admin }}" - - "{{ realm_role_user }}" + - "{{ realm_role_admin }}" + - "{{ realm_role_user }}" check_mode: true register: result - name: Assert result assert: that: - - result is changed - - result.end_state | length == 2 - + - result is changed + - result.end_state | length == 2 + - name: Check-mode step two, check if change where applied community.general.keycloak_client_rolescope: auth_keycloak_url: "{{ url }}" @@ -313,5 +313,5 @@ - name: Assert result assert: that: - - result is not changed - - result.end_state | length == 0 \ No newline at end of file + - result is not changed + - result.end_state | length == 0 diff --git a/tests/integration/targets/keycloak_clientsecret_info/tasks/main.yml b/tests/integration/targets/keycloak_clientsecret_info/tasks/main.yml index a0cacf1889..2b51e63068 100644 --- a/tests/integration/targets/keycloak_clientsecret_info/tasks/main.yml +++ b/tests/integration/targets/keycloak_clientsecret_info/tasks/main.yml @@ -32,7 +32,7 @@ assert: that: - fetch_by_client_id_result.clientsecret_info.type == "secret" - - "{{ fetch_by_client_id_result.clientsecret_info.value | length }} >= 32" + - fetch_by_client_id_result.clientsecret_info.value | length >= 32 - name: Keycloak Client fetch clientsecret by id community.general.keycloak_clientsecret_info: "{{ auth_args | combine(call_args) }}" diff --git a/tests/integration/targets/keycloak_clientsecret_regenerate/tasks/main.yml b/tests/integration/targets/keycloak_clientsecret_regenerate/tasks/main.yml index 9bd52698a2..56f864965d 100644 --- a/tests/integration/targets/keycloak_clientsecret_regenerate/tasks/main.yml +++ b/tests/integration/targets/keycloak_clientsecret_regenerate/tasks/main.yml @@ -32,7 +32,7 @@ assert: that: - regenerate_by_client_id.end_state.type == "secret" - - "{{ regenerate_by_client_id.end_state.value | length }} >= 32" + - regenerate_by_client_id.end_state.value | length >= 32 - name: Keycloak Client regenerate clientsecret by id community.general.keycloak_clientsecret_regenerate: "{{ auth_args | combine(call_args) }}" @@ -45,5 +45,5 @@ - name: Assert that client secret was regenerated assert: that: - - "{{ regenerate_by_id.end_state.value | length }} >= 32" + - regenerate_by_id.end_state.value | length >= 32 - regenerate_by_id.end_state.value != regenerate_by_client_id.end_state.value diff --git a/tests/integration/targets/keycloak_component_info/tasks/main.yml b/tests/integration/targets/keycloak_component_info/tasks/main.yml index c0ca5600fc..e84a1f751c 100644 --- a/tests/integration/targets/keycloak_component_info/tasks/main.yml +++ b/tests/integration/targets/keycloak_component_info/tasks/main.yml @@ -6,7 +6,7 @@ uri: url: "{{ url }}/admin/" status_code: 200 - validate_certs: no + validate_certs: false register: result until: result.status == 200 retries: 10 @@ -45,8 +45,8 @@ - name: Assert ldap is missing assert: that: - - result is not changed - - 
result.components | length == 0 + - result is not changed + - result.components | length == 0 - name: Create new user federation community.general.keycloak_user_federation: @@ -103,15 +103,15 @@ - name: Assert ldap exists assert: that: - - result is not changed - - result.components | length == 1 - - result.components[0].name == federation + - result is not changed + - result.components | length == 1 + - result.components[0].name == federation - name: Save ldap id set_fact: myLdapId: "{{ result.components[0].id }}" -- name: Retrive ldap subcomponents info +- name: Retrive ldap subcomponents info community.general.keycloak_component_info: auth_keycloak_url: "{{ url }}" auth_realm: "{{ admin_realm }}" @@ -124,10 +124,10 @@ - name: Assert components exists assert: that: - - result is not changed - - result.components | length > 0 + - result is not changed + - result.components | length > 0 -- name: Retrive ldap subcomponents filter by name +- name: Retrive ldap subcomponents filter by name community.general.keycloak_component_info: auth_keycloak_url: "{{ url }}" auth_realm: "{{ admin_realm }}" @@ -141,11 +141,11 @@ - name: Assert sub component with name "email" exists assert: that: - - result is not changed - - result.components | length == 1 - - result.components[0].name == "email" + - result is not changed + - result.components | length == 1 + - result.components[0].name == "email" -- name: Retrive ldap subcomponents filter by type +- name: Retrive ldap subcomponents filter by type community.general.keycloak_component_info: auth_keycloak_url: "{{ url }}" auth_realm: "{{ admin_realm }}" @@ -159,9 +159,9 @@ - name: Assert ldap sub components filter by type assert: that: - - result is not changed - - result.components | length > 0 - - result.components[0].providerType == "org.keycloak.storage.ldap.mappers.LDAPStorageMapper" + - result is not changed + - result.components | length > 0 + - result.components[0].providerType == "org.keycloak.storage.ldap.mappers.LDAPStorageMapper" - name: Retrive key info when absent community.general.keycloak_component_info: @@ -177,8 +177,8 @@ - name: Assert key is missing assert: that: - - result is not changed - - result.components | length == 0 + - result is not changed + - result.components | length == 0 - name: Create custom realm key community.general.keycloak_realm_key: @@ -211,8 +211,8 @@ - name: Assert key exists assert: that: - - result is not changed - - result.components | length == 1 + - result is not changed + - result.components | length == 1 - name: Retrive all realm components community.general.keycloak_component_info: @@ -226,8 +226,8 @@ - name: Assert key exists assert: that: - - result is not changed - - result.components | length > 0 + - result is not changed + - result.components | length > 0 - name: Retrive all ldap in realm community.general.keycloak_component_info: @@ -242,10 +242,10 @@ - name: Assert key exists assert: that: - - result is not changed - - result.components | length == 1 - - result.components[0].providerType == "org.keycloak.storage.UserStorageProvider" - - result.components[0].name == "myldap" + - result is not changed + - result.components | length == 1 + - result.components[0].providerType == "org.keycloak.storage.UserStorageProvider" + - result.components[0].name == "myldap" - name: Retrive component by name only community.general.keycloak_component_info: @@ -260,7 +260,7 @@ - name: Assert key exists assert: that: - - result is not changed - - result.components | length == 1 - - result.components[0].providerType == 
"org.keycloak.keys.KeyProvider" - - result.components[0].name == realm_key_name + - result is not changed + - result.components | length == 1 + - result.components[0].providerType == "org.keycloak.keys.KeyProvider" + - result.components[0].name == realm_key_name diff --git a/tests/integration/targets/keycloak_group/tasks/main.yml b/tests/integration/targets/keycloak_group/tasks/main.yml index f807b0640d..df567d7db7 100644 --- a/tests/integration/targets/keycloak_group/tasks/main.yml +++ b/tests/integration/targets/keycloak_group/tasks/main.yml @@ -145,11 +145,11 @@ realm: "{{ realm }}" name: my-new_group attributes: - attrib1: value1 - attrib2: value2 - attrib3: - - item1 - - item2 + attrib1: value1 + attrib2: value2 + attrib3: + - item1 + - item2 register: result - name: Assert that group was correctly created diff --git a/tests/integration/targets/keycloak_modules_authentication/README.md b/tests/integration/targets/keycloak_modules_authentication/README.md new file mode 100644 index 0000000000..a3d40a5674 --- /dev/null +++ b/tests/integration/targets/keycloak_modules_authentication/README.md @@ -0,0 +1,26 @@ + +# Running keycloak module authentication integration test + +To run the Keycloak module authentication integration test, start a keycloak server using Docker or Podman: + +```sh + podman|docker run -d --rm --name mykeycloak -p 8080:8080 -e KEYCLOAK_ADMIN=admin -e KEYCLOAK_ADMIN_PASSWORD=password quay.io/keycloak/keycloak:latest start-dev --http-relative-path /auth +``` + +Source Ansible env-setup from ansible github repository. + +Run the integration tests: + +```sh + ansible-test integration keycloak_role --python 3.10 --allow-unsupported +``` + +To cleanup, run: + +```sh + podman|docker stop mykeycloak +``` diff --git a/tests/integration/targets/proxmox/aliases b/tests/integration/targets/keycloak_modules_authentication/aliases similarity index 72% rename from tests/integration/targets/proxmox/aliases rename to tests/integration/targets/keycloak_modules_authentication/aliases index 5e5957a5c2..bd1f024441 100644 --- a/tests/integration/targets/proxmox/aliases +++ b/tests/integration/targets/keycloak_modules_authentication/aliases @@ -3,7 +3,3 @@ # SPDX-License-Identifier: GPL-3.0-or-later unsupported -proxmox_domain_info -proxmox_group_info -proxmox_user_info -proxmox_storage_info diff --git a/tests/integration/targets/keycloak_modules_authentication/tasks/main.yml b/tests/integration/targets/keycloak_modules_authentication/tasks/main.yml new file mode 100644 index 0000000000..b788865de9 --- /dev/null +++ b/tests/integration/targets/keycloak_modules_authentication/tasks/main.yml @@ -0,0 +1,342 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Reset public login in master admin-cli (if potentially previous test failed) + community.general.keycloak_client: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + auth_client_id: "admin-cli" + auth_client_secret: "{{ client_secret }}" + client_id: "admin-cli" + secret: "{{ client_secret }}" + public_client: true + state: present + +- name: Create realm + community.general.keycloak_realm: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + id: "{{ realm }}" + realm: "{{ realm }}" + state: present 
+ +- name: Create client + community.general.keycloak_client: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_id }}" + state: present + register: client + +- name: Create new realm role with username/password authentication + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + name: "{{ role }}" + description: "{{ keycloak_role_description }}" + state: present + register: result + +- name: Debug + debug: + var: result + +- name: Remove created realm role + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + name: "{{ role }}" + state: absent + register: result + +- name: Debug + debug: + var: result + +- name: Get Keycloak token + ansible.builtin.uri: + url: "{{ url }}/realms/{{ admin_realm }}/protocol/openid-connect/token" + method: POST + return_content: true + status_code: 200 + body_format: form-urlencoded + body: + grant_type: "password" + client_id: "admin-cli" + username: "{{ admin_user }}" + password: "{{ admin_password }}" + register: token_response + +- name: Extract tokens + ansible.builtin.set_fact: + access_token: "{{ token_response.json | json_query('access_token') }}" + refresh_token: "{{ token_response.json | json_query('refresh_token') }}" + +- name: Create new realm role with provided token authentication + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + token: "{{ access_token }}" + realm: "{{ realm }}" + name: "{{ role }}" + description: "{{ keycloak_role_description }}" + state: present + register: result + +- name: Debug + debug: + var: result + +- name: Remove created realm role + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + name: "{{ role }}" + state: absent + register: result + +- name: Debug + debug: + var: result + +- name: Create new realm role with invalid auth token and valid refresh token + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + token: "invalidtoken!!!" + refresh_token: "{{ refresh_token }}" + realm: "{{ realm }}" + name: "{{ role }}" + description: "{{ keycloak_role_description }}" + state: present + register: result + +- name: Debug + debug: + var: result + +- name: Remove created realm role + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + name: "{{ role }}" + state: absent + register: result + +- name: Debug + debug: + var: result + +- name: Create new realm role with invalid auth token and valid username/password + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + token: "invalidtoken!!!" 
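+    # Note: no refresh_token is supplied here, so the module is expected to
+    # fall back directly to the auth_username/auth_password pair above.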
+ realm: "{{ realm }}" + name: "{{ role }}" + description: "{{ keycloak_role_description }}" + state: present + register: result + +- name: Debug + debug: + var: result + +- name: Remove created realm role + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + name: "{{ role }}" + state: absent + register: result + +- name: Debug + debug: + var: result + +- name: Create new realm role with invalid auth token, invalid refresh token, and valid username/password + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + token: "invalidtoken!!!" + refresh_token: "invalidrefreshtoken!!!" + realm: "{{ realm }}" + name: "{{ role }}" + description: "{{ keycloak_role_description }}" + state: present + register: result + +- name: Debug + debug: + var: result + +- name: Remove created realm role + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + name: "{{ role }}" + state: absent + register: result + +- name: Debug + debug: + var: result + +- name: PREPARE - Temporarily disable public login in master admin-cli + community.general.keycloak_client: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + auth_client_id: "admin-cli" + auth_client_secret: "{{ client_secret }}" + client_id: "admin-cli" + secret: "{{ client_secret }}" + public_client: false + service_accounts_enabled: true + client_authenticator_type: "client-secret" + state: present + +- name: PREPARE - Get admin role id + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + auth_client_id: "admin-cli" + auth_client_secret: "{{ client_secret }}" + name: "admin" + register: admin_role + +- name: PREPARE - Assign admin role to admin-cli in master + community.general.keycloak_user_rolemapping: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + auth_client_id: "admin-cli" + auth_client_secret: "{{ client_secret }}" + realm: "master" + roles: + - name: "admin" + service_account_user_client_id: "admin-cli" + +- name: Create new realm role with valid client_id and client_secret + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_client_id: "admin-cli" + auth_client_secret: "{{ client_secret }}" + realm: "{{ realm }}" + name: "{{ role }}" + description: "{{ keycloak_role_description }}" + state: present + register: result + +- name: Debug + debug: + var: result + +- name: Reset temporarily disabled public login in master admin-cli + community.general.keycloak_client: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + auth_client_id: "admin-cli" + auth_client_secret: "{{ client_secret }}" + client_id: "admin-cli" + secret: "{{ client_secret }}" + public_client: true + state: present + +- name: Remove created realm role + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" 
+ auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + name: "{{ role }}" + state: absent + register: result + +- name: Debug + debug: + var: result + +### Unhappy path tests + +- name: Fail to create new realm role with invalid username/password + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "invalid_password" + realm: "{{ realm }}" + name: "{{ role }}" + description: "{{ keycloak_role_description }}" + state: present + register: result + failed_when: > + ("HTTP Error 401: Unauthorized" not in result.msg) + +- name: Fail to create new realm role with invalid auth token + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + token: "invalidtoken!!!" + realm: "{{ realm }}" + name: "{{ role }}" + description: "{{ keycloak_role_description }}" + state: present + register: result + failed_when: > + ("HTTP Error 401: Unauthorized" not in result.msg) + +- name: Fail to create new realm role with invalid auth and refresh tokens, and invalid username/password + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "invalid_password" + token: "invalidtoken!!!" + refresh_token: "invalidtoken!!!" + realm: "{{ realm }}" + name: "{{ role }}" + description: "{{ keycloak_role_description }}" + state: present + register: result + failed_when: > + ("HTTP Error 401: Unauthorized" not in result.msg) diff --git a/tests/integration/targets/keycloak_modules_authentication/vars/main.yml b/tests/integration/targets/keycloak_modules_authentication/vars/main.yml new file mode 100644 index 0000000000..f57d791d86 --- /dev/null +++ b/tests/integration/targets/keycloak_modules_authentication/vars/main.yml @@ -0,0 +1,21 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +url: http://localhost:8080/auth +admin_realm: master +admin_user: admin +admin_password: password +realm: myrealm +client_id: myclient +client_secret: myclientsecret +role: myrole + +keycloak_role_name: test +keycloak_role_description: test +keycloak_role_composite: false +keycloak_client_id: test-client +keycloak_client_name: test-client +keycloak_client_description: This is a client for testing purpose +role_state: present diff --git a/tests/integration/targets/keycloak_realm_key/tasks/main.yml b/tests/integration/targets/keycloak_realm_key/tasks/main.yml index c02950600f..e447d075e0 100644 --- a/tests/integration/targets/keycloak_realm_key/tasks/main.yml +++ b/tests/integration/targets/keycloak_realm_key/tasks/main.yml @@ -151,7 +151,8 @@ - result.end_state.config.enabled == ["true"] - result.end_state.config.algorithm == ["RS256"] - result.end_state.config.priority == ["140"] - - result.msg == "Realm key testkey would be changed: config.priority ['150'] -> ['140']" + - >- + result.msg == "Realm key testkey would be changed: config.priority ['150'] -> ['140']" - name: Update custom realm key community.general.keycloak_realm_key: @@ -184,7 +185,8 @@ - result.end_state.config.enabled == ["true"] - result.end_state.config.algorithm == ["RS256"] - result.end_state.config.priority == ["140"] - - result.msg == "Realm key testkey changed: config.priority ['150'] -> ['140']" + - >- + result.msg == "Realm key 
testkey changed: config.priority ['150'] -> ['140']" - name: Update custom realm key (test for idempotency) community.general.keycloak_realm_key: diff --git a/tests/integration/targets/keycloak_role/tasks/main.yml b/tests/integration/targets/keycloak_role/tasks/main.yml index c649b86808..9111928a55 100644 --- a/tests/integration/targets/keycloak_role/tasks/main.yml +++ b/tests/integration/targets/keycloak_role/tasks/main.yml @@ -45,8 +45,8 @@ that: - result is changed - result.existing == {} - - result.end_state.name == "{{ role }}" - - result.end_state.containerId == "{{ realm }}" + - result.end_state.name == role + - result.end_state.containerId == realm - name: Create existing realm role community.general.keycloak_role: @@ -89,8 +89,8 @@ assert: that: - result is changed - - result.existing.description == "{{ description_1 }}" - - result.end_state.description == "{{ description_2 }}" + - result.existing.description == description_1 + - result.end_state.description == description_2 - name: Delete existing realm role community.general.keycloak_role: @@ -156,8 +156,8 @@ that: - result is changed - result.existing == {} - - result.end_state.name == "{{ role }}" - - result.end_state.containerId == "{{ client.end_state.id }}" + - result.end_state.name == role + - result.end_state.containerId == client.end_state.id - name: Create existing client role community.general.keycloak_role: @@ -202,8 +202,8 @@ assert: that: - result is changed - - result.existing.description == "{{ description_1 }}" - - result.end_state.description == "{{ description_2 }}" + - result.existing.description == description_1 + - result.end_state.description == description_2 - name: Delete existing client role community.general.keycloak_role: @@ -480,4 +480,4 @@ assert: that: - result is not changed - - result.end_state == {} \ No newline at end of file + - result.end_state == {} diff --git a/tests/integration/targets/keycloak_role/vars/main.yml b/tests/integration/targets/keycloak_role/vars/main.yml index 0af55dfc5c..1da126873e 100644 --- a/tests/integration/targets/keycloak_role/vars/main.yml +++ b/tests/integration/targets/keycloak_role/vars/main.yml @@ -17,10 +17,10 @@ keycloak_role_name: test keycloak_role_description: test keycloak_role_composite: true keycloak_role_composites: - - name: view-clients + - name: view-clients client_id: "realm-management" state: present - - name: query-clients + - name: query-clients client_id: "realm-management" state: present - name: offline_access @@ -31,10 +31,10 @@ keycloak_client_description: This is a client for testing purpose role_state: present keycloak_role_composites_with_absent: - - name: view-clients + - name: view-clients client_id: "realm-management" state: present - - name: query-clients + - name: query-clients client_id: "realm-management" state: present - name: offline_access diff --git a/tests/integration/targets/keycloak_user_federation/tasks/main.yml b/tests/integration/targets/keycloak_user_federation/tasks/main.yml index ae0b4bf162..d193a48f07 100644 --- a/tests/integration/targets/keycloak_user_federation/tasks/main.yml +++ b/tests/integration/targets/keycloak_user_federation/tasks/main.yml @@ -64,7 +64,7 @@ that: - result is changed - result.existing == {} - - result.end_state.name == "{{ federation }}" + - result.end_state.name == federation - name: Create new user federation in admin realm community.general.keycloak_user_federation: @@ -117,7 +117,7 @@ that: - result is changed - result.existing == {} - - result.end_state.name == "{{ federation }}" + - 
+      - result.end_state.name == federation
 
 - name: Update existing user federation (no change)
   community.general.keycloak_user_federation:
@@ -170,9 +170,9 @@
     that:
      - result is not changed
      - result.existing != {}
-      - result.existing.name == "{{ federation }}"
+      - result.existing.name == federation
       - result.end_state != {}
-      - result.end_state.name == "{{ federation }}"
+      - result.end_state.name == federation
 
 - name: Update existing user federation (no change, admin realm)
   community.general.keycloak_user_federation:
@@ -225,9 +225,9 @@
     that:
      - result is not changed
      - result.existing != {}
-      - result.existing.name == "{{ federation }}"
+      - result.existing.name == federation
       - result.end_state != {}
-      - result.end_state.name == "{{ federation }}"
+      - result.end_state.name == federation
 
 - name: Update existing user federation (with change)
   community.general.keycloak_user_federation:
@@ -296,9 +296,9 @@
     that:
      - result is changed
      - result.existing != {}
-      - result.existing.name == "{{ federation }}"
+      - result.existing.name == federation
       - result.end_state != {}
-      - result.end_state.name == "{{ federation }}"
+      - result.end_state.name == federation
 
 - name: Delete existing user federation
   community.general.keycloak_user_federation:
@@ -411,7 +411,7 @@
     that:
      - result is changed
      - result.existing == {}
-      - result.end_state.name == "{{ federation }}"
+      - result.end_state.name == federation
 
 ## no point in retesting this, just doing it to clean up introduced server changes
 - name: Delete absent user federation
diff --git a/tests/integration/targets/keycloak_user_rolemapping/tasks/main.yml b/tests/integration/targets/keycloak_user_rolemapping/tasks/main.yml
index 1a897ad9af..ff56371e58 100644
--- a/tests/integration/targets/keycloak_user_rolemapping/tasks/main.yml
+++ b/tests/integration/targets/keycloak_user_rolemapping/tasks/main.yml
@@ -37,7 +37,8 @@
 
 - name: Map a realm role to client service account
   vars:
-    - roles: [ {'name': '{{ role }}'} ]
+    - roles:
+        - name: '{{ role }}'
   community.general.keycloak_user_rolemapping:
     auth_keycloak_url: "{{ url }}"
     auth_realm: "{{ admin_realm }}"
@@ -53,11 +54,12 @@
   assert:
     that:
      - result is changed
-      - result.end_state | selectattr("clientRole", "eq", false) | selectattr("name", "eq", "{{role}}") | list | count > 0
+      - result.end_state | selectattr("clientRole", "eq", false) | selectattr("name", "eq", role) | list | count > 0
 
 - name: Unmap a realm role from client service account
   vars:
-    - roles: [ {'name': '{{ role }}'} ]
+    - roles:
+        - name: '{{ role }}'
   community.general.keycloak_user_rolemapping:
     auth_keycloak_url: "{{ url }}"
     auth_realm: "{{ admin_realm }}"
@@ -74,8 +76,8 @@
     that:
      - result is changed
      - (result.end_state | length) == (result.existing | length) - 1
-      - result.existing | selectattr("clientRole", "eq", false) | selectattr("name", "eq", "{{role}}") | list | count > 0
-      - result.end_state | selectattr("clientRole", "eq", false) | selectattr("name", "eq", "{{role}}") | list | count == 0
+      - result.existing | selectattr("clientRole", "eq", false) | selectattr("name", "eq", role) | list | count > 0
+      - result.end_state | selectattr("clientRole", "eq", false) | selectattr("name", "eq", role) | list | count == 0
 
 - name: Delete existing realm role
   community.general.keycloak_role:
@@ -101,7 +103,8 @@
 
 - name: Map a client role to client service account
   vars:
-    - roles: [ {'name': '{{ role }}'} ]
+    - roles:
+        - name: '{{ role }}'
   community.general.keycloak_user_rolemapping:
     auth_keycloak_url: "{{ url }}"
     auth_realm: "{{ admin_realm }}"
@@ -118,11 +121,12 @@
   assert:
     that:
      - result is changed
-      - result.end_state | selectattr("clientRole", "eq", true) | selectattr("name", "eq", "{{role}}") | list | count > 0
+      - result.end_state | selectattr("clientRole", "eq", true) | selectattr("name", "eq", role) | list | count > 0
 
 - name: Unmap a client role from client service account
   vars:
-    - roles: [ {'name': '{{ role }}'} ]
+    - roles:
+        - name: '{{ role }}'
   community.general.keycloak_user_rolemapping:
     auth_keycloak_url: "{{ url }}"
     auth_realm: "{{ admin_realm }}"
@@ -140,4 +144,4 @@
     that:
      - result is changed
      - result.end_state == []
-      - result.existing | selectattr("clientRole", "eq", true) | selectattr("name", "eq", "{{role}}") | list | count > 0
+      - result.existing | selectattr("clientRole", "eq", true) | selectattr("name", "eq", role) | list | count > 0
diff --git a/tests/integration/targets/keycloak_userprofile/tasks/main.yml b/tests/integration/targets/keycloak_userprofile/tasks/main.yml
index 37b65d35ed..8ecce1a728 100644
--- a/tests/integration/targets/keycloak_userprofile/tasks/main.yml
+++ b/tests/integration/targets/keycloak_userprofile/tasks/main.yml
@@ -180,7 +180,7 @@
 #     parent_id: "{{ realm }}"
 #     config: "{{ config_updated }}"
 #   register: result
-# 
+#
 # - name: Assert that forced update ran correctly
 #   assert:
 #     that:
@@ -292,7 +292,7 @@
 
 - name: Remove Keycloak test realm
   community.general.keycloak_realm:
-    auth_keycloak_url: "{{ url }}" 
+    auth_keycloak_url: "{{ url }}"
     auth_realm: "{{ admin_realm }}"
     auth_username: "{{ admin_user }}"
     auth_password: "{{ admin_password }}"
diff --git a/tests/integration/targets/keycloak_userprofile/vars/main.yml b/tests/integration/targets/keycloak_userprofile/vars/main.yml
index 1f8ae6c823..b423a677c0 100644
--- a/tests/integration/targets/keycloak_userprofile/vars/main.yml
+++ b/tests/integration/targets/keycloak_userprofile/vars/main.yml
@@ -9,69 +9,69 @@ admin_user: admin
 admin_password: password
 realm: realm_userprofile_test
 attributes_default:
-- name: username
-  displayName: ${username}
-  validations:
-    length:
-      min: 3
-      max: 255
-    usernameProhibitedCharacters: {}
-    up_username_not_idn_homograph: {}
-  annotations: {}
-  permissions:
-    view:
-    - admin
-    - user
-    edit: []
-  multivalued: false
-- name: email
-  displayName: ${email}
-  validations:
-    email: {}
-    length:
-      max: 255
-  annotations: {}
-  required:
-    roles:
-    - user
-  permissions:
-    view:
-    - admin
-    - user
-    edit: []
-  multivalued: false
-- name: firstName
-  displayName: ${firstName}
-  validations:
-    length:
-      max: 255
-    personNameProhibitedCharacters: {}
-  annotations: {}
-  required:
-    roles:
-    - user
-  permissions:
-    view:
-    - admin
-    - user
-    edit: []
-  multivalued: false
-- name: lastName
-  displayName: ${lastName}
-  validations:
-    length:
-      max: 255
-    person_name_prohibited_characters: {}
-  annotations: {}
-  required:
-    roles:
-    - user
-  permissions:
-    view:
-    - admin
-    - user
-    edit: []
-  multivalued: false
+  - name: username
+    displayName: ${username}
+    validations:
+      length:
+        min: 3
+        max: 255
+      usernameProhibitedCharacters: {}
+      up_username_not_idn_homograph: {}
+    annotations: {}
+    permissions:
+      view:
+        - admin
+        - user
+      edit: []
+    multivalued: false
+  - name: email
+    displayName: ${email}
+    validations:
+      email: {}
+      length:
+        max: 255
+    annotations: {}
+    required:
+      roles:
+        - user
+    permissions:
+      view:
+        - admin
+        - user
+      edit: []
+    multivalued: false
+  - name: firstName
+    displayName: ${firstName}
+    validations:
+      length:
+        max: 255
+      personNameProhibitedCharacters: {}
+    annotations: {}
+    required:
+      roles:
+        - user
+    permissions:
+      view:
+        - admin
+        - user
+      edit: []
+    multivalued: false
+  - name: lastName
+    displayName: ${lastName}
+    validations:
+      length:
+        max: 255
+      person_name_prohibited_characters: {}
+    annotations: {}
+    required:
+      roles:
+        - user
+    permissions:
+      view:
+        - admin
+        - user
+      edit: []
+    multivalued: false
 attributes_additional:
   - name: additionalAttribute
     displayName: additionalAttribute
diff --git a/tests/integration/targets/keyring/tasks/main.yml b/tests/integration/targets/keyring/tasks/main.yml
index 3833018e80..2aa05fc0a6 100644
--- a/tests/integration/targets/keyring/tasks/main.yml
+++ b/tests/integration/targets/keyring/tasks/main.yml
@@ -5,7 +5,7 @@
 
 - name: Ensure required packages for headless keyring access are installed (RPM)
   ansible.builtin.package:
-    name: gnome-keyring 
+    name: gnome-keyring
   become: true
   when: "'localhost' not in inventory_hostname"
 
diff --git a/tests/integration/targets/ldap_inc/tasks/tests/basic.yml b/tests/integration/targets/ldap_inc/tasks/tests/basic.yml
index 4165ece743..5eb1a535ff 100644
--- a/tests/integration/targets/ldap_inc/tasks/tests/basic.yml
+++ b/tests/integration/targets/ldap_inc/tasks/tests/basic.yml
@@ -21,10 +21,10 @@
 - name: assert that test increment by default
   assert:
     that:
-    - output is not failed
-    - output.incremented
-    - output.value == "1001"
-    - output.rfc4525
+      - output is not failed
+      - output.incremented
+      - output.value == "1001"
+      - output.rfc4525
 
 - name: Test defined increment
   ldap_inc:
@@ -39,10 +39,10 @@
 - name: assert that test increment by default
   assert:
     that:
-    - output is not failed
-    - output.incremented
-    - output.value == "1003"
-    - output.rfc4525
+      - output is not failed
+      - output.incremented
+      - output.value == "1003"
+      - output.rfc4525
 
 - name: Test defined increment by 0
   ldap_inc:
@@ -57,9 +57,9 @@
 - name: assert that test defined increment by 0
   assert:
     that:
-    - output is not failed
-    - output.incremented == false
-    - output.value == "1003"
+      - output is not failed
+      - output.incremented == false
+      - output.value == "1003"
 
 - name: Test defined negative increment
   ldap_inc:
@@ -74,10 +74,10 @@
 - name: assert that test defined negative increment
   assert:
     that:
-    - output is not failed
-    - output.incremented
-    - output.value == "1002"
-    - output.rfc4525
+      - output is not failed
+      - output.incremented
+      - output.value == "1002"
+      - output.rfc4525
 
 - name: Test forcing classic method instead of automatic detection
   ldap_inc:
@@ -93,7 +93,7 @@
 - name: assert that test defined negative increment
   assert:
     that:
-    - output is not failed
-    - output.incremented
-    - output.value == "1001"
-    - output.rfc4525 == False
+      - output is not failed
+      - output.incremented
+      - output.value == "1001"
+      - output.rfc4525 == False
diff --git a/tests/integration/targets/ldap_search/tasks/tests/auth.yml b/tests/integration/targets/ldap_search/tasks/tests/auth.yml
index a8c7a13ee9..912178c364 100644
--- a/tests/integration/targets/ldap_search/tasks/tests/auth.yml
+++ b/tests/integration/targets/ldap_search/tasks/tests/auth.yml
@@ -22,9 +22,9 @@
 - name: assert that test LDAP user can read its password
   assert:
     that:
-    - output is not failed
-    - output.results | length == 1
-    - output.results.0.userPassword is defined
+      - output is not failed
+      - output.results | length == 1
+      - output.results.0.userPassword is defined
 
 - name: Test simple search for cert authenticated user
   ldap_search:
@@ -42,6 +42,6 @@
 - name: assert that test LDAP user can read its password
   assert:
     that:
-    - output is not failed
-    - output.results | length == 1
-    - output.results.0.userPassword is defined
+      - output is not failed
+      - output.results | length == 1
+      - output.results.0.userPassword is defined
diff --git a/tests/integration/targets/ldap_search/tasks/tests/basic.yml b/tests/integration/targets/ldap_search/tasks/tests/basic.yml
index 11e5d6562c..5b98c61648 100644
--- a/tests/integration/targets/ldap_search/tasks/tests/basic.yml
+++ b/tests/integration/targets/ldap_search/tasks/tests/basic.yml
@@ -20,9 +20,9 @@
 - name: assert that test LDAP user can be found
   assert:
     that:
-    - output is not failed
-    - output.results | length == 1
-    - output.results.0.displayName == "LDAP Test"
+      - output is not failed
+      - output.results | length == 1
+      - output.results.0.displayName == "LDAP Test"
 
 - name: Test simple search for a user with no results
   ldap_search:
@@ -35,5 +35,5 @@
 - name: assert that the output is empty
   assert:
     that:
-    - output is not failed
-    - output.results | length == 0
+      - output is not failed
+      - output.results | length == 0
diff --git a/tests/integration/targets/ldap_search/tasks/tests/pages.yml b/tests/integration/targets/ldap_search/tasks/tests/pages.yml
index 32575854ba..e0742c5598 100644
--- a/tests/integration/targets/ldap_search/tasks/tests/pages.yml
+++ b/tests/integration/targets/ldap_search/tasks/tests/pages.yml
@@ -20,5 +20,5 @@
 - name: assert that the right number of results are returned
   assert:
     that:
-    - output is not failed
-    - output.results | length == 2
+      - output is not failed
+      - output.results | length == 2
diff --git a/tests/integration/targets/ldap_search/tasks/tests/schema.yml b/tests/integration/targets/ldap_search/tasks/tests/schema.yml
index 892eac3cb3..ca26305b82 100644
--- a/tests/integration/targets/ldap_search/tasks/tests/schema.yml
+++ b/tests/integration/targets/ldap_search/tasks/tests/schema.yml
@@ -20,6 +20,7 @@
 - name: Assert that the schema output is correct
   assert:
     that:
-    - output is not failed
-    - output.results | length >= 1
-    - "{{ 'displayName' in output.results.0.attrs }}"
+      - output is not failed
+      - output.results | length >= 1
+      - >-
+        'displayName' in output.results.0.attrs
diff --git a/tests/integration/targets/listen_ports_facts/tasks/main.yml b/tests/integration/targets/listen_ports_facts/tasks/main.yml
index 0e583e7a13..5da5b03784 100644
--- a/tests/integration/targets/listen_ports_facts/tasks/main.yml
+++ b/tests/integration/targets/listen_ports_facts/tasks/main.yml
@@ -110,3 +110,32 @@
   loop: "{{ [tcp_listen, udp_listen]|flatten }}"
   when: item.name == 'nc'
   ignore_errors: true
+
+
+- when: ansible_os_family == "Debian"
+  block:
+    - name: Remove netstat and ss dependencies to simulate missing executables
+      ansible.builtin.package:
+        name:
+          - net-tools
+          - iproute2
+        state: absent
+      ignore_errors: true
+
+    - name: Trigger listen_ports_facts with missing tools
+      community.general.listen_ports_facts:
+      register: listen_ports_failure_result
+      ignore_errors: true
+
+    - name: Assert graceful failure when dependencies are missing
+      ansible.builtin.assert:
+        that:
+          - listen_ports_failure_result is failed
+          - "'Unable to find any of the supported commands' in listen_ports_failure_result.msg"
+
+    - name: Reinstall netstat and ss dependencies after test
+      ansible.builtin.package:
+        name:
+          - net-tools
+          - iproute2
+        state: present
diff --git a/tests/integration/targets/locale_gen/aliases b/tests/integration/targets/locale_gen/aliases
index a5d3e27f9e..735d7bd0d3 100644
--- a/tests/integration/targets/locale_gen/aliases
+++ b/tests/integration/targets/locale_gen/aliases
@@ -6,5 +6,7 @@
 azp/posix/3
 destructive
 needs/root
 skip/aix
+skip/fedora
 skip/freebsd
 skip/macos
+skip/rhel
diff --git a/tests/integration/targets/locale_gen/tasks/main.yml b/tests/integration/targets/locale_gen/tasks/main.yml
index 358247c4ee..d24ef5d29d 100644
--- a/tests/integration/targets/locale_gen/tasks/main.yml
+++ b/tests/integration/targets/locale_gen/tasks/main.yml
@@ -10,7 +10,7 @@
 
 - name: Bail out if not supported
   ansible.builtin.meta: end_play
-  when: ansible_distribution not in ('Ubuntu', 'Debian')
+  when: ansible_distribution not in ('Ubuntu', 'Debian', 'Archlinux')
 
 - name: Run tests auto-detecting mechanism
   ansible.builtin.include_tasks: basic.yml
diff --git a/tests/integration/targets/locale_gen/vars/main.yml b/tests/integration/targets/locale_gen/vars/main.yml
index 23358e6374..6d4f7de60d 100644
--- a/tests/integration/targets/locale_gen/vars/main.yml
+++ b/tests/integration/targets/locale_gen/vars/main.yml
@@ -24,3 +24,6 @@ locale_list_basic:
       - tr_CY.UTF-8
       - tr_CY.utf8
     skip_removal: false
+  - localegen: de_CH.UTF-8
+    locales: [de_CH.utf8]
+    skip_removal: false
diff --git a/tests/integration/targets/lookup_cartesian/tasks/main.yml b/tests/integration/targets/lookup_cartesian/tasks/main.yml
index 5575f22ba6..3351537ffc 100644
--- a/tests/integration/targets/lookup_cartesian/tasks/main.yml
+++ b/tests/integration/targets/lookup_cartesian/tasks/main.yml
@@ -12,21 +12,21 @@
   debug: var=item
   register: product
   with_community.general.cartesian:
-  - - A
-    - B
-    - C
-  - - '1'
-    - '2'
-    - '3'
+    - - A
+      - B
+      - C
+    - - '1'
+      - '2'
+      - '3'
 
 - name: Verify cartesian lookup
   assert:
     that:
-    - product.results[0]['item'] == ["A", "1"]
-    - product.results[1]['item'] == ["A", "2"]
-    - product.results[2]['item'] == ["A", "3"]
-    - product.results[3]['item'] == ["B", "1"]
-    - product.results[4]['item'] == ["B", "2"]
-    - product.results[5]['item'] == ["B", "3"]
-    - product.results[6]['item'] == ["C", "1"]
-    - product.results[7]['item'] == ["C", "2"]
-    - product.results[8]['item'] == ["C", "3"]
+      - product.results[0]['item'] == ["A", "1"]
+      - product.results[1]['item'] == ["A", "2"]
+      - product.results[2]['item'] == ["A", "3"]
+      - product.results[3]['item'] == ["B", "1"]
+      - product.results[4]['item'] == ["B", "2"]
+      - product.results[5]['item'] == ["B", "3"]
+      - product.results[6]['item'] == ["C", "1"]
+      - product.results[7]['item'] == ["C", "2"]
+      - product.results[8]['item'] == ["C", "3"]
diff --git a/tests/integration/targets/lookup_dependent/tasks/main.yml b/tests/integration/targets/lookup_dependent/tasks/main.yml
index b2f2097294..a1328cc19e 100644
--- a/tests/integration/targets/lookup_dependent/tasks/main.yml
+++ b/tests/integration/targets/lookup_dependent/tasks/main.yml
@@ -136,7 +136,8 @@
   assert:
     that:
      - eval_error is failed
-      - eval_error.msg.startswith("Caught \"'foo' is undefined")
+      - >-
+        "Caught \"'foo' is undefined" in eval_error.msg
 
 - name: "Test 5: same variable name reused"
   debug:
@@ -151,7 +152,8 @@
   assert:
     that:
      - eval_error is failed
-      - eval_error.msg.startswith("Caught \"'x' is undefined")
+      - >-
+        "Caught \"'x' is undefined" in eval_error.msg
 
 - name: "Test 6: multi-value dict"
   debug:
@@ -166,7 +168,8 @@
   assert:
     that:
      - eval_error is failed
-      - eval_error.msg == 'Parameter 0 must be a one-element dictionary, got 2 elements'
+      - >-
+        'Parameter 0 must be a one-element dictionary, got 2 elements' in eval_error.msg
 
 - name: "Test 7: empty dict"
   debug:
@@ -180,4 +183,5 @@
   assert:
     that:
      - eval_error is failed
-      - eval_error.msg == 'Parameter 0 must be a one-element dictionary, got 0 elements'
+      - >-
+        'Parameter 0 must be a one-element dictionary, got 0 elements' in eval_error.msg
diff --git a/tests/integration/targets/lookup_etcd3/defaults/main.yml b/tests/integration/targets/lookup_etcd3/defaults/main.yml
index de726382ba..68751f3d64 100644
--- a/tests/integration/targets/lookup_etcd3/defaults/main.yml
+++ b/tests/integration/targets/lookup_etcd3/defaults/main.yml
@@ -3,5 +3,5 @@
 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
 # SPDX-License-Identifier: GPL-3.0-or-later
 
-  etcd3_prefix: '/keyprefix/'
-  etcd3_singlekey: '/singlekeypath'
+etcd3_prefix: '/keyprefix/'
+etcd3_singlekey: '/singlekeypath'
diff --git a/tests/integration/targets/lookup_etcd3/tasks/main.yml b/tests/integration/targets/lookup_etcd3/tasks/main.yml
index 47f1916c02..2e150452b2 100644
--- a/tests/integration/targets/lookup_etcd3/tasks/main.yml
+++ b/tests/integration/targets/lookup_etcd3/tasks/main.yml
@@ -15,9 +15,9 @@
     value: "bar{{ item }}"
     state: present
   loop:
-  - 1
-  - 2
-  - 3
+    - 1
+    - 2
+    - 3
 
 - name: put a single key/values in etcd
   etcd3:
diff --git a/tests/integration/targets/lookup_etcd3/tasks/tests.yml b/tests/integration/targets/lookup_etcd3/tasks/tests.yml
index 929c6f142a..132d2ce9ac 100644
--- a/tests/integration/targets/lookup_etcd3/tasks/tests.yml
+++ b/tests/integration/targets/lookup_etcd3/tasks/tests.yml
@@ -5,23 +5,23 @@
 # SPDX-License-Identifier: GPL-3.0-or-later
 
 - block:
-  - name: 'Fetch secrets using "etcd3" lookup'
-    set_fact:
-      etcdoutkey1: "{{ lookup('community.general.etcd3', etcd3_prefix, prefix=True) }}"
-      etcdoutkey2: "{{ lookup('community.general.etcd3', etcd3_singlekey) }}"
-      key_inexistent: "{{ lookup('community.general.etcd3', 'inexistent_key') }}"
-
-  - name: 'Check etcd values'
-    assert:
-      msg: 'unexpected etcd3 values'
-      that:
-      - etcdoutkey1 is sequence
-      - etcdoutkey1 | length() == 3
-      - etcdoutkey1[0].value == 'bar1'
-      - etcdoutkey1[1].value == 'bar2'
-      - etcdoutkey1[2].value == 'bar3'
-      - etcdoutkey2 is sequence
-      - etcdoutkey2 | length() == 2
-      - etcdoutkey2.value == 'foobar'
-      - key_inexistent is sequence
-      - key_inexistent | length() == 0
+    - name: 'Fetch secrets using "etcd3" lookup'
+      set_fact:
+        etcdoutkey1: "{{ lookup('community.general.etcd3', etcd3_prefix, prefix=True) }}"
+        etcdoutkey2: "{{ lookup('community.general.etcd3', etcd3_singlekey) }}"
+        key_inexistent: "{{ lookup('community.general.etcd3', 'inexistent_key') }}"
+
+    - name: 'Check etcd values'
+      assert:
+        msg: 'unexpected etcd3 values'
+        that:
+          - etcdoutkey1 is sequence
+          - etcdoutkey1 | length() == 3
+          - etcdoutkey1[0].value == 'bar1'
+          - etcdoutkey1[1].value == 'bar2'
+          - etcdoutkey1[2].value == 'bar3'
+          - etcdoutkey2 is sequence
+          - etcdoutkey2 | length() == 2
+          - etcdoutkey2.value == 'foobar'
+          - key_inexistent is sequence
+          - key_inexistent | length() == 0
diff --git a/tests/integration/targets/lookup_flattened/tasks/main.yml b/tests/integration/targets/lookup_flattened/tasks/main.yml
index 37af1327bf..b4d57eb176 100644
--- a/tests/integration/targets/lookup_flattened/tasks/main.yml
+++ b/tests/integration/targets/lookup_flattened/tasks/main.yml
@@ -11,14 +11,14 @@
 - name: test with_flattened
   set_fact: '{{ item }}=flattened'
   with_community.general.flattened:
-  - - a__
-    - - b__
-      - - c__
-  - d__
+    - - a__
+      - - b__
+        - - c__
+    - d__
 
 - name: verify with_flattened results
   assert:
     that:
-    - a__ == 'flattened'
-    - b__ == 'flattened'
-    - c__ == 'flattened'
-    - d__ == 'flattened'
+      - a__ == 'flattened'
+      - b__ == 'flattened'
+      - c__ == 'flattened'
+      - d__ == 'flattened'
diff --git a/tests/integration/targets/lookup_lmdb_kv/aliases b/tests/integration/targets/lookup_lmdb_kv/aliases
index 9c7febe241..5ecc8bfd38 100644
--- a/tests/integration/targets/lookup_lmdb_kv/aliases
+++ b/tests/integration/targets/lookup_lmdb_kv/aliases
@@ -5,3 +5,4 @@
 azp/posix/2
 destructive
 skip/aix
+disabled # TODO: currently broken
diff --git a/tests/integration/targets/lookup_lmdb_kv/test.yml b/tests/integration/targets/lookup_lmdb_kv/test.yml
index 8a88bca456..4d2ea0d973 100644
--- a/tests/integration/targets/lookup_lmdb_kv/test.yml
+++ b/tests/integration/targets/lookup_lmdb_kv/test.yml
@@ -5,27 +5,27 @@
 
 - hosts: localhost
   tasks:
-  - debug:
-      msg: '{{ query("community.general.lmdb_kv", "nl", "be", "lu", db="jp.mdb") }}'
-  - debug:
-      var: item.1
-    loop: '{{ query("community.general.lmdb_kv", db="jp.mdb") }}'
-  - assert:
-      that:
-      - query('community.general.lmdb_kv', 'nl', 'be', 'lu', db='jp.mdb') == ['Netherlands', 'Belgium', 'Luxembourg']
-      - query('community.general.lmdb_kv', db='jp.mdb')|length == 5
-  - assert:
-      that:
-      - item.0 == 'nl'
-      - item.1 == 'Netherlands'
-    vars:
-      lmdb_kv_db: jp.mdb
-    with_community.general.lmdb_kv:
-    - n*
-  - assert:
-      that:
-      - item == 'Belgium'
-    vars:
-      lmdb_kv_db: jp.mdb
-    with_community.general.lmdb_kv:
-    - be
+    - debug:
+        msg: '{{ query("community.general.lmdb_kv", "nl", "be", "lu", db="jp.mdb") }}'
+    - debug:
+        var: item.1
+      loop: '{{ query("community.general.lmdb_kv", db="jp.mdb") }}'
+    - assert:
+        that:
+          - query('community.general.lmdb_kv', 'nl', 'be', 'lu', db='jp.mdb') == ['Netherlands', 'Belgium', 'Luxembourg']
+          - query('community.general.lmdb_kv', db='jp.mdb')|length == 5
+    - assert:
+        that:
+          - item.0 == 'nl'
+          - item.1 == 'Netherlands'
+      vars:
+        lmdb_kv_db: jp.mdb
+      with_community.general.lmdb_kv:
+        - n*
+    - assert:
+        that:
+          - item == 'Belgium'
+      vars:
+        lmdb_kv_db: jp.mdb
+      with_community.general.lmdb_kv:
+        - be
diff --git a/tests/integration/targets/lookup_passwordstore/tasks/package.yml b/tests/integration/targets/lookup_passwordstore/tasks/package.yml
index e5ccd5677d..43f44401e8 100644
--- a/tests/integration/targets/lookup_passwordstore/tasks/package.yml
+++ b/tests/integration/targets/lookup_passwordstore/tasks/package.yml
@@ -23,17 +23,17 @@
 
 - block:
     # OpenSUSE Leap>=15.0 don't include password-store in main repo
-  - name: SUSE | Add security:privacy repo
-    template:
-      src: security-privacy.repo.j2
-      dest: /etc/zypp/repos.d/security:privacy.repo
+    - name: SUSE | Add security:privacy repo
+      template:
+        src: security-privacy.repo.j2
+        dest: /etc/zypp/repos.d/security:privacy.repo
 
-  - name: SUSE | Install package
-    package:
-      name: password-store
-      state: present
-      update_cache: true
-      disable_gpg_check: true
+    - name: SUSE | Install package
+      package:
+        name: password-store
+        state: present
+        update_cache: true
+        disable_gpg_check: true
   when: ansible_facts.pkg_mgr in ['zypper', 'community.general.zypper']
 
 # See https://github.com/gopasspw/gopass/issues/1849#issuecomment-802789285
@@ -41,20 +41,20 @@
   when: ansible_facts.os_family == 'Debian'
   become: true
   block:
-  - name: Fetch gopass repo keyring
-    ansible.builtin.get_url:
-      url: https://packages.gopass.pw/repos/gopass/gopass-archive-keyring.gpg
-      dest: /usr/share/keyrings/gopass-archive-keyring.gpg
-  - name: Add gopass repo
-    ansible.builtin.apt_repository:
-      repo: "deb [arch=amd64,arm64,armhf \
-        signed-by=/usr/share/keyrings/gopass-archive-keyring.gpg] \
-        https://packages.gopass.pw/repos/gopass stable main"
-      state: present
-  - name: Update apt-cache and install gopass package
-    ansible.builtin.apt:
-      name: gopass
-      update_cache: true
+    - name: Fetch gopass repo keyring
+      ansible.builtin.get_url:
+        url: https://packages.gopass.pw/repos/gopass/gopass-archive-keyring.gpg
+        dest: /usr/share/keyrings/gopass-archive-keyring.gpg
+    - name: Add gopass repo
+      ansible.builtin.apt_repository:
+        repo: "deb [arch=amd64,arm64,armhf \
+          signed-by=/usr/share/keyrings/gopass-archive-keyring.gpg] \
+          https://packages.gopass.pw/repos/gopass stable main"
+        state: present
+    - name: Update apt-cache and install gopass package
+      ansible.builtin.apt:
+        name: gopass
+        update_cache: true
 
 - name: Install on macOS
   when: ansible_facts.distribution == 'MacOSX'
diff --git a/tests/integration/targets/lookup_passwordstore/tasks/password_tests.yml b/tests/integration/targets/lookup_passwordstore/tasks/password_tests.yml
index a94529e460..e3a8b4e4b7 100644
--- a/tests/integration/targets/lookup_passwordstore/tasks/password_tests.yml
+++ b/tests/integration/targets/lookup_passwordstore/tasks/password_tests.yml
@@ -3,128 +3,128 @@
 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
 # SPDX-License-Identifier: GPL-3.0-or-later
 
-  - name: Create a password ({{ backend }})
-    set_fact:
-      newpass: "{{ lookup('community.general.passwordstore', 'test-pass', length=8, create=true, backend=backend) }}"
+- name: Create a password ({{ backend }})
+  set_fact:
+    newpass: "{{ lookup('community.general.passwordstore', 'test-pass', length=8, create=true, backend=backend) }}"
 
-  - name: Fetch password from an existing file ({{ backend }})
-    set_fact:
-      readpass: "{{ lookup('community.general.passwordstore', 'test-pass', backend=backend) }}"
+- name: Fetch password from an existing file ({{ backend }})
+  set_fact:
+    readpass: "{{ lookup('community.general.passwordstore', 'test-pass', backend=backend) }}"
 
-  - name: Verify password ({{ backend }})
-    assert:
-      that:
-      - readpass == newpass
+- name: Verify password ({{ backend }})
+  assert:
+    that:
+      - readpass == newpass
 
-  - name: Create a password with equal sign ({{ backend }})
-    set_fact:
-      newpass: "{{ lookup('community.general.passwordstore', 'test-pass-equal userpass=SimpleSample= create=true', backend=backend) }}"
+- name: Create a password with equal sign ({{ backend }})
+  set_fact:
+    newpass: "{{ lookup('community.general.passwordstore', 'test-pass-equal userpass=SimpleSample= create=true', backend=backend) }}"
 
-  - name: Fetch a password with equal sign ({{ backend }})
-    set_fact:
-      readpass: "{{ lookup('community.general.passwordstore', 'test-pass-equal', backend=backend) }}"
+- name: Fetch a password with equal sign ({{ backend }})
+  set_fact:
+    readpass: "{{ lookup('community.general.passwordstore', 'test-pass-equal', backend=backend) }}"
 
-  - name: Verify password ({{ backend }})
-    assert:
-      that:
-      - readpass == newpass
+- name: Verify password ({{ backend }})
+  assert:
+    that:
+      - readpass == newpass
 
-  - name: Create a password using missing=create ({{ backend }})
-    set_fact:
-      newpass: "{{ lookup('community.general.passwordstore', 'test-missing-create', missing='create', length=8, backend=backend) }}"
+- name: Create a password using missing=create ({{ backend }})
+  set_fact:
+    newpass: "{{ lookup('community.general.passwordstore', 'test-missing-create', missing='create', length=8, backend=backend) }}"
 
-  - name: Fetch password from an existing file ({{ backend }})
-    set_fact:
'test-missing-create', backend=backend) }}" +- name: Fetch password from an existing file ({{ backend }}) + set_fact: + readpass: "{{ lookup('community.general.passwordstore', 'test-missing-create', backend=backend) }}" - - name: Verify password ({{ backend }}) - assert: - that: - - readpass == newpass +- name: Verify password ({{ backend }}) + assert: + that: + - readpass == newpass - - name: Fetch password from existing file using missing=empty ({{ backend }}) - set_fact: - readpass: "{{ lookup('community.general.passwordstore', 'test-missing-create', missing='empty', backend=backend) }}" +- name: Fetch password from existing file using missing=empty ({{ backend }}) + set_fact: + readpass: "{{ lookup('community.general.passwordstore', 'test-missing-create', missing='empty', backend=backend) }}" - - name: Verify password ({{ backend }}) - assert: - that: - - readpass == newpass +- name: Verify password ({{ backend }}) + assert: + that: + - readpass == newpass - - name: Fetch password from non-existing file using missing=empty ({{ backend }}) - set_fact: - readpass: "{{ query('community.general.passwordstore', 'test-missing-pass', missing='empty', backend=backend) }}" +- name: Fetch password from non-existing file using missing=empty ({{ backend }}) + set_fact: + readpass: "{{ query('community.general.passwordstore', 'test-missing-pass', missing='empty', backend=backend) }}" - - name: Verify password ({{ backend }}) - assert: - that: - - readpass == [ none ] +- name: Verify password ({{ backend }}) + assert: + that: + - readpass == [ none ] - - name: Create the YAML password ({{ backend }}) - command: "{{ backend }} insert -m -f test-yaml-pass" - args: - stdin: | - testpassword - key: | - multi - line +- name: Create the YAML password ({{ backend }}) + command: "{{ backend }} insert -m -f test-yaml-pass" + args: + stdin: | + testpassword + key: | + multi + line - - name: Fetch a password with YAML subkey ({{ backend }}) - set_fact: - readyamlpass: "{{ lookup('community.general.passwordstore', 'test-yaml-pass', subkey='key', backend=backend) }}" +- name: Fetch a password with YAML subkey ({{ backend }}) + set_fact: + readyamlpass: "{{ lookup('community.general.passwordstore', 'test-yaml-pass', subkey='key', backend=backend) }}" - - name: Read a yaml subkey ({{ backend }}) - assert: - that: - - readyamlpass == 'multi\nline\n' +- name: Read a yaml subkey ({{ backend }}) + assert: + that: + - readyamlpass == 'multi\nline\n' - - name: Create a non-YAML multiline file ({{ backend }}) - command: "{{ backend }} insert -m -f test-multiline-pass" - args: - stdin: | - testpassword - random additional line +- name: Create a non-YAML multiline file ({{ backend }}) + command: "{{ backend }} insert -m -f test-multiline-pass" + args: + stdin: | + testpassword + random additional line - - name: Fetch password from multiline file ({{ backend }}) - set_fact: - readyamlpass: "{{ lookup('community.general.passwordstore', 'test-multiline-pass', backend=backend) }}" +- name: Fetch password from multiline file ({{ backend }}) + set_fact: + readyamlpass: "{{ lookup('community.general.passwordstore', 'test-multiline-pass', backend=backend) }}" - - name: Multiline pass only returns first line ({{ backend }}) - assert: - that: - - readyamlpass == 'testpassword' +- name: Multiline pass only returns first line ({{ backend }}) + assert: + that: + - readyamlpass == 'testpassword' - - name: Fetch all from multiline file ({{ backend }}) - set_fact: - readyamlpass: "{{ lookup('community.general.passwordstore', 
'test-multiline-pass', returnall='yes', backend=backend) }}" +- name: Fetch all from multiline file ({{ backend }}) + set_fact: + readyamlpass: "{{ lookup('community.general.passwordstore', 'test-multiline-pass', returnall='yes', backend=backend) }}" - - name: Multiline pass returnall returns everything in the file ({{ backend }}) - assert: - that: - - readyamlpass == 'testpassword\nrandom additional line\n' +- name: Multiline pass returnall returns everything in the file ({{ backend }}) + assert: + that: + - readyamlpass == 'testpassword\nrandom additional line\n' - - name: Create a password in a folder ({{ backend }}) - set_fact: - newpass: "{{ lookup('community.general.passwordstore', 'folder/test-pass', length=8, create=true, backend=backend) }}" +- name: Create a password in a folder ({{ backend }}) + set_fact: + newpass: "{{ lookup('community.general.passwordstore', 'folder/test-pass', length=8, create=true, backend=backend) }}" - - name: Fetch password from folder ({{ backend }}) - set_fact: - readpass: "{{ lookup('community.general.passwordstore', 'folder/test-pass', backend=backend) }}" +- name: Fetch password from folder ({{ backend }}) + set_fact: + readpass: "{{ lookup('community.general.passwordstore', 'folder/test-pass', backend=backend) }}" - - name: Verify password from folder ({{ backend }}) - assert: - that: - - readpass == newpass +- name: Verify password from folder ({{ backend }}) + assert: + that: + - readpass == newpass - - name: Try to read folder as passname ({{ backend }}) - set_fact: - newpass: "{{ lookup('community.general.passwordstore', 'folder', backend=backend) }}" - ignore_errors: true - register: eval_error +- name: Try to read folder as passname ({{ backend }}) + set_fact: + newpass: "{{ lookup('community.general.passwordstore', 'folder', backend=backend) }}" + ignore_errors: true + register: eval_error - - name: Make sure reading folder as passname failed ({{ backend }}) - assert: - that: - - eval_error is failed - - '"passname folder not found" in eval_error.msg' - when: backend != "gopass" # Remove this line once gopass backend can handle this +- name: Make sure reading folder as passname failed ({{ backend }}) + assert: + that: + - eval_error is failed + - '"passname folder not found" in eval_error.msg' + when: backend != "gopass" # Remove this line once gopass backend can handle this diff --git a/tests/integration/targets/lookup_passwordstore/tasks/tests.yml b/tests/integration/targets/lookup_passwordstore/tasks/tests.yml index 65a578c962..3928087072 100644 --- a/tests/integration/targets/lookup_passwordstore/tasks/tests.yml +++ b/tests/integration/targets/lookup_passwordstore/tasks/tests.yml @@ -30,7 +30,7 @@ - name: Store path of pass executable set_fact: - passpath: "{{ result.stdout }}" + passpath: "{{ result.stdout }}" - name: Move original pass into place if there was a leftover command: @@ -49,7 +49,7 @@ - name: Store path of gopass executable set_fact: - gopasspath: "{{ result.stdout }}" + gopasspath: "{{ result.stdout }}" - name: Move original gopass into place if there was a leftover command: @@ -64,9 +64,9 @@ command: "{{ item }} --version" register: versions loop: - - "{{ gpg2_bin }}" - - pass - - gopass + - "{{ gpg2_bin }}" + - pass + - gopass - name: Output versions of tools debug: @@ -131,52 +131,52 @@ - name: Test pass compatibility shim detection block: - - name: Move original pass out of the way - command: - argv: - - mv - - "{{ passpath }}" - - "{{ passpath }}.testorig" - args: - creates: "{{ passpath }}.testorig" + - name: Move 
+    - name: Move original pass out of the way
+      command:
+        argv:
+          - mv
+          - "{{ passpath }}"
+          - "{{ passpath }}.testorig"
+      args:
+        creates: "{{ passpath }}.testorig"
 
-  - name: Create dummy pass script
-    ansible.builtin.copy:
-      content: |
-        #!/bin/sh
-        echo "shim_ok"
-      dest: "{{ passpath }}"
-      mode: '0755'
+    - name: Create dummy pass script
+      ansible.builtin.copy:
+        content: |
+          #!/bin/sh
+          echo "shim_ok"
+        dest: "{{ passpath }}"
+        mode: '0755'
 
-  - name: Try reading from non-existent passwordstore location with different pass utility
-    set_fact:
-      newpass: "{{ lookup('community.general.passwordstore', 'test-pass') }}"
-    environment:
-      PATH: "/tmp"
+    - name: Try reading from non-existent passwordstore location with different pass utility
+      set_fact:
+        newpass: "{{ lookup('community.general.passwordstore', 'test-pass') }}"
+      environment:
+        PATH: "/tmp"
 
-  - name: Verify password received from shim
-    assert:
-      that:
-      - newpass == "shim_ok"
+    - name: Verify password received from shim
+      assert:
+        that:
+          - newpass == "shim_ok"
 
-  - name: Try to read folder as passname with a different pass utility
-    set_fact:
-      newpass: "{{ lookup('community.general.passwordstore', 'folder') }}"
+    - name: Try to read folder as passname with a different pass utility
+      set_fact:
+        newpass: "{{ lookup('community.general.passwordstore', 'folder') }}"
 
-  - name: Verify password received from shim
-    assert:
-      that:
-      - newpass == "shim_ok"
+    - name: Verify password received from shim
+      assert:
+        that:
+          - newpass == "shim_ok"
 
   always:
-  - name: Move original pass back into place
-    command:
-      argv:
-        - mv
-        - "{{ passpath }}.testorig"
-        - "{{ passpath }}"
-    args:
-      removes: "{{ passpath }}.testorig"
+    - name: Move original pass back into place
+      command:
+        argv:
+          - mv
+          - "{{ passpath }}.testorig"
+          - "{{ passpath }}"
+      args:
+        removes: "{{ passpath }}.testorig"
 
 # This are in addition to the real gopass tests above
 # and verify plugin logic
@@ -184,56 +184,56 @@
   vars:
     passwordstore_backend: "gopass"
   block:
-  - name: Check if gopass executable exists
-    stat:
-      path: "{{ gopasspath }}"
-    register: gopass_check
+    - name: Check if gopass executable exists
+      stat:
+        path: "{{ gopasspath }}"
+      register: gopass_check
 
-  - name: Move original gopass out of the way
-    command:
-      argv:
-        - mv
-        - "{{ gopasspath }}"
-        - "{{ gopasspath }}.testorig"
-    args:
-      creates: "{{ gopasspath }}.testorig"
-    when: gopass_check.stat.exists == true
+    - name: Move original gopass out of the way
+      command:
+        argv:
+          - mv
+          - "{{ gopasspath }}"
+          - "{{ gopasspath }}.testorig"
+      args:
+        creates: "{{ gopasspath }}.testorig"
+      when: gopass_check.stat.exists == true
 
-  - name: Create mocked gopass script
-    ansible.builtin.copy:
-      content: |
-        #!/bin/sh
-        if [ "$GOPASS_NO_REMINDER" != "YES" ]; then
-          exit 1
-        fi
-        if [ "$1" = "--version" ]; then
-          exit 2
-        fi
-        echo "gopass_ok"
-      dest: "{{ gopasspath }}"
-      mode: '0755'
+    - name: Create mocked gopass script
+      ansible.builtin.copy:
+        content: |
+          #!/bin/sh
+          if [ "$GOPASS_NO_REMINDER" != "YES" ]; then
+            exit 1
+          fi
+          if [ "$1" = "--version" ]; then
+            exit 2
+          fi
+          echo "gopass_ok"
+        dest: "{{ gopasspath }}"
+        mode: '0755'
 
-  - name: Try to read folder as passname using gopass mock
-    set_fact:
-      newpass: "{{ lookup('community.general.passwordstore', 'folder') }}"
+    - name: Try to read folder as passname using gopass mock
+      set_fact:
+        newpass: "{{ lookup('community.general.passwordstore', 'folder') }}"
 
-  - name: Verify password received from gopass mock
-    assert:
-      that:
-      - newpass == "gopass_ok"
+    - name: Verify password received from gopass mock
+      assert:
+        that:
+          - newpass == "gopass_ok"
 
   always:
-  - name: Remove mocked gopass
-    ansible.builtin.file:
-      path: "{{ gopasspath }}"
-      state: absent
+    - name: Remove mocked gopass
+      ansible.builtin.file:
+        path: "{{ gopasspath }}"
+        state: absent
 
-  - name: Move original gopass back into place
-    command:
-      argv:
-        - mv
-        - "{{ gopasspath }}.testorig"
-        - "{{ gopasspath }}"
-    args:
-      removes: "{{ gopasspath }}.testorig"
-    when: gopass_check.stat.exists == true
+    - name: Move original gopass back into place
+      command:
+        argv:
+          - mv
+          - "{{ gopasspath }}.testorig"
+          - "{{ gopasspath }}"
+      args:
+        removes: "{{ gopasspath }}.testorig"
+      when: gopass_check.stat.exists == true
diff --git a/tests/integration/targets/lookup_random_pet/test.yml b/tests/integration/targets/lookup_random_pet/test.yml
index c61461867a..a40ab2262c 100644
--- a/tests/integration/targets/lookup_random_pet/test.yml
+++ b/tests/integration/targets/lookup_random_pet/test.yml
@@ -6,25 +6,25 @@
 - hosts: localhost
   gather_facts: false
   tasks:
-  - name: Call plugin
-    set_fact:
-      result1: "{{ query('community.general.random_pet', words=3) }}"
-      result2: "{{ query('community.general.random_pet', length=3) }}"
-      result3: "{{ query('community.general.random_pet', prefix='kubernetes') }}"
-      result4: "{{ query('community.general.random_pet', separator='_') }}"
-      result5: "{{ query('community.general.random_pet', words=2, length=6, prefix='kubernetes', separator='_') }}"
+    - name: Call plugin
+      set_fact:
+        result1: "{{ query('community.general.random_pet', words=3) }}"
+        result2: "{{ query('community.general.random_pet', length=3) }}"
+        result3: "{{ query('community.general.random_pet', prefix='kubernetes') }}"
+        result4: "{{ query('community.general.random_pet', separator='_') }}"
+        result5: "{{ query('community.general.random_pet', words=2, length=6, prefix='kubernetes', separator='_') }}"
 
-  - name: Check results
-    assert:
-      that:
-      - result1 | length == 1
-      - result1[0].split('-') | length == 3
-      - result2 | length == 1
-      - result2[0].split('-')[0] | length <= 3
-      - result3 | length == 1
-      - result3[0].split('-')[0] == 'kubernetes'
-      - result4 | length == 1
-      - result4[0].split('_') | length == 2
-      - result5 | length == 1
-      - result5[0].split('_') | length == 3
-      - result5[0].split('_')[0] == 'kubernetes'
+    - name: Check results
+      assert:
+        that:
+          - result1 | length == 1
+          - result1[0].split('-') | length == 3
+          - result2 | length == 1
+          - result2[0].split('-')[0] | length <= 3
+          - result3 | length == 1
+          - result3[0].split('-')[0] == 'kubernetes'
+          - result4 | length == 1
+          - result4[0].split('_') | length == 2
+          - result5 | length == 1
+          - result5[0].split('_') | length == 3
+          - result5[0].split('_')[0] == 'kubernetes'
diff --git a/tests/integration/targets/lookup_random_string/test.yml b/tests/integration/targets/lookup_random_string/test.yml
index b1f6234102..b74116d04a 100644
--- a/tests/integration/targets/lookup_random_string/test.yml
+++ b/tests/integration/targets/lookup_random_string/test.yml
@@ -6,48 +6,48 @@
 - hosts: localhost
   gather_facts: false
   tasks:
-  - name: Call plugin
-    set_fact:
-      result1: "{{ query('community.general.random_string') }}"
-      result2: "{{ query('community.general.random_string', length=0) }}"
-      result3: "{{ query('community.general.random_string', length=10) }}"
-      result4: "{{ query('community.general.random_string', length=-1) }}"
-      result5: "{{ query('community.general.random_string', override_special='_', min_special=1) }}"
query('community.general.random_string', upper=false, special=false) }}" # lower case only - result7: "{{ query('community.general.random_string', lower=false, special=false) }}" # upper case only - result8: "{{ query('community.general.random_string', lower=false, upper=false, special=false) }}" # number only - result9: "{{ query('community.general.random_string', lower=false, upper=false, special=false, min_numeric=1, length=1) }}" # single digit only - result10: "{{ query('community.general.random_string', numbers=false, upper=false, special=false, min_lower=1, length=1) }}" # single lowercase character only - result11: "{{ query('community.general.random_string', base64=true, length=8) }}" - result12: "{{ query('community.general.random_string', upper=false, numbers=false, special=false) }}" # all lower case - result13: "{{ query('community.general.random_string', override_all='0', length=2) }}" + - name: Call plugin + set_fact: + result1: "{{ query('community.general.random_string') }}" + result2: "{{ query('community.general.random_string', length=0) }}" + result3: "{{ query('community.general.random_string', length=10) }}" + result4: "{{ query('community.general.random_string', length=-1) }}" + result5: "{{ query('community.general.random_string', override_special='_', min_special=1) }}" + result6: "{{ query('community.general.random_string', upper=false, special=false) }}" # lower case only + result7: "{{ query('community.general.random_string', lower=false, special=false) }}" # upper case only + result8: "{{ query('community.general.random_string', lower=false, upper=false, special=false) }}" # number only + result9: "{{ query('community.general.random_string', lower=false, upper=false, special=false, min_numeric=1, length=1) }}" # single digit only + result10: "{{ query('community.general.random_string', numbers=false, upper=false, special=false, min_lower=1, length=1) }}" # single lowercase character only + result11: "{{ query('community.general.random_string', base64=true, length=8) }}" + result12: "{{ query('community.general.random_string', upper=false, numbers=false, special=false) }}" # all lower case + result13: "{{ query('community.general.random_string', override_all='0', length=2) }}" - - name: Raise error when impossible constraints are provided - set_fact: - impossible: "{{ query('community.general.random_string', upper=false, lower=false, special=false, numbers=false) }}" - ignore_errors: true - register: impossible_result + - name: Raise error when impossible constraints are provided + set_fact: + impossible: "{{ query('community.general.random_string', upper=false, lower=false, special=false, numbers=false) }}" + ignore_errors: true + register: impossible_result - - name: Check results - assert: - that: - - result1[0] | length == 8 - - result2[0] | length == 0 - - result3[0] | length == 10 - - result4[0] | length == 0 - - result5[0] | length == 8 - - "'_' in result5[0]" - - result6[0] is lower - - result7[0] is upper - - result8[0] | regex_replace('^(\d+)$', '') == '' - - result9[0] | regex_replace('^(\d+)$', '') == '' - - result9[0] | length == 1 - - result10[0] | length == 1 - - result10[0] is lower - # if input string is not multiple of 3, base64 encoded string will be padded with = - - result11[0].endswith('=') - - result12[0] is lower - - result13[0] | length == 2 - - result13[0] == '00' - - impossible_result is failed - - "'Available characters cannot' in impossible_result.msg" + - name: Check results + assert: + that: + - result1[0] | length == 8 + - 
+          - result2[0] | length == 0
+          - result3[0] | length == 10
+          - result4[0] | length == 0
+          - result5[0] | length == 8
+          - "'_' in result5[0]"
+          - result6[0] is lower
+          - result7[0] is upper
+          - result8[0] | regex_replace('^(\d+)$', '') == ''
+          - result9[0] | regex_replace('^(\d+)$', '') == ''
+          - result9[0] | length == 1
+          - result10[0] | length == 1
+          - result10[0] is lower
+          # if input string is not multiple of 3, base64 encoded string will be padded with =
+          - result11[0].endswith('=')
+          - result12[0] is lower
+          - result13[0] | length == 2
+          - result13[0] == '00'
+          - impossible_result is failed
+          - "'Available characters cannot' in impossible_result.msg"
diff --git a/tests/integration/targets/lookup_random_words/test.yml b/tests/integration/targets/lookup_random_words/test.yml
index e1b6fde13b..0c8fd8e110 100644
--- a/tests/integration/targets/lookup_random_words/test.yml
+++ b/tests/integration/targets/lookup_random_words/test.yml
@@ -6,28 +6,28 @@
 - hosts: localhost
   gather_facts: false
   tasks:
-  - name: Call random_words plugin
-    set_fact:
-      result1: "{{ query('community.general.random_words') }}"
-      result2: "{{ query('community.general.random_words', min_length=5, max_length=5) }}"
-      result3: "{{ query('community.general.random_words', delimiter='!') }}"
-      result4: "{{ query('community.general.random_words', numwords=3, delimiter='-', case='capitalize') }}"
-      result5: "{{ query('community.general.random_words', min_length=5, max_length=5, numwords=3, delimiter='') }}"
+    - name: Call random_words plugin
+      set_fact:
+        result1: "{{ query('community.general.random_words') }}"
+        result2: "{{ query('community.general.random_words', min_length=5, max_length=5) }}"
+        result3: "{{ query('community.general.random_words', delimiter='!') }}"
+        result4: "{{ query('community.general.random_words', numwords=3, delimiter='-', case='capitalize') }}"
+        result5: "{{ query('community.general.random_words', min_length=5, max_length=5, numwords=3, delimiter='') }}"
 
-  - name: Check results
-    assert:
-      that:
-      - result1 | length == 1
-      - result1[0] | length >= 35
-      - result2 | length == 1
-      - result2[0] | length == 35
-      - result3 | length == 1
-      - result3[0].count("!") == 5
-      - result4 | length == 1
-      - result4[0] | length >= 17
-      - result4[0] | length <= 29
-      - result4[0] | regex_findall("[A-Z]") | length == 3
-      # If one of the random words is 't-shirt', there are more than 2 dashes...
-      - result4[0].count("-") == 2 or "t-shirt" in result4[0].lower()
-      - result5 | length == 1
-      - result5[0] | length == 15
+    - name: Check results
+      assert:
+        that:
+          - result1 | length == 1
+          - result1[0] | length >= 35
+          - result2 | length == 1
+          - result2[0] | length == 35
+          - result3 | length == 1
+          - result3[0].count("!") == 5
+          - result4 | length == 1
+          - result4[0] | length >= 17
+          - result4[0] | length <= 29
+          - result4[0] | regex_findall("[A-Z]") | length == 3
+          # If one of the random words is 't-shirt', there are more than 2 dashes...
+ - result4[0].count("-") == 2 or "t-shirt" in result4[0].lower() + - result5 | length == 1 + - result5[0] | length == 15 diff --git a/tests/integration/targets/lvg/aliases b/tests/integration/targets/lvg/aliases index 3b92ba75c4..eb76529397 100644 --- a/tests/integration/targets/lvg/aliases +++ b/tests/integration/targets/lvg/aliases @@ -10,3 +10,5 @@ skip/aix skip/freebsd skip/osx skip/macos +skip/alpine3.21 # TODO try to fix +skip/alpine3.22 # TODO try to fix diff --git a/tests/integration/targets/lvg/tasks/main.yml b/tests/integration/targets/lvg/tasks/main.yml index 15af2d08c4..6bc6944f5f 100644 --- a/tests/integration/targets/lvg/tasks/main.yml +++ b/tests/integration/targets/lvg/tasks/main.yml @@ -24,6 +24,8 @@ - import_tasks: test_grow_reduce.yml + - import_tasks: test_remove_extra_pvs.yml + - import_tasks: test_pvresize.yml - import_tasks: test_active_change.yml diff --git a/tests/integration/targets/lvg/tasks/test_active_create.yml b/tests/integration/targets/lvg/tasks/test_active_create.yml index 7ac1ffedd7..2ad530bc53 100644 --- a/tests/integration/targets/lvg/tasks/test_active_create.yml +++ b/tests/integration/targets/lvg/tasks/test_active_create.yml @@ -64,8 +64,8 @@ assert: that: "inactive_by_option_vg_autoact_status_result.stdout | length == 0" always: - - name: Cleanup vg_autoact_test - lvg: - state: absent - vg: vg_autoact_test - force: true + - name: Cleanup vg_autoact_test + lvg: + state: absent + vg: vg_autoact_test + force: true diff --git a/tests/integration/targets/lvg/tasks/test_pvresize.yml b/tests/integration/targets/lvg/tasks/test_pvresize.yml index 3f3b9dbddb..90bea4e5d7 100644 --- a/tests/integration/targets/lvg/tasks/test_pvresize.yml +++ b/tests/integration/targets/lvg/tasks/test_pvresize.yml @@ -14,8 +14,8 @@ - name: Assert the testvg size is 33554432B assert: - that: - - "'33554432B' == cmd_result.stdout" + that: + - "'33554432B' == cmd_result.stdout" - name: Increases size in file command: "dd if=/dev/zero bs=8MiB count=1 of={{ remote_tmp_dir }}/img1 conv=notrunc oflag=append" @@ -25,14 +25,14 @@ - name: "Reruns lvg with pvresize:no" lvg: - vg: testvg - pvs: "{{ loop_device1 }}" - pvresize: false + vg: testvg + pvs: "{{ loop_device1 }}" + pvresize: false register: cmd_result - + - assert: that: - - cmd_result is not changed + - cmd_result is not changed - name: Gets current vg size shell: vgs -v testvg -o pv_size --noheading --units b | xargs @@ -40,36 +40,36 @@ - name: Assert the testvg size is still 33554432B assert: - that: - - "'33554432B' == cmd_result.stdout" + that: + - "'33554432B' == cmd_result.stdout" - name: "Reruns lvg with pvresize:yes and check_mode:yes" lvg: - vg: testvg - pvs: "{{ loop_device1 }}" - pvresize: true + vg: testvg + pvs: "{{ loop_device1 }}" + pvresize: true check_mode: true register: cmd_result - + - name: Assert that the module returned the state was changed assert: that: - - cmd_result is changed + - cmd_result is changed - name: Gets current vg size shell: vgs -v testvg -o pv_size --noheading --units b | xargs register: cmd_result - + - name: Assert the testvg size is still 33554432B assert: - that: - - "'33554432B' == cmd_result.stdout" + that: + - "'33554432B' == cmd_result.stdout" - name: "Reruns lvg with pvresize:yes" lvg: - vg: testvg - pvs: "{{ loop_device1 }}" - pvresize: true + vg: testvg + pvs: "{{ loop_device1 }}" + pvresize: true - name: Gets current vg size shell: vgs -v testvg -o pv_size --noheading --units b | xargs @@ -77,5 +77,5 @@ - name: Assert the testvg size is now 41943040B assert: - that: - - 
"'41943040B' == cmd_result.stdout" + that: + - "'41943040B' == cmd_result.stdout" diff --git a/tests/integration/targets/lvg/tasks/test_remove_extra_pvs.yml b/tests/integration/targets/lvg/tasks/test_remove_extra_pvs.yml new file mode 100644 index 0000000000..830986d8da --- /dev/null +++ b/tests/integration/targets/lvg/tasks/test_remove_extra_pvs.yml @@ -0,0 +1,40 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# test_grow_reduce already checks the base case with default parameters (remove additional PVs) + +- name: "Create volume group on first disk" + lvg: + vg: testvg + pvs: "{{ loop_device1 }}" + +- name: "get lvm facts" + setup: + +- debug: var=ansible_lvm + +- name: "Assert the testvg span only on first disk" + assert: + that: + - ansible_lvm.pvs[loop_device1].vg == "testvg" + - 'loop_device2 not in ansible_lvm.pvs or + ansible_lvm.pvs[loop_device2].vg == ""' + +- name: "Extend to second disk AND keep first disk" + lvg: + vg: testvg + pvs: "{{ loop_device2 }}" + remove_extra_pvs: false + +- name: "get lvm facts" + setup: + +- debug: var=ansible_lvm + +- name: "Assert the testvg spans on both disks" + assert: + that: + - ansible_lvm.pvs[loop_device1].vg == "testvg" + - ansible_lvm.pvs[loop_device2].vg == "testvg" diff --git a/tests/integration/targets/lvm_pv/aliases b/tests/integration/targets/lvm_pv/aliases new file mode 100644 index 0000000000..64d439099c --- /dev/null +++ b/tests/integration/targets/lvm_pv/aliases @@ -0,0 +1,13 @@ +# Copyright (c) Contributors to the Ansible project +# Based on the integraton test for the lvg module +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +azp/posix/1 +azp/posix/vm +destructive +needs/privileged +skip/aix +skip/freebsd +skip/osx +skip/macos diff --git a/tests/integration/targets/lvm_pv/meta/main.yml b/tests/integration/targets/lvm_pv/meta/main.yml new file mode 100644 index 0000000000..90c5d5cb8d --- /dev/null +++ b/tests/integration/targets/lvm_pv/meta/main.yml @@ -0,0 +1,9 @@ +--- +# Copyright (c) Ansible Project +# Based on the integraton test for the lvg module +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +dependencies: + - setup_pkg_mgr + - setup_remote_tmp_dir diff --git a/tests/integration/targets/lvm_pv/tasks/cleanup.yml b/tests/integration/targets/lvm_pv/tasks/cleanup.yml new file mode 100644 index 0000000000..a9c0bb095d --- /dev/null +++ b/tests/integration/targets/lvm_pv/tasks/cleanup.yml @@ -0,0 +1,12 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Detaching loop device + ansible.builtin.command: losetup -d {{ loop_device.stdout }} + +- name: Removing loop device file + ansible.builtin.file: + path: "{{ remote_tmp_dir }}/test_lvm_pv.img" + state: absent diff --git a/tests/integration/targets/lvm_pv/tasks/creation.yml b/tests/integration/targets/lvm_pv/tasks/creation.yml new file mode 100644 index 0000000000..a26a39c524 --- /dev/null +++ b/tests/integration/targets/lvm_pv/tasks/creation.yml @@ -0,0 +1,33 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see 
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Creating a 50MB file for loop device
+  ansible.builtin.command: dd if=/dev/zero of={{ remote_tmp_dir }}/test_lvm_pv.img bs=1M count=50
+  args:
+    creates: "{{ remote_tmp_dir }}/test_lvm_pv.img"
+
+- name: Creating loop device
+  ansible.builtin.command: losetup -f
+  register: loop_device
+
+- name: Associating loop device with file
+  ansible.builtin.command: 'losetup {{ loop_device.stdout }} {{ remote_tmp_dir }}/test_lvm_pv.img'
+
+- name: Creating physical volume
+  community.general.lvm_pv:
+    device: "{{ loop_device.stdout }}"
+  register: result
+
+- name: Checking physical volume size
+  ansible.builtin.command: pvs --noheadings -o pv_size --units M {{ loop_device.stdout }}
+  register: pv_size_output
+
+- name: Asserting physical volume was created
+  ansible.builtin.assert:
+    that:
+      - result.changed == true
+      - (pv_size_output.stdout | trim | regex_replace('M', '') | float) > 45
+      - (pv_size_output.stdout | trim | regex_replace('M', '') | float) < 55
+      - "'created' in result.msg"
diff --git a/tests/integration/targets/lvm_pv/tasks/main.yml b/tests/integration/targets/lvm_pv/tasks/main.yml
new file mode 100644
index 0000000000..16c966d274
--- /dev/null
+++ b/tests/integration/targets/lvm_pv/tasks/main.yml
@@ -0,0 +1,27 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests       #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Contributors to the Ansible project
+# Based on the integration test for the lvg module
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install required packages (Linux)
+  when: ansible_system == 'Linux'
+  ansible.builtin.package:
+    name: lvm2
+    state: present
+
+- name: Testing lvm_pv module
+  block:
+    - import_tasks: creation.yml
+
+    - import_tasks: resizing.yml
+
+    - import_tasks: removal.yml
+
+  always:
+    - import_tasks: cleanup.yml
diff --git a/tests/integration/targets/lvm_pv/tasks/removal.yml b/tests/integration/targets/lvm_pv/tasks/removal.yml
new file mode 100644
index 0000000000..d59a890a55
--- /dev/null
+++ b/tests/integration/targets/lvm_pv/tasks/removal.yml
@@ -0,0 +1,16 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Removing physical volume
+  community.general.lvm_pv:
+    device: "{{ loop_device.stdout }}"
+    state: absent
+  register: remove_result
+
+- name: Asserting physical volume was removed
+  ansible.builtin.assert:
+    that:
+      - remove_result.changed == true
+      - "'removed' in remove_result.msg"
diff --git a/tests/integration/targets/lvm_pv/tasks/resizing.yml b/tests/integration/targets/lvm_pv/tasks/resizing.yml
new file mode 100644
index 0000000000..184fe7498c
--- /dev/null
+++ b/tests/integration/targets/lvm_pv/tasks/resizing.yml
@@ -0,0 +1,27 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Growing the loop device file to 100MB
+
+- name: Refreshing the loop device
+  ansible.builtin.shell: losetup -c {{ loop_device.stdout }}
+
+- name: Resizing the physical volume
+  community.general.lvm_pv:
+    device: "{{ loop_device.stdout }}"
+    resize: true
+  register: resize_result
+
+- name: Checking physical volume size
+  ansible.builtin.command: pvs --noheadings -o pv_size --units M {{ loop_device.stdout }}
+  register: pv_size_output
+
+- name: Asserting physical volume was resized
+  ansible.builtin.assert:
+    that:
+      - resize_result.changed == true
+      - (pv_size_output.stdout | trim | regex_replace('M', '') | float) > 95
+      - "'resized' in resize_result.msg"
diff --git a/tests/integration/targets/mail/tasks/main.yml b/tests/integration/targets/mail/tasks/main.yml
index 83c242ad23..3831a43643 100644
--- a/tests/integration/targets/mail/tasks/main.yml
+++ b/tests/integration/targets/mail/tasks/main.yml
@@ -10,101 +10,101 @@
 
 # TODO: Our current implementation does not handle SMTP authentication
 
-- when: 
+- when:
     # TODO: https://github.com/ansible-collections/community.general/issues/4656
     - ansible_python.version.major != 3 or ansible_python.version.minor < 12
   block:
-  # NOTE: If the system does not support smtpd-tls (python 2.6 and older) we do basic tests
-  - name: Attempt to install smtpd-tls
-    pip:
-      name: smtpd-tls
-      state: present
-    ignore_errors: true
-    register: smtpd_tls
+    # NOTE: If the system does not support smtpd-tls (python 2.6 and older) we do basic tests
+    - name: Attempt to install smtpd-tls
+      pip:
+        name: smtpd-tls
+        state: present
+      ignore_errors: true
+      register: smtpd_tls
 
-  - name: Install test smtpserver
-    copy:
-      src: '{{ item }}'
-      dest: '{{ remote_tmp_dir }}/{{ item }}'
-    loop:
-      - smtpserver.py
-      - smtpserver.crt
-      - smtpserver.key
+    - name: Install test smtpserver
+      copy:
+        src: '{{ item }}'
+        dest: '{{ remote_tmp_dir }}/{{ item }}'
+      loop:
+        - smtpserver.py
+        - smtpserver.crt
+        - smtpserver.key
 
-  # FIXME: Verify the mail after it was send would be nice
-  # This would require either dumping the content, or registering async task output
-  - name: Start test smtpserver
-    shell: '{{ ansible_python.executable }} {{ remote_tmp_dir }}/smtpserver.py 10025:10465'
-    async: 45
-    poll: 0
-    register: smtpserver
+    # FIXME: Verifying the mail after it was sent would be nice
+    # This would require either dumping the content, or registering async task output
+    - name: Start test smtpserver
+      shell: '{{ ansible_python.executable }} {{ remote_tmp_dir }}/smtpserver.py 10025:10465'
+      async: 45
+      poll: 0
+      register: smtpserver
 
-  - name: Send a basic test-mail
-    mail:
-      port: 10025
-      subject: Test mail 1 (smtp)
-      secure: never
+    - name: Send a basic test-mail
+      mail:
+        port: 10025
+        subject: Test mail 1 (smtp)
+        secure: never
 
-  - name: Send a test-mail with body and specific recipient
-    mail:
-      port: 10025
-      from: ansible@localhost
-      to: root@localhost
-      subject: Test mail 2 (smtp + body)
-      body: Test body 2
-      secure: never
+    - name: Send a test-mail with body and specific recipient
+      mail:
+        port: 10025
+        from: ansible@localhost
+        to: root@localhost
+        subject: Test mail 2 (smtp + body)
+        body: Test body 2
+        secure: never
 
-  - name: Send a test-mail with attachment
-    mail:
-      port: 10025
-      from: ansible@localhost
-      to: root@localhost
-      subject: Test mail 3 (smtp + body + attachment)
-      body: Test body 3
-      attach: /etc/group
-      secure: never
+    - name: Send a test-mail with attachment
+      mail:
+        port: 10025
+        from: ansible@localhost
+        to: root@localhost
+        subject: Test mail 3 (smtp + body + attachment)
+        body: Test body 3
+        attach: /etc/group
+        secure: never
 
-  # NOTE: This might fail if smtpd-tls is missing or python 2.7.8 or older is used
-  - name: Send a test-mail using starttls
-    mail:
-      port: 10025
-      from: ansible@localhost
-      to: root@localhost
-      subject: Test mail 4 (smtp + starttls + body + attachment)
-      body: Test body 4
-      attach: /etc/group
-      secure: starttls
-    ignore_errors: true
-    register: starttls_support
+    # NOTE: This might fail if smtpd-tls is missing or python 2.7.8 or older is used
+    - name: Send a test-mail using starttls
+      mail:
+        port: 10025
+        from: ansible@localhost
+        to: root@localhost
+        subject: Test mail 4 (smtp + starttls + body + attachment)
+        body: Test body 4
+        attach: /etc/group
+        secure: starttls
+      ignore_errors: true
+      register: starttls_support
 
-  # NOTE: This might fail if smtpd-tls is missing or python 2.7.8 or older is used
-  - name: Send a test-mail using TLS
-    mail:
-      port: 10465
-      from: ansible@localhost
-      to: root@localhost
-      subject: Test mail 5 (smtp + tls + body + attachment)
-      body: Test body 5
-      attach: /etc/group
-      secure: always
-    ignore_errors: true
-    register: tls_support
+    # NOTE: This might fail if smtpd-tls is missing or python 2.7.8 or older is used
+    - name: Send a test-mail using TLS
+      mail:
+        port: 10465
+        from: ansible@localhost
+        to: root@localhost
+        subject: Test mail 5 (smtp + tls + body + attachment)
+        body: Test body 5
+        attach: /etc/group
+        secure: always
+      ignore_errors: true
+      register: tls_support
 
-  - fail:
-      msg: Sending mail using starttls failed.
-    when: smtpd_tls is succeeded and starttls_support is failed and tls_support is succeeded
+    - fail:
+        msg: Sending mail using starttls failed.
+      when: smtpd_tls is succeeded and starttls_support is failed and tls_support is succeeded
 
-  - fail:
-      msg: Send mail using TLS failed.
-    when: smtpd_tls is succeeded and tls_support is failed and starttls_support is succeeded
+    - fail:
+        msg: Sending mail using TLS failed.
+      when: smtpd_tls is succeeded and tls_support is failed and starttls_support is succeeded
 
-  - name: Send a test-mail with body, specific recipient and specific ehlohost
-    mail:
-      port: 10025
-      ehlohost: some.domain.tld
-      from: ansible@localhost
-      to: root@localhost
-      subject: Test mail 6 (smtp + body + ehlohost)
-      body: Test body 6
-      secure: never
+    - name: Send a test-mail with body, specific recipient and specific ehlohost
+      mail:
+        port: 10025
+        ehlohost: some.domain.tld
+        from: ansible@localhost
+        to: root@localhost
+        subject: Test mail 6 (smtp + body + ehlohost)
+        body: Test body 6
+        secure: never
diff --git a/tests/integration/targets/mas/tasks/main.yml b/tests/integration/targets/mas/tasks/main.yml
index 839620779e..d4b51d3842 100644
--- a/tests/integration/targets/mas/tasks/main.yml
+++ b/tests/integration/targets/mas/tasks/main.yml
@@ -24,7 +24,7 @@
 - name: Ensure the app is uninstalled
   assert:
     that:
-    - install_status.stat.exists == false
+      - install_status.stat.exists == false
 
 - name: Wait until the OS-internal cache was updated
   pause:
@@ -41,8 +41,8 @@
 - name: Ensure that the status would have changed
   assert:
     that:
-    - install_check is changed
-    - install_check.msg == "Installed 1 app(s)"
+      - install_check is changed
+      - install_check.msg == "Installed 1 app(s)"
 
 - name: Determine whether the app is installed
   stat:
@@ -52,7 +52,7 @@
 - name: Ensure the app is not yet installed
   assert:
     that:
-    - install_status.stat.exists == false
+      - install_status.stat.exists == false
 
 - name: Install Rested
   mas:
@@ -63,8 +63,8 @@
 - name: Ensure that the status changed
   assert:
     that:
-    - install is changed
-    - install.msg == "Installed 1 app(s)"
+      - install is changed
+      - install.msg == "Installed 1 app(s)"
 
 - name: Determine whether the app is installed
   stat:
@@ -74,7 +74,7 @@
 - name: Ensure the app is installed
   assert:
     that:
-    - install_status.stat.exists == true
+      - install_status.stat.exists == true
 
 - name: Wait until the OS-internal cache was updated
   pause:
@@ -89,8 +89,8 @@
 - name: Ensure that the status is unchanged (already installed)
   assert:
     that:
-    - install_again is not changed
-    - "'msg' not in install_again"
+      - install_again is not changed
+      - "'msg' not in install_again"
 
 # Uninstallation
 - name: Check if Rested needs to be uninstalled
@@ -104,8 +104,8 @@
 - name: Ensure that the status would have changed
   assert:
     that:
-    - uninstall_check is changed
-    - uninstall_check.msg == "Uninstalled 1 app(s)"
+      - uninstall_check is changed
+      - uninstall_check.msg == "Uninstalled 1 app(s)"
 
 - name: Determine whether the app is installed
   stat:
@@ -115,7 +115,7 @@
 - name: Ensure the app is not yet uninstalled
   assert:
     that:
-    - install_status.stat.exists == true
+      - install_status.stat.exists == true
 
 - name: Uninstall Rested
   mas:
@@ -127,8 +127,8 @@
 - name: Ensure that the status changed
   assert:
     that:
-    - uninstall is changed
-    - uninstall.msg == "Uninstalled 1 app(s)"
+      - uninstall is changed
+      - uninstall.msg == "Uninstalled 1 app(s)"
 
 - name: Determine whether the app is installed
   stat:
@@ -138,7 +138,7 @@
 - name: Ensure the app is uninstalled
   assert:
     that:
-    - uninstall_status.stat.exists == false
+      - uninstall_status.stat.exists == false
 
 - name: Wait until the OS-internal cache was updated
   pause:
@@ -154,5 +154,5 @@
 - name: Ensure that the status is unchanged (already uninstalled)
   assert:
     that:
-    - uninstall_again is not changed
-    - "'msg' not in uninstall_again"
+      - uninstall_again is not changed
+      - "'msg' not in uninstall_again"
diff --git a/tests/integration/targets/module_helper/library/mdepfail.py b/tests/integration/targets/module_helper/library/mdepfail.py
index b61c32a4da..ba315d0111 100644
--- a/tests/integration/targets/module_helper/library/mdepfail.py
+++ b/tests/integration/targets/module_helper/library/mdepfail.py
@@ -57,7 +57,7 @@ class MSimple(ModuleHelper):
             raise Exception("a >= 100")
         if self.vars.c == "abc change":
             self.vars['abc'] = "changed abc"
-        if self.vars.get('a', 0) == 2:
+        if self.vars.a == 2:
             self.vars['b'] = str(self.vars.b) * 2
             self.vars['c'] = str(self.vars.c) * 2
 
diff --git a/tests/integration/targets/module_helper/library/msimple.py b/tests/integration/targets/module_helper/library/msimple.py
index 096e515247..41407ec50e 100644
--- a/tests/integration/targets/module_helper/library/msimple.py
+++ b/tests/integration/targets/module_helper/library/msimple.py
@@ -63,7 +63,7 @@ class MSimple(ModuleHelper):
             raise Exception("a >= 100")
         if self.vars.c == "abc change":
             self.vars['abc'] = "changed abc"
-        if self.vars.get('a', 0) == 2:
+        if self.vars.a == 2:
             self.vars['b'] = str(self.vars.b) * 2
             self.vars['c'] = str(self.vars.c) * 2
             self.process_a3_bc()
diff --git a/tests/integration/targets/module_helper/library/mstate.py b/tests/integration/targets/module_helper/library/mstate.py
index b3b4ed5e69..bfaab03755 100644
--- a/tests/integration/targets/module_helper/library/mstate.py
+++ b/tests/integration/targets/module_helper/library/mstate.py
@@ -49,7 +49,6 @@ class MState(StateModuleHelper):
             state=dict(type='str', choices=['join', 'b_x_a', 'c_x_a', 'both_x_a', 'nop'], default='join'),
         ),
     )
-    use_old_vardict = False
 
     def __init_module__(self):
         self.vars.set('result', "abc", diff=True)
diff --git a/tests/integration/targets/module_helper/tasks/mdepfail.yml b/tests/integration/targets/module_helper/tasks/mdepfail.yml
index 1655be54e3..f0be8340e3 100644
--- a/tests/integration/targets/module_helper/tasks/mdepfail.yml
+++ b/tests/integration/targets/module_helper/tasks/mdepfail.yml
@@ -8,11 +8,15 @@
   ignore_errors: true
   register: result
 
+- name: Show results
+  debug:
+    var: result
+
 - name: assert failing dependency
   assert:
     that:
       - result is failed
      - '"Failed to import" in result.msg'
       - '"nopackagewiththisname" in result.msg'
-      - '"ModuleNotFoundError:" in result.exception or "ImportError:" in result.exception'
-      - '"nopackagewiththisname" in result.exception'
+      - '"ModuleNotFoundError:" in result.exception or "ImportError:" in result.exception or "(traceback unavailable)" in result.exception'
+      - '"nopackagewiththisname" in result.exception or "(traceback unavailable)" in result.exception'
diff --git a/tests/integration/targets/module_helper/tasks/msimpleda.yml b/tests/integration/targets/module_helper/tasks/msimpleda.yml
index e01b65e12c..2d89cbaa39 100644
--- a/tests/integration/targets/module_helper/tasks/msimpleda.yml
+++ b/tests/integration/targets/module_helper/tasks/msimpleda.yml
@@ -3,15 +3,26 @@
 # SPDX-License-Identifier: GPL-3.0-or-later
 
 - set_fact:
-    attr2_d:
+    attr2_depr_dict:
       msg: Attribute attr2 is deprecated
       version: 9.9.9
       collection_name: community.general
-    attr2_d_29:
+    # With Data Tagging, the deprecation dict looks a bit different:
+    attr2_depr_dict_dt:
       msg: Attribute attr2 is deprecated
       version: 9.9.9
-
-- set_fact:
-    attr2_depr_dict: "{{ ((ansible_version.major, ansible_version.minor) < (2, 10))|ternary(attr2_d_29, attr2_d) }}"
+      collection_name: community.general
+      deprecator:
+        resolved_name: community.general
+        type: collection
+    # Latest version:
+    attr2_depr_dict_dt2:
+      msg: Attribute attr2 is deprecated
+      version: 9.9.9
+      collection_name: community.general
+      deprecator:
+        resolved_name: community.general
+        type: ~
 
 - name: test msimpleda 1
   msimpleda:
@@ -23,17 +34,29 @@
   that:
     - simple1.a == 1
     - simple1.attr1 == "abc"
-    - ("deprecations" not in simple1) or attr2_depr_dict not in simple1.deprecations
+    - >-
+      ("deprecations" not in simple1) or (
+        attr2_depr_dict not in simple1.deprecations and
+        attr2_depr_dict_dt not in simple1.deprecations and
+        attr2_depr_dict_dt2 not in simple1.deprecations
+      )
 
 - name: test msimpleda 2
   msimpleda:
     a: 2
   register: simple2
 
+- name: Show results
+  debug:
+    var: simple2
+
 - name: assert simple2
   assert:
     that:
       - simple2.a == 2
       - simple2.attr2 == "def"
       - '"deprecations" in simple2'
-      - attr2_depr_dict in simple2.deprecations
+      - >-
+        attr2_depr_dict in simple2.deprecations or
+        attr2_depr_dict_dt in simple2.deprecations or
+        attr2_depr_dict_dt2 in simple2.deprecations
diff --git a/tests/integration/targets/monit/tasks/main.yml b/tests/integration/targets/monit/tasks/main.yml
index ea85954125..518e997c32 100644
--- a/tests/integration/targets/monit/tasks/main.yml
+++ b/tests/integration/targets/monit/tasks/main.yml
@@ -9,91 +9,91 @@
 # SPDX-License-Identifier: GPL-3.0-or-later
 
 - block:
-  - name: Install EPEL repository (RHEL only)
-    include_role:
-      name: setup_epel
-    when:
-      - ansible_distribution in ['RedHat', 'CentOS']
-      - ansible_distribution_major_version is version('9', '<')
+    - name: Install EPEL repository (RHEL only)
+      include_role:
+        name: setup_epel
+      when:
+        - ansible_distribution in ['RedHat', 'CentOS']
+        - ansible_distribution_major_version is version('9', '<')
 
-  - name: create required directories
-    become: true
-    file:
-      path: "{{ item }}"
-      state: directory
-    loop:
-      - /var/lib/monit
-      - /var/run/monit
-      - "{{ process_root }}"
+    - name: create required directories
+      become: true
+      file:
+        path: "{{ item }}"
+        state: directory
+      loop:
+        - /var/lib/monit
+        - /var/run/monit
+        - "{{ process_root }}"
 
-  - name: install monit
-    become: true
-    package:
-      name: monit
-      state: present
+    - name: install monit
+      become: true
+      package:
+        name: monit
+        state: present
 
-  - include_vars: '{{ item }}'
-    with_first_found:
-      - files:
-          - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml"
-          - '{{ ansible_os_family }}.yml'
-          - 'defaults.yml'
+    - include_vars: '{{ item }}'
+      with_first_found:
+        - files:
+            - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml"
+            - '{{ ansible_os_family }}.yml'
+            - 'defaults.yml'
 
-  - name: monit config
-    become: true
-    template:
-      src: "monitrc.j2"
-      dest: "{{ monitrc }}"
+    - name: monit config
+      become: true
+      template:
+        src: "monitrc.j2"
+        dest: "{{ monitrc }}"
 
-  - name: copy process file
-    become: true
-    copy:
-      src: httpd_echo.py
-      dest: "{{ process_file }}"
+    - name: copy process file
+      become: true
+      copy:
+        src: httpd_echo.py
+        dest: "{{ process_file }}"
 
-  - name: Install virtualenv on CentOS 8
-    package:
-      name: virtualenv
-      state: present
-    when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '8'
+    - name: Install virtualenv on CentOS 8
+      package:
+        name: virtualenv
+        state: present
+      when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '8'
 
-  - name: Install virtualenv on Arch Linux
-    pip:
-      name: virtualenv
-      state: present
-    when: ansible_os_family == 'Archlinux'
+    - name: Install virtualenv on Arch Linux
+      pip:
+        name: virtualenv
+        state: present
+      when: ansible_os_family == 'Archlinux'
 
-  - name: install dependencies
-    pip:
-      name: "{{ item }}"
-      virtualenv: "{{ process_venv }}"
-      extra_args: "-c {{ remote_constraints }}"
-    loop:
-      - setuptools==44
-      - python-daemon
+    - name: install dependencies
+      pip:
+        name: "{{ item }}"
+        virtualenv: "{{ process_venv }}"
+        extra_args: "-c {{ remote_constraints }}"
+      loop:
+        - setuptools==44
+        - python-daemon
 
-  - name: restart monit
-    become: true
-    service:
-      name: monit
-      state: restarted
+    - name: restart monit
+      become: true
+      service:
+        name: monit
+        state: restarted
 
-  - include_tasks: test.yml
+    - include_tasks: test.yml
 
   always:
-  - name: stop monit
-    become: true
-    service:
-      name: monit
-      state: stopped
+    - name: stop monit
+      become: true
+      service:
+        name: monit
+        state: stopped
 
-  - name: uninstall monit
-    become: true
-    package:
-      name: monit
-      state: absent
+    - name: uninstall monit
+      become: true
+      package:
+        name: monit
+        state: absent
 
-  - name: remove process files
-    file:
-      path: "{{ process_root }}"
-      state: absent
+    - name: remove process files
+      file:
+        path: "{{ process_root }}"
+        state: absent
diff --git a/tests/integration/targets/mqtt/tasks/main.yml b/tests/integration/targets/mqtt/tasks/main.yml
index 3fd11643ee..33f9307c5f 100644
--- a/tests/integration/targets/mqtt/tasks/main.yml
+++ b/tests/integration/targets/mqtt/tasks/main.yml
@@ -9,6 +9,6 @@
 # SPDX-License-Identifier: GPL-3.0-or-later
 
 - include_tasks: ubuntu.yml
-  when: 
+  when:
     - ansible_distribution == 'Ubuntu'
     - ansible_distribution_release not in ['focal', 'jammy', 'noble']
diff --git a/tests/integration/targets/mqtt/tasks/ubuntu.yml b/tests/integration/targets/mqtt/tasks/ubuntu.yml
index 0c0a12d041..332a10dfed 100644
--- a/tests/integration/targets/mqtt/tasks/ubuntu.yml
+++ b/tests/integration/targets/mqtt/tasks/ubuntu.yml
@@ -66,9 +66,9 @@
 #    port: 8885
 #  register: result
 
-#- assert:
-#    that:
-#      - result is success
+# - assert:
+#     that:
+#       - result is success
 
 - name: Send a message, client TLS1.1, server (required) TLS1.2 - Expected failure
   mqtt:
diff --git a/tests/integration/targets/nomad/files/job.hcl b/tests/integration/targets/nomad/files/job.hcl
index 8f01f04396..58e4de31d5 100644
--- a/tests/integration/targets/nomad/files/job.hcl
+++ b/tests/integration/targets/nomad/files/job.hcl
@@ -36,7 +36,7 @@ job "example" {
 
   # type = "service"
-  
+
   # The "constraint" stanza defines additional constraints for placing this job,
   # in addition to any resource or driver constraints. This stanza may be placed
   # at the "job", "group", or "task" level, and supports variable interpolation.
diff --git a/tests/integration/targets/nomad/tasks/main.yml b/tests/integration/targets/nomad/tasks/main.yml
index 1a143be059..87b1c0474e 100644
--- a/tests/integration/targets/nomad/tasks/main.yml
+++ b/tests/integration/targets/nomad/tasks/main.yml
@@ -17,95 +17,95 @@
     nomad_cmd: '{{ remote_tmp_dir }}/nomad'
   block:
-  - name: Install requests<2.20 (CentOS/RHEL 6)
-    pip:
-      name: requests<2.20
-      extra_args: "-c {{ remote_constraints }}"
-    register: result
-    until: result is success
-    when: ansible_distribution_file_variety|default() == 'RedHat' and ansible_distribution_major_version is version('6', '<=')
+    - name: Install requests<2.20 (CentOS/RHEL 6)
+      pip:
+        name: requests<2.20
+        extra_args: "-c {{ remote_constraints }}"
+      register: result
+      until: result is success
+      when: ansible_distribution_file_variety|default() == 'RedHat' and ansible_distribution_major_version is version('6', '<=')
 
-  - name: Install python-nomad
-    pip:
-      name: python-nomad
-      extra_args: "-c {{ remote_constraints }}"
-    register: result
-    until: result is success
+    - name: Install python-nomad
+      pip:
+        name: python-nomad
+        extra_args: "-c {{ remote_constraints }}"
+      register: result
+      until: result is success
 
-  - name: Install jmespath
-    pip:
-      name: jmespath
-      extra_args: "-c {{ remote_constraints }}"
-    register: result
-    until: result is success
+    - name: Install jmespath
+      pip:
+        name: jmespath
+        extra_args: "-c {{ remote_constraints }}"
+      register: result
+      until: result is success
 
-  - name: Generate privatekey
-    community.crypto.openssl_privatekey:
-      path: '{{ remote_tmp_dir }}/privatekey.pem'
+    - name: Generate privatekey
+      community.crypto.openssl_privatekey:
+        path: '{{ remote_tmp_dir }}/privatekey.pem'
 
-  - name: Generate CSR
-    community.crypto.openssl_csr:
-      path: '{{ remote_tmp_dir }}/csr.csr'
-      privatekey_path: '{{ remote_tmp_dir }}/privatekey.pem'
-      subject:
-        commonName: localhost
+    - name: Generate CSR
+      community.crypto.openssl_csr:
+        path: '{{ remote_tmp_dir }}/csr.csr'
+        privatekey_path: '{{ remote_tmp_dir }}/privatekey.pem'
+        subject:
+          commonName: localhost
 
-  - name: Generate selfsigned certificate
-    register: selfsigned_certificate
-    community.crypto.x509_certificate:
-      path: '{{ remote_tmp_dir }}/cert.pem'
-      csr_path: '{{ remote_tmp_dir }}/csr.csr'
-      privatekey_path: '{{ remote_tmp_dir }}/privatekey.pem'
-      provider: selfsigned
-      selfsigned_digest: sha256
+    - name: Generate selfsigned certificate
+      register: selfsigned_certificate
+      community.crypto.x509_certificate:
+        path: '{{ remote_tmp_dir }}/cert.pem'
+        csr_path: '{{ remote_tmp_dir }}/csr.csr'
+        privatekey_path: '{{ remote_tmp_dir }}/privatekey.pem'
+        provider: selfsigned
+        selfsigned_digest: sha256
 
-  - name: Install unzip
-    package:
-      name: unzip
-    register: result
-    until: result is success
-    when: ansible_distribution != "MacOSX"
+    - name: Install unzip
+      package:
+        name: unzip
+      register: result
+      until: result is success
+      when: ansible_distribution != "MacOSX"
 
-  - assert:
-      that: ansible_architecture in ['i386', 'x86_64', 'amd64']
+    - assert:
+        that: ansible_architecture in ['i386', 'x86_64', 'amd64']
 
-  - set_fact:
-      nomad_arch: '386'
-    when: ansible_architecture == 'i386'
+    - set_fact:
+        nomad_arch: '386'
+      when: ansible_architecture == 'i386'
 
-  - set_fact:
-      nomad_arch: amd64
-    when: ansible_architecture in ['x86_64', 'amd64']
+    - set_fact:
+        nomad_arch: amd64
+      when: ansible_architecture in ['x86_64', 'amd64']
 
-  - name: Download nomad binary
-    unarchive:
-      src: '{{ nomad_uri }}'
-      dest: '{{ remote_tmp_dir }}'
-      remote_src: true
-    register: result
-    until: result is success
+    - name: Download nomad binary
+      unarchive:
+        src: '{{ nomad_uri }}'
+        dest: '{{ remote_tmp_dir }}'
+        remote_src: true
+      register: result
+      until: result is success
 
-  - vars:
-      remote_dir: '{{ echo_remote_tmp_dir.stdout }}'
-    block:
-
-    - command: echo {{ remote_tmp_dir }}
-      register: echo_remote_tmp_dir
-
-    - name: Run tests integration
+    - vars:
+        remote_dir: '{{ echo_remote_tmp_dir.stdout }}'
       block:
-      - name: Start nomad (dev mode enabled)
-        shell: nohup {{ nomad_cmd }} agent -dev /dev/null 2>&1 &
-      - name: wait nomad up
-        wait_for:
-          host: localhost
-          port: 4646
-          delay: 10
-          timeout: 60
+        - command: echo {{ remote_tmp_dir }}
+          register: echo_remote_tmp_dir
 
-      - import_tasks: nomad_job.yml
-      always:
+        - name: Run tests integration
+          block:
+            - name: Start nomad (dev mode enabled)
+              shell: nohup {{ nomad_cmd }} agent -dev /dev/null 2>&1 &
 
-      - name: kill nomad
-        shell: pkill nomad
+            - name: wait nomad up
+              wait_for:
+                host: localhost
+                port: 4646
+                delay: 10
+                timeout: 60
+
+            - import_tasks: nomad_job.yml
+          always:
+
+            - name: kill nomad
+              shell: pkill nomad
diff --git a/tests/integration/targets/npm/tasks/main.yml b/tests/integration/targets/npm/tasks/main.yml
index 500e15fdb5..686bd3e434 100644
--- a/tests/integration/targets/npm/tasks/main.yml
+++ b/tests/integration/targets/npm/tasks/main.yml
@@ -16,17 +16,17 @@
     - not (ansible_os_family == 'Alpine') # TODO
   block:
-  # expand remote path
-  - command: 'echo {{ remote_tmp_dir }}'
-    register: echo
-  - set_fact:
-      remote_dir: '{{ echo.stdout }}'
+    # expand remote path
+    - command: 'echo {{ remote_tmp_dir }}'
+      register: echo
+    - set_fact:
+        remote_dir: '{{ echo.stdout }}'
 
-  - include_tasks: run.yml
-    vars:
-      nodejs_version: '{{ item }}'
-      nodejs_path: 'node-v{{ nodejs_version }}-{{ ansible_system|lower }}-x{{ ansible_userspace_bits }}'
-    with_items:
-      - 7.10.1 # provides npm 4.2.0 (last npm < 5 released)
-      - 8.0.0 # provides npm 5.0.0
-      - 8.2.0 # provides npm 5.3.0 (output change with this version)
+    - include_tasks: run.yml
+      vars:
+        nodejs_version: '{{ item }}'
+        nodejs_path: 'node-v{{ nodejs_version }}-{{ ansible_system|lower }}-x{{ ansible_userspace_bits }}'
+      with_items:
+        - 7.10.1 # provides npm 4.2.0 (last npm < 5 released)
+        - 8.0.0 # provides npm 5.0.0
+        - 8.2.0 # provides npm 5.3.0 (output change with this version)
diff --git a/tests/integration/targets/odbc/aliases b/tests/integration/targets/odbc/aliases
index ceb043895a..ee15fde5bb 100644
--- a/tests/integration/targets/odbc/aliases
+++ b/tests/integration/targets/odbc/aliases
@@ -12,4 +12,7 @@ skip/rhel9.1
 skip/rhel9.2
 skip/rhel9.3
 skip/rhel9.4
+skip/rhel9.5
+skip/rhel9.6
+skip/rhel10.0
 skip/freebsd
diff --git a/tests/integration/targets/odbc/defaults/main.yml b/tests/integration/targets/odbc/defaults/main.yml
index dd75f54718..45f94e5e2a 100644
--- a/tests/integration/targets/odbc/defaults/main.yml
+++ b/tests/integration/targets/odbc/defaults/main.yml
@@ -20,14 +20,14 @@ packages:
     - postgresql-odbc
     - unixODBC
     - unixODBC-devel
-    - gcc 
+    - gcc
     - gcc-c++
   Debian:
     - odbc-postgresql
     - unixodbc
     - unixodbc-dev
-    - gcc 
-    - g++ 
+    - gcc
+    - g++
   Suse:
     - psqlODBC
     - unixODBC
diff --git a/tests/integration/targets/one_host/tasks/main.yml b/tests/integration/targets/one_host/tasks/main.yml
index 3b2c1cedf3..839cda98e4 100644
--- a/tests/integration/targets/one_host/tasks/main.yml
+++ b/tests/integration/targets/one_host/tasks/main.yml
@@ -69,8 +69,8 @@
 - name: "assert test_{{test_number}} failed"
   assert:
     that:
-    - result is failed
-    - result.results[0].msg == 'invalid host state ERROR'
+      - result is failed
+      - result.results[0].msg == 'invalid host state ERROR'
 
 # ---
 
@@ -94,7 +94,7 @@
 - name: "assert test_{{test_number}} worked"
   assert:
     that:
-    - result.changed
+      - result.changed
 
 # HOST ENABLEMENT
 
@@ -120,7 +120,7 @@
 - name: "assert test_{{test_number}} worked"
   assert:
     that:
-    - result.changed
+      - result.changed
 
 # TEMPLATE MANAGEMENT
 
@@ -150,7 +150,7 @@
 - name: "assert test_{{test_number}} worked"
   assert:
     that:
-    - result.changed
+      - result.changed
 
 # ---
 
@@ -180,7 +180,7 @@
 - name: "assert test_{{test_number}} worked"
   assert:
     that:
-    - result is not changed
+      - result is not changed
 
 # HOST DISABLEMENT
 
@@ -205,7 +205,7 @@
 - name: "assert test_{{test_number}} worked"
   assert:
     that:
-    - result.changed
+      - result.changed
 
 # HOST OFFLINE
 
@@ -230,7 +230,7 @@
 - name: "assert test_{{test_number}} worked"
   assert:
     that:
-    - result.changed
+      - result.changed
 
 # TEARDOWN
diff --git a/tests/integration/targets/one_image/tasks/main.yml b/tests/integration/targets/one_image/tasks/main.yml
index c8736d73d8..aea8501aa7 100644
--- a/tests/integration/targets/one_image/tasks/main.yml
+++ b/tests/integration/targets/one_image/tasks/main.yml
@@ -166,7 +166,7 @@
 - name: Assert that image was deleted
   assert:
     that:
-    - result is changed
+      - result is changed
 
 # Trying to run with wrong arguments
 - name: Try to use name and ID at the same time
@@ -182,7 +182,7 @@
 - name: Assert that task failed
   assert:
     that:
-    - result is failed
+      - result is failed
 
 - name: Try to rename image without specifying new name
   one_image:
@@ -197,7 +197,7 @@
 - name: Assert that task failed
   assert:
     that:
-    - result is failed
+      - result is failed
 
 - name: Try to rename image without specifying new name
   one_image:
diff --git a/tests/integration/targets/one_image_info/tasks/main.yml b/tests/integration/targets/one_image_info/tasks/main.yml
index fede116241..00aacaa295 100644
--- a/tests/integration/targets/one_image_info/tasks/main.yml
+++ b/tests/integration/targets/one_image_info/tasks/main.yml
@@ -27,7 +27,7 @@
     api_url: "{{ opennebula_url }}"
     api_username: "{{ opennebula_username }}"
    api_password: "{{ opennebula_password }}"
-    ids: 
+    ids:
       - 2
       - 2
      - 8
diff --git a/tests/integration/targets/one_template/tasks/main.yml b/tests/integration/targets/one_template/tasks/main.yml
index 58bca9c6c5..0532c16107 100644
--- a/tests/integration/targets/one_template/tasks/main.yml
+++ b/tests/integration/targets/one_template/tasks/main.yml
@@ -70,7 +70,7 @@
 - name: "assert that creation worked"
   assert:
     that:
-    - result is changed
+      - result is changed
 
 # Updating a template
 
@@ -123,7 +123,7 @@
 - name: "assert that it updated the template"
   assert:
     that:
-    - result is changed
+      - result is changed
 
 - name: "Update an existing TEMPLATE with the same changes again"
   one_template:
@@ -173,7 +173,7 @@
 - name: "assert that there was no change"
   assert:
     that:
-    - result is not changed
+      - result is not changed
 
 # Deletion of templates
 
@@ -195,7 +195,7 @@
 - name: "assert that there was no change"
   assert:
     that:
-    - result is not changed
+      - result is not changed
 
 - name: "Delete an existing TEMPLATE"
   one_template:
@@ -214,7 +214,7 @@
 - name: "assert that there was a change"
   assert:
     that:
-    - result is changed
+      - result is changed
 
 # Usage without `template` parameter
 
@@ -232,7 +232,7 @@
 - name: "assert that it failed because template is missing"
   assert:
     that:
-    - result is failed
+      - result is failed
 
 # TEARDOWN
diff --git a/tests/integration/targets/one_vnet/tasks/main.yml b/tests/integration/targets/one_vnet/tasks/main.yml
index 084d4758ad..9e1164fcd7 100644
--- a/tests/integration/targets/one_vnet/tasks/main.yml
+++ b/tests/integration/targets/one_vnet/tasks/main.yml
@@ -112,7 +112,7 @@
 - name: Assert that network was deleted
   assert:
     that:
-    - result is changed
+      - result is changed
 
 # Trying to run with wrong arguments
 - name: Try to create use network with state=present and without the template parameter
@@ -161,7 +161,7 @@
     api_url: "{{ opennebula_url }}"
     api_username: "{{ opennebula_username }}"
     api_password: "{{ opennebula_password }}"
-    name: 
+    name:
     id: 0
     state: present
   register: result
diff --git a/tests/integration/targets/osx_defaults/tasks/main.yml b/tests/integration/targets/osx_defaults/tasks/main.yml
index 3ca3180f04..780c3f96c5 100644
--- a/tests/integration/targets/osx_defaults/tasks/main.yml
+++ b/tests/integration/targets/osx_defaults/tasks/main.yml
@@ -21,7 +21,7 @@
 - name: Test if state and value are required together
   assert:
     that:
-    - "'following are missing: value' in missing_value['msg']"
+      - "'following are missing: value' in missing_value['msg']"
 
 - name: Change value of AppleMeasurementUnits to centimeter in check_mode
   osx_defaults:
@@ -36,7 +36,7 @@
 - name: Test if AppleMeasurementUnits value is changed to Centimeters in check_mode
   assert:
     that:
-    - measure_task_check_mode.changed
+      - measure_task_check_mode.changed
 
 - name: Find the current value of AppleMeasurementUnits
   osx_defaults:
@@ -68,7 +68,7 @@
 - name: Test if AppleMeasurementUnits value is changed to {{ new_value }}
   assert:
     that:
-    - change_value.changed
+      - change_value.changed
 
 - name: Again change value of AppleMeasurementUnits to {{ new_value }}
   osx_defaults:
@@ -82,7 +82,7 @@
 - name: Again test if AppleMeasurementUnits value is not changed to {{ new_value }}
   assert:
     that:
-    - not change_value.changed
+      - not change_value.changed
 
 - name: Check a fake setting for delete operation
   osx_defaults:
diff --git a/tests/integration/targets/pids/tasks/main.yml b/tests/integration/targets/pids/tasks/main.yml
index c8feaacf3e..de4a2e406b 100644
--- a/tests/integration/targets/pids/tasks/main.yml
+++ b/tests/integration/targets/pids/tasks/main.yml
@@ -27,8 +27,8 @@
 - name: "Verify that the list of Process IDs (PIDs) returned is empty"
   assert:
     that:
-    - emptypids is not changed
-    - emptypids.pids == []
+      - emptypids is not changed
+      - emptypids.pids == []
 
 - name: "Picking a random process name"
   set_fact:
@@ -38,7 +38,7 @@
   copy:
     src: sleeper.c
     dest: "{{ remote_tmp_dir }}/sleeper.c"
-    mode: 0644
+    mode: "0644"
 
 - name: Compile fake 'sleep' binary
   command: cc {{ remote_tmp_dir }}/sleeper.c -o {{ remote_tmp_dir }}/{{ random_name }}
@@ -47,7 +47,7 @@
   template:
     src: obtainpid.sh.j2
     dest: "{{ remote_tmp_dir }}/obtainpid.sh"
-    mode: 0755
+    mode: "0755"
 
 - name: "Run the fake 'sleep' binary"
   command: sh {{ remote_tmp_dir }}/obtainpid.sh
@@ -86,7 +86,7 @@
 
 - name: "Reading pid from the file"
   slurp:
-   src: "{{ remote_tmp_dir }}/obtainpid.txt"
+    src: "{{ remote_tmp_dir }}/obtainpid.txt"
   register: newpid
 
 - name: Gather all processes to make debugging easier
@@ -101,12 +101,12 @@
 - name: "Verify that the Process IDs (PIDs) returned is not empty and also equal to the PIDs obtained in console"
   assert:
     that:
-    - "pids.pids | join(' ') == newpid.content | b64decode | trim"
-    - "pids.pids | length > 0"
-    - "exactpidmatch.pids == []"
-    - "pattern_pid_match.pids | join(' ') == newpid.content | b64decode | trim"
-    - "caseinsensitive_pattern_pid_match.pids | join(' ') == newpid.content | b64decode | trim"
-    - newpid.content | b64decode | trim | int in match_all.pids
+      - "pids.pids | join(' ') == newpid.content | b64decode | trim"
+      - "pids.pids | length > 0"
+      - "exactpidmatch.pids == []"
+      - "pattern_pid_match.pids | join(' ') == newpid.content | b64decode | trim"
+      - "caseinsensitive_pattern_pid_match.pids | join(' ') == newpid.content | b64decode | trim"
+      - newpid.content | b64decode | trim | int in match_all.pids
 
 - name: "Register output of bad input pattern"
   pids:
diff --git a/tests/integration/targets/pipx/tasks/main.yml b/tests/integration/targets/pipx/tasks/main.yml
index 314e9dfa52..6b83fa7335 100644
--- a/tests/integration/targets/pipx/tasks/main.yml
+++ b/tests/integration/targets/pipx/tasks/main.yml
@@ -3,22 +3,29 @@
 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
 # SPDX-License-Identifier: GPL-3.0-or-later
 
-- name: Determine pipx level
-  block:
-    - name: Install pipx>=1.7.0
-      pip:
-        name: pipx>=1.7.0
-    - name: Set has_pipx170 fact true
-      ansible.builtin.set_fact:
-        has_pipx170: true
-  rescue:
-    - name: Set has_pipx170 fact false
-      ansible.builtin.set_fact:
-        has_pipx170: false
-    - name: Install pipx (no version spec)
-      pip:
-        name: pipx
+- name: Bail out if Python < 3.8
+  when: ansible_python_version is version('3.8', '<')
+  ansible.builtin.meta: end_play
+
+- name: Install pipx>=1.7.0
+  ansible.builtin.pip:
+    name: pipx>=1.7.0
+    extra_args: --user
+
+- name: Determine packaging level
+  block:
+    - name: Install packaging>=22.0
+      ansible.builtin.pip:
+        name: packaging>=22.0
+    - name: Set has_packaging22 fact true
+      ansible.builtin.set_fact:
+        has_packaging22: true
+  rescue:
+    - name: Set has_packaging22 fact false
+      ansible.builtin.set_fact:
+        has_packaging22: false
+    - name: Install packaging (no version spec)
+      ansible.builtin.pip:
+        name: packaging
 
 ##############################################################################
 - name: ensure application tox is uninstalled
@@ -164,7 +171,7 @@
   community.general.pipx:
     state: absent
     name: tox
-  register: uninstall_tox_latest
+  register: uninstall_tox_1
 
 - name: install application tox 3.24.0 for latest
   community.general.pipx:
@@ -208,26 +215,55 @@
   community.general.pipx:
     state: absent
     name: tox
-  register: uninstall_tox_again
+  register: uninstall_tox_2
+
+- name: install tox with dependency group 'docs'
+  community.general.pipx:
+    name: tox
+    source: tox[docs]
+    state: latest
+  register: install_tox_latest_docs
+
+- name: install tox with dependency group 'docs' again
+  community.general.pipx:
+    name: tox
+    source: tox[docs]
+    state: latest
+  register: install_tox_latest_docs_again
+
+- name: cleanup tox latest yet again
+  community.general.pipx:
+    state: absent
+    name: tox
+  register: uninstall_tox_3
 
 - name: check assertions tox latest
   assert:
     that:
       - install_tox_latest is changed
-      - uninstall_tox_latest is changed
+      - "'tox' in install_tox_latest.application"
+      - install_tox_latest.application.tox.version != '3.24.0'
+      - uninstall_tox_1 is changed
+      - "'tox' not in uninstall_tox_1.application"
       - install_tox_324_for_latest is changed
+      - "'tox' in install_tox_324_for_latest.application"
      - install_tox_324_for_latest.application.tox.version == '3.24.0'
       - install_tox_latest_with_preinstall is changed
-      - install_tox_latest_with_preinstall.application.tox.version == latest_tox_version
+      - "'tox' in install_tox_latest_with_preinstall.application"
+      - install_tox_latest_with_preinstall.application.tox.version != '3.24.0'
       - install_tox_latest_with_preinstall_again is not changed
-      - install_tox_latest_with_preinstall_again.application.tox.version == latest_tox_version
       - install_tox_latest_with_preinstall_again_force is changed
-      - install_tox_latest_with_preinstall_again_force.application.tox.version == latest_tox_version
-      - uninstall_tox_latest_again is changed
-      - install_tox_with_deps is changed
-      - install_tox_with_deps.application.tox.version == latest_tox_version
-      - uninstall_tox_again is changed
-      - "'tox' not in uninstall_tox_again.application"
+      - uninstall_tox_2 is changed
+      - "'tox' not in uninstall_tox_2.application"
+      - install_tox_latest_docs is changed
+      - install_tox_latest_docs_again is not changed
+      - uninstall_tox_3 is changed
+      - "'tox' not in uninstall_tox_3.application"
+
+##############################################################################
+# Test version specifiers in name parameter
+- name: Run version specifier tests
+  ansible.builtin.include_tasks: testcase-10031-version-specs.yml
 
 ##############################################################################
 
@@ -246,21 +282,23 @@
 - name: Include testcase for issue 8656
   ansible.builtin.include_tasks: testcase-8656.yml
 
-- name: Recent features
-  when:
-    - has_pipx170
-  block:
-    - name: Include testcase for PR 8793 --global
-      ansible.builtin.include_tasks: testcase-8793-global.yml
+- name: Include testcase for PR 8793 --global
+  ansible.builtin.include_tasks: testcase-8793-global.yml
 
-    - name: Include testcase for PR 8809 install-all
-      ansible.builtin.include_tasks: testcase-8809-installall.yml
+- name: Include testcase for PR 8809 install-all
+  ansible.builtin.include_tasks: testcase-8809-installall.yml
 
-    - name: Include testcase for PR 8809 pin
-      ansible.builtin.include_tasks: testcase-8809-pin.yml
+- name: Include testcase for PR 8809 pin
+  ansible.builtin.include_tasks: testcase-8809-pin.yml
 
-    - name: Include testcase for PR 8809 injectpkg
-      ansible.builtin.include_tasks: testcase-8809-uninjectpkg.yml
+- name: Include testcase for PR 8809 injectpkg
+  ansible.builtin.include_tasks: testcase-8809-uninjectpkg.yml
 
-    - name: Include testcase for PR 9009 injectpkg --global
-      ansible.builtin.include_tasks: testcase-9009-fixglobal.yml
+- name: Include testcase for PR 9009 injectpkg --global
+  ansible.builtin.include_tasks: testcase-9009-fixglobal.yml
+
+- name: Include testcase for PR 9103 upgrade --global
+  ansible.builtin.include_tasks: testcase-9103-upgrade-global.yml
+
+- name: Include testcase for issue 9619 latest --global
+  ansible.builtin.include_tasks: testcase-9619-latest-global.yml
diff --git a/tests/integration/targets/pipx/tasks/testcase-10031-version-specs.yml b/tests/integration/targets/pipx/tasks/testcase-10031-version-specs.yml
new file mode 100644
index 0000000000..e018720bd5
--- /dev/null
+++ b/tests/integration/targets/pipx/tasks/testcase-10031-version-specs.yml
@@ -0,0 +1,83 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+##############################################################################
+# Test version specifiers in name parameter
+
+- name: Ensure tox is uninstalled
+  community.general.pipx:
+    state: absent
+    name: tox
+  register: uninstall_tox
+
+- name: Install tox with version specifier in name
+  community.general.pipx:
+    name: tox>=3.22.0,<3.27.0
+  register: install_tox_version
+
+- name: Install tox with same version specifier (idempotency check)
+  community.general.pipx:
+    name: tox>=3.22.0,<3.27.0
+  register: install_tox_version_again
+
+- name: Ensure tox is uninstalled again
+  community.general.pipx:
+    state: absent
+    name: tox
+
+- name: Install tox with extras and version
+  community.general.pipx:
+    name: "tox[testing]>=3.22.0,<3.27.0"
+  register: install_tox_extras
+  ignore_errors: true # Some versions might not have this extra
+
+- name: Install tox with higher version specifier
+  community.general.pipx:
+    name: "tox>=3.27.0"
+  register: install_tox_higher_version
+
+- name: Install tox with higher version specifier (force)
+  community.general.pipx:
+    name: "tox>=3.27.0"
+    force: true
+  register: install_tox_higher_version_force
+
+- name: Cleanup tox
+  community.general.pipx:
+    state: absent
+    name: tox
+  register: uninstall_tox_final
+
+- name: Check version specifier assertions
+  assert:
+    that:
+      - install_tox_version is changed
+      - "'tox' in install_tox_version.application"
+      - "install_tox_version.application.tox.version is version('3.22.0', '>=')"
+      - "install_tox_version.application.tox.version is version('3.27.0', '<')"
+      - install_tox_version_again is not changed
+      - "'tox' in install_tox_extras.application"
+      - "install_tox_extras.application.tox.version is version('3.22.0', '>=')"
+      - "install_tox_extras.application.tox.version is version('3.27.0', '<')"
+      - install_tox_higher_version is changed
+      - install_tox_higher_version_force is changed
+      - uninstall_tox_final is changed
+      - "'tox' not in uninstall_tox_final.application"
+
+- name: If packaging is recent
+  when:
+    - has_packaging22
+  block:
+    - name: Install tox with invalid version specifier
+      community.general.pipx:
+        name: "tox>>>>>3.27.0"
+      register: install_tox_invalid
+      ignore_errors: true
+
+    - name: Check version specifier assertions
+      assert:
+        that:
+          - install_tox_invalid is failed
+          - "'Invalid package specification' in install_tox_invalid.msg"
diff --git a/tests/integration/targets/pipx/tasks/testcase-8793-global.yml b/tests/integration/targets/pipx/tasks/testcase-8793-global.yml
index 7d3c871306..b9bf8b75f6 100644
--- a/tests/integration/targets/pipx/tasks/testcase-8793-global.yml
+++ b/tests/integration/targets/pipx/tasks/testcase-8793-global.yml
@@ -5,7 +5,7 @@
 
 - name: Set up environment
   environment:
-    PATH: /usr/local/bin:{{ ansible_env.PATH }}
+    PATH: /root/.local/bin:/usr/local/bin:{{ ansible_env.PATH }}
   block:
     - name: Remove global pipx dir
       ansible.builtin.file:
diff --git a/tests/integration/targets/pipx/tasks/testcase-9103-upgrade-global.yml b/tests/integration/targets/pipx/tasks/testcase-9103-upgrade-global.yml
new file mode 100644
index 0000000000..60621a42be
--- /dev/null
+++ b/tests/integration/targets/pipx/tasks/testcase-9103-upgrade-global.yml
@@ -0,0 +1,38 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: 9103-Ensure application hello-world is uninstalled
+  community.general.pipx:
+    name: hello-world
+    state: absent
+    global: true
+
+- name: 9103-Install application hello-world
+  community.general.pipx:
+    name: hello-world
+    source: hello-world==0.1
+    global: true
+  register: install_hw
+
+- name: 9103-Upgrade application hello-world
+  community.general.pipx:
+    state: upgrade
+    name: hello-world
+    global: true
+  register: upgrade_hw
+
+- name: 9103-Ensure application pylint is uninstalled
+  community.general.pipx:
+    name: pylint
+    state: absent
+    global: true
+
+- name: 9103-Assertions
+  ansible.builtin.assert:
+    that:
+      - install_hw is changed
+      - upgrade_hw is changed
+      - upgrade_hw.cmd[-3] == "upgrade"
+      - upgrade_hw.cmd[-2] == "--global"
diff --git a/tests/integration/targets/pipx/tasks/testcase-9619-latest-global.yml b/tests/integration/targets/pipx/tasks/testcase-9619-latest-global.yml
new file mode 100644
index 0000000000..e06ee438c0
--- /dev/null
+++ b/tests/integration/targets/pipx/tasks/testcase-9619-latest-global.yml
@@ -0,0 +1,38 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: 9619-Ensure application hello-world is uninstalled
+  community.general.pipx:
+    name: hello-world
+    state: absent
+    global: true
+
+- name: 9619-Install application hello-world
+  community.general.pipx:
+    name: hello-world
+    source: hello-world==0.1
+    global: true
+  register: install_hw
+
+- name: 9619-Upgrade application hello-world
+  community.general.pipx:
+    state: latest
+    name: hello-world
+    global: true
+  register: latest_hw
+
+- name: 9619-Ensure application pylint is uninstalled
+  community.general.pipx:
+    name: pylint
+    state: absent
+    global: true
+
+- name: 9619-Assertions
+  ansible.builtin.assert:
+    that:
+      - install_hw is changed
+      - latest_hw is changed
+      - latest_hw.cmd[-3] == "upgrade"
+      - latest_hw.cmd[-2] == "--global"
diff --git a/tests/integration/targets/pipx/tasks/testcase-oldsitewide.yml b/tests/integration/targets/pipx/tasks/testcase-oldsitewide.yml
index 1db3e60406..812cd9bd74 100644
--- a/tests/integration/targets/pipx/tasks/testcase-oldsitewide.yml
+++ b/tests/integration/targets/pipx/tasks/testcase-oldsitewide.yml
@@ -7,7 +7,7 @@
   ansible.builtin.file:
     path: /opt/pipx
     state: directory
-    mode: 0755
+    mode: "0755"
 
 - name: Install tox site-wide
   community.general.pipx:
diff --git a/tests/integration/targets/pipx_info/tasks/main.yml b/tests/integration/targets/pipx_info/tasks/main.yml
index e3de105d6f..d51ce1b33e 100644
--- a/tests/integration/targets/pipx_info/tasks/main.yml
+++ b/tests/integration/targets/pipx_info/tasks/main.yml
@@ -3,47 +3,53 @@
 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
 # SPDX-License-Identifier: GPL-3.0-or-later
 
-- name: install pipx
-  pip:
-    name: pipx
+- name: Bail out if Python < 3.8
+  when: ansible_python_version is version('3.8', '<')
+  ansible.builtin.meta: end_play
+
+- name: Install pipx>=1.7.0
+  ansible.builtin.pip:
+    name: pipx>=1.7.0
     extra_args: --user
 
 ##############################################################################
-- name: ensure application tox is uninstalled
+- name: Ensure applications are uninstalled
   community.general.pipx:
+    name: "{{ item }}"
     state: absent
-    name: tox
+  loop:
+    - tox
+    - pylint
 
-- name: retrieve applications (empty)
+- name: Retrieve applications (empty)
   community.general.pipx_info: {}
   register: info_empty
 
-- name: install application tox
+- name: Install application tox
   community.general.pipx:
     name: tox
 
-- name: retrieve applications
+- name: Retrieve applications
   community.general.pipx_info: {}
   register: info_all
 
-- name: retrieve applications (include_deps=true)
+- name: Retrieve applications (include_deps=true)
   community.general.pipx_info:
     include_deps: true
   register: info_all_deps
 
-- name: retrieve application tox
+- name: Retrieve application tox
   community.general.pipx_info:
     name: tox
     include_deps: true
   register: info_tox
 
-- name: uninstall application tox
+- name: Uninstall application tox
   community.general.pipx:
     state: absent
     name: tox
 
-- name: check assertions tox
-  assert:
+- name: Check assertions tox
+  ansible.builtin.assert:
     that:
       - info_empty.application|length == 0
@@ -63,8 +69,8 @@
       - info_tox.application == info_all_deps.application
 
 ##############################################################################
-- name: set test applications
-  set_fact:
+- name: Set test applications
+  ansible.builtin.set_fact:
     apps:
       - name: tox
         source: tox==3.24.0
@@ -72,19 +78,19 @@
         inject_packages:
           - licenses
 
-- name: ensure applications are uninstalled
+- name: Ensure applications are uninstalled
   community.general.pipx:
     name: "{{ item.name }}"
     state: absent
   loop: "{{ apps }}"
 
-- name: install applications
+- name: Install applications
   community.general.pipx:
     name: "{{ item.name }}"
     source: "{{ item.source | default(omit) }}"
   loop: "{{ apps }}"
 
-- name: inject packages
+- name: Inject packages
   community.general.pipx:
     state: inject
     name: "{{ item.name }}"
@@ -92,31 +98,31 @@
   when: "'inject_packages' in item"
   loop: "{{ apps }}"
 
-- name: retrieve applications
+- name: Retrieve applications
   community.general.pipx_info: {}
   register: info2_all
 
-- name: retrieve applications (include_deps=true)
+- name: Retrieve applications (include_deps=true)
   community.general.pipx_info:
     include_deps: true
     include_injected: true
   register: info2_all_deps
 
-- name: retrieve application pylint
+- name: Retrieve application pylint
   community.general.pipx_info:
     name: pylint
     include_deps: true
     include_injected: true
   register: info2_lint
 
-- name: ensure applications are uninstalled
+- name: Ensure applications are uninstalled
   community.general.pipx:
     name: "{{ item.name }}"
     state: absent
   loop: "{{ apps }}"
 
-- name: check assertions multiple apps
-  assert:
+- name: Check assertions multiple apps
+  ansible.builtin.assert:
     that:
       - all_apps|length == 2
       - all_apps[1].name == "tox"
@@ -135,6 +141,6 @@
       - all_apps_deps|length == 2
       - lint[0] == all_apps_deps[0]
   vars:
-    all_apps: "{{ info2_all.application|sort(attribute='name') }}"
+    all_apps: "{{ info2_all.application | sort(attribute='name') }}"
     all_apps_deps: "{{ info2_all_deps.application | sort(attribute='name') }}"
     lint: "{{ info2_lint.application | sort(attribute='name') }}"
diff --git a/tests/integration/targets/pkgng/tasks/freebsd.yml b/tests/integration/targets/pkgng/tasks/freebsd.yml
index 612e7c4d42..b51ad836bd 100644
--- a/tests/integration/targets/pkgng/tasks/freebsd.yml
+++ b/tests/integration/targets/pkgng/tasks/freebsd.yml
@@ -469,8 +469,10 @@
       - '(pkgng_example8_invalid_annotation_failure.results | selectattr("changed") | list | count) == 0'
       # Invalid strings should always fail
       - '(pkgng_example8_invalid_annotation_failure.results | rejectattr("failed") | list | count) == 0'
-      # Invalid strings should not cause an exception
-      - '(pkgng_example8_invalid_annotation_failure.results | selectattr("exception", "defined") | list | count) == 0'
+      # Invalid strings should not cause a module crash
+      - '(pkgng_example8_invalid_annotation_failure.results | selectattr("module_stdout", "defined") | list | count) == 0'
+      # Invalid strings should have a proper msg field
+      - '(pkgng_example8_invalid_annotation_failure.results | selectattr("msg", "match", "failed to annotate zsh, invalid annotate string: .*") | list | count) == 4'
       # Verify annotations are unaffected
       - '(pkgng_example8_invalid_annotation_verify.stdout_lines | select("search", "(naked_string|invalid_operation|empty_first_tag|validsecond|notag)") | list | count) == 0'
 
@@ -521,18 +523,25 @@
-521,18 +523,25 @@ # NOTE: FreeBSD 13.4 fails to update the package catalogue for unknown reasons (someone with FreeBSD # knowledge has to take a look) # + # NOTE: FreeBSD 13.5 fails to update the package catalogue for unknown reasons (someone with FreeBSD + # knowledge has to take a look) + # # NOTE: FreeBSD 14.0 fails to update the package catalogue for unknown reasons (someone with FreeBSD # knowledge has to take a look) # # NOTE: FreeBSD 14.1 fails to update the package catalogue for unknown reasons (someone with FreeBSD # knowledge has to take a look) # + # NOTE: FreeBSD 14.2 fails as well (someone with FreeBSD knowledge has to take a look) + # + # NOTE: FreeBSD 14.3 fails as well (someone with FreeBSD knowledge has to take a look) + # # See also # https://github.com/ansible-collections/community.general/issues/5795 when: >- (ansible_distribution_version is version('12.01', '>=') and ansible_distribution_version is version('12.3', '<')) - or (ansible_distribution_version is version('13.5', '>=') and ansible_distribution_version is version('14.0', '<')) - or ansible_distribution_version is version('14.2', '>=') + or (ansible_distribution_version is version('13.6', '>=') and ansible_distribution_version is version('14.0', '<')) + or ansible_distribution_version is version('14.4', '>=') block: - name: Setup testjail include_tasks: setup-testjail.yml diff --git a/tests/integration/targets/pkgutil/tasks/main.yml b/tests/integration/targets/pkgutil/tasks/main.yml index 8ceb4adcc3..e7f665efbf 100644 --- a/tests/integration/targets/pkgutil/tasks/main.yml +++ b/tests/integration/targets/pkgutil/tasks/main.yml @@ -24,7 +24,7 @@ - name: Verify cm_add_package assert: that: - - cm_add_package is changed + - cm_add_package is changed - name: Add package (normal mode) pkgutil: @@ -35,7 +35,7 @@ - name: Verify nm_add_package assert: that: - - nm_add_package is changed + - nm_add_package is changed - name: Add package again (check_mode) pkgutil: @@ -47,7 +47,7 @@ - name: Verify cm_add_package_again assert: that: - - cm_add_package_again is not changed + - cm_add_package_again is not changed - name: Add package again (normal mode) pkgutil: @@ -58,7 +58,7 @@ - name: Verify nm_add_package_again assert: that: - - nm_add_package_again is not changed + - nm_add_package_again is not changed # REMOVE PACKAGE @@ -72,7 +72,7 @@ - name: Verify cm_remove_package assert: that: - - cm_remove_package is changed + - cm_remove_package is changed - name: Remove package (normal mode) pkgutil: @@ -83,7 +83,7 @@ - name: Verify nm_remove_package assert: that: - - nm_remove_package is changed + - nm_remove_package is changed - name: Remove package again (check_mode) pkgutil: @@ -95,7 +95,7 @@ - name: Verify cm_remove_package_again assert: that: - - cm_remove_package_again is not changed + - cm_remove_package_again is not changed - name: Remove package again (normal mode) pkgutil: @@ -106,7 +106,7 @@ - name: Verify nm_remove_package_again assert: that: - - nm_remove_package_again is not changed + - nm_remove_package_again is not changed # RESTORE ENVIRONMENT diff --git a/tests/integration/targets/proxmox/tasks/main.yml b/tests/integration/targets/proxmox/tasks/main.yml deleted file mode 100644 index 1ce9767b70..0000000000 --- a/tests/integration/targets/proxmox/tasks/main.yml +++ /dev/null @@ -1,614 +0,0 @@ -#################################################################### -# WARNING: These are designed specifically for Ansible tests # -# and should not be used as examples of how to write Ansible roles # 
-#################################################################### - -# Copyright (c) 2020, Tristan Le Guern -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -- name: List domains - proxmox_domain_info: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - validate_certs: "{{ validate_certs }}" - register: results - -- assert: - that: - - results is not changed - - results.proxmox_domains is defined - -- name: Retrieve info about pve - proxmox_domain_info: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - validate_certs: "{{ validate_certs }}" - domain: pve - register: results - -- assert: - that: - - results is not changed - - results.proxmox_domains is defined - - results.proxmox_domains|length == 1 - - results.proxmox_domains[0].type == 'pve' - -- name: List groups - proxmox_group_info: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - validate_certs: "{{ validate_certs }}" - register: results - -- assert: - that: - - results is not changed - - results.proxmox_groups is defined - -- name: List users - proxmox_user_info: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - validate_certs: "{{ validate_certs }}" - register: results - -- assert: - that: - - results is not changed - - results.proxmox_users is defined - -- name: Retrieve info about api_user using name and domain - proxmox_user_info: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - validate_certs: "{{ validate_certs }}" - user: "{{ user }}" - domain: "{{ domain }}" - register: results_user_domain - -- assert: - that: - - results_user_domain is not changed - - results_user_domain.proxmox_users is defined - - results_user_domain.proxmox_users|length == 1 - - results_user_domain.proxmox_users[0].domain == "{{ domain }}" - - results_user_domain.proxmox_users[0].user == "{{ user }}" - - results_user_domain.proxmox_users[0].userid == "{{ user }}@{{ domain }}" - -- name: Retrieve info about api_user using userid - proxmox_user_info: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - validate_certs: "{{ validate_certs }}" - userid: "{{ user }}@{{ domain }}" - register: results_userid - -- assert: - that: - - results_userid is not changed - - results_userid.proxmox_users is defined - - results_userid.proxmox_users|length == 1 - - results_userid.proxmox_users[0].domain == "{{ domain }}" - - results_userid.proxmox_users[0].user == "{{ 
user }}" - - results_userid.proxmox_users[0].userid == "{{ user }}@{{ domain }}" - -- name: Retrieve info about storage - proxmox_storage_info: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - validate_certs: "{{ validate_certs }}" - storage: "{{ storage }}" - register: results_storage - -- assert: - that: - - results_storage is not changed - - results_storage.proxmox_storages is defined - - results_storage.proxmox_storages|length == 1 - - results_storage.proxmox_storages[0].storage == "{{ storage }}" - -- name: List content on storage - proxmox_storage_contents_info: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - validate_certs: "{{ validate_certs }}" - storage: "{{ storage }}" - node: "{{ node }}" - content: images - register: results_list_storage - -- assert: - that: - - results_list_storage is not changed - - results_list_storage.proxmox_storage_content is defined - - results_list_storage.proxmox_storage_content|length == 1 - -- name: VM creation - tags: [ 'create' ] - block: - - name: Create test vm test-instance - proxmox_kvm: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - validate_certs: "{{ validate_certs }}" - node: "{{ node }}" - storage: "{{ storage }}" - vmid: "{{ from_vmid }}" - name: test-instance - clone: 'yes' - state: present - timeout: 500 - register: results_kvm - - - set_fact: - vmid: "{{ results_kvm.msg.split(' ')[-7] }}" - - - assert: - that: - - results_kvm is changed - - results_kvm.vmid == from_vmid - - results_kvm.msg == "VM test-instance with newid {{ vmid }} cloned from vm with vmid {{ from_vmid }}" - - - pause: - seconds: 30 - -- name: VM start - tags: [ 'start' ] - block: - - name: Start test VM - proxmox_kvm: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - validate_certs: "{{ validate_certs }}" - node: "{{ node }}" - vmid: "{{ vmid }}" - state: started - register: results_action_start - - - assert: - that: - - results_action_start is changed - - results_action_start.status == 'stopped' - - results_action_start.vmid == {{ vmid }} - - results_action_start.msg == "VM {{ vmid }} started" - - - pause: - seconds: 90 - - - name: Try to start test VM again - proxmox_kvm: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - validate_certs: "{{ validate_certs }}" - node: "{{ node }}" - vmid: "{{ vmid }}" - state: started - register: results_action_start_again - - - assert: - that: - - results_action_start_again is not changed - - results_action_start_again.status == 'running' - - results_action_start_again.vmid == {{ vmid }} - - results_action_start_again.msg == "VM {{ vmid }} is already running" - - - name: Check current status - proxmox_kvm: - api_host: "{{ 
api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - validate_certs: "{{ validate_certs }}" - node: "{{ node }}" - vmid: "{{ vmid }}" - state: current - register: results_action_current - - - assert: - that: - - results_action_current is not changed - - results_action_current.status == 'running' - - results_action_current.vmid == {{ vmid }} - - results_action_current.msg == "VM test-instance with vmid = {{ vmid }} is running" - -- name: VM add/change/delete NIC - tags: [ 'nic' ] - block: - - name: Add NIC to test VM - proxmox_nic: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - validate_certs: "{{ validate_certs }}" - vmid: "{{ vmid }}" - state: present - interface: net5 - bridge: vmbr0 - tag: 42 - register: results - - - assert: - that: - - results is changed - - results.vmid == {{ vmid }} - - results.msg == "Nic net5 updated on VM with vmid {{ vmid }}" - - - name: Update NIC no changes - proxmox_nic: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - validate_certs: "{{ validate_certs }}" - vmid: "{{ vmid }}" - state: present - interface: net5 - bridge: vmbr0 - tag: 42 - register: results - - - assert: - that: - - results is not changed - - results.vmid == {{ vmid }} - - results.msg == "Nic net5 unchanged on VM with vmid {{ vmid }}" - - - name: Update NIC with changes - proxmox_nic: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - validate_certs: "{{ validate_certs }}" - vmid: "{{ vmid }}" - state: present - interface: net5 - bridge: vmbr0 - tag: 24 - firewall: true - register: results - - - assert: - that: - - results is changed - - results.vmid == {{ vmid }} - - results.msg == "Nic net5 updated on VM with vmid {{ vmid }}" - - - name: Delete NIC - proxmox_nic: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - validate_certs: "{{ validate_certs }}" - vmid: "{{ vmid }}" - state: absent - interface: net5 - register: results - - - assert: - that: - - results is changed - - results.vmid == {{ vmid }} - - results.msg == "Nic net5 deleted on VM with vmid {{ vmid }}" - -- name: Create new disk in VM - tags: ['create_disk'] - block: - - name: Add new disk (without force) to VM - proxmox_disk: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - vmid: "{{ vmid }}" - disk: "{{ disk }}" - storage: "{{ storage }}" - size: 1 - state: present - register: results - - - assert: - that: - - results is changed - - results.vmid == {{ vmid }} - - results.msg == "Disk {{ disk }} created in VM {{ vmid }}" - - - name: Try add disk 
again with same options (expect no-op) - proxmox_disk: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - vmid: "{{ vmid }}" - disk: "{{ disk }}" - storage: "{{ storage }}" - size: 1 - state: present - register: results - - - assert: - that: - - results is not changed - - results.vmid == {{ vmid }} - - results.msg == "Disk {{ disk }} is up to date in VM {{ vmid }}" - - - name: Add new disk replacing existing disk (detach old and leave unused) - proxmox_disk: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - vmid: "{{ vmid }}" - disk: "{{ disk }}" - storage: "{{ storage }}" - size: 2 - create: forced - state: present - register: results - - - assert: - that: - - results is changed - - results.vmid == {{ vmid }} - - results.msg == "Disk {{ disk }} created in VM {{ vmid }}" - -- name: Update existing disk in VM - tags: ['update_disk'] - block: - - name: Update disk configuration - proxmox_disk: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - vmid: "{{ vmid }}" - disk: "{{ disk }}" - backup: false - ro: true - aio: native - state: present - register: results - - - assert: - that: - - results is changed - - results.vmid == {{ vmid }} - - results.msg == "Disk {{ disk }} updated in VM {{ vmid }}" - -- name: Grow existing disk in VM - tags: ['grow_disk'] - block: - - name: Increase disk size - proxmox_disk: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - vmid: "{{ vmid }}" - disk: "{{ disk }}" - size: +1G - state: resized - register: results - - - assert: - that: - - results is changed - - results.vmid == {{ vmid }} - - results.msg == "Disk {{ disk }} resized in VM {{ vmid }}" - -- name: Detach disk and leave it unused - tags: ['detach_disk'] - block: - - name: Detach disk - proxmox_disk: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - vmid: "{{ vmid }}" - disk: "{{ disk }}" - state: detached - register: results - - - assert: - that: - - results is changed - - results.vmid == {{ vmid }} - - results.msg == "Disk {{ disk }} detached from VM {{ vmid }}" - -- name: Move disk to another storage or another VM - tags: ['move_disk'] - block: - - name: Move disk to another storage inside same VM - proxmox_disk: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - vmid: "{{ vmid }}" - disk: "{{ disk }}" - target_storage: "{{ target_storage }}" - format: "{{ target_format }}" - state: moved - register: results - - - assert: - that: - - results is changed - - results.vmid == {{ vmid }} - - results.msg == "Disk {{ 
disk }} moved from VM {{ vmid }} storage {{ results.storage }}" - - - name: Move disk to another VM (same storage) - proxmox_disk: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - vmid: "{{ vmid }}" - disk: "{{ disk }}" - target_vmid: "{{ target_vm }}" - target_disk: "{{ target_disk }}" - state: moved - register: results - - - assert: - that: - - results is changed - - results.vmid == {{ vmid }} - - results.msg == "Disk {{ disk }} moved from VM {{ vmid }} storage {{ results.storage }}" - - -- name: Remove disk permanently - tags: ['remove_disk'] - block: - - name: Remove disk - proxmox_disk: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - vmid: "{{ target_vm }}" - disk: "{{ target_disk }}" - state: absent - register: results - - - assert: - that: - - results is changed - - results.vmid == {{ target_vm }} - - results.msg == "Disk {{ target_disk }} removed from VM {{ target_vm }}" - -- name: VM stop - tags: [ 'stop' ] - block: - - name: Stop test VM - proxmox_kvm: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - validate_certs: "{{ validate_certs }}" - node: "{{ node }}" - vmid: "{{ vmid }}" - state: stopped - register: results_action_stop - - - assert: - that: - - results_action_stop is changed - - results_action_stop.status == 'running' - - results_action_stop.vmid == {{ vmid }} - - results_action_stop.msg == "VM {{ vmid }} is shutting down" - - - pause: - seconds: 5 - - - name: Check current status again - proxmox_kvm: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - validate_certs: "{{ validate_certs }}" - node: "{{ node }}" - vmid: "{{ vmid }}" - state: current - register: results_action_current - - - assert: - that: - - results_action_current is not changed - - results_action_current.status == 'stopped' - - results_action_current.vmid == {{ vmid }} - - results_action_current.msg == "VM test-instance with vmid = {{ vmid }} is stopped" - -- name: VM destroy - tags: [ 'destroy' ] - block: - - name: Destroy test VM - proxmox_kvm: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - validate_certs: "{{ validate_certs }}" - node: "{{ node }}" - vmid: "{{ vmid }}" - state: absent - register: results_kvm_destroy - - - assert: - that: - - results_kvm_destroy is changed - - results_kvm_destroy.vmid == {{ vmid }} - - results_kvm_destroy.msg == "VM {{ vmid }} removed" - -- name: Retrieve information about nodes - proxmox_node_info: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - validate_certs: "{{ 
validate_certs }}" - register: results - -- assert: - that: - - results is not changed - - results.proxmox_nodes is defined - - results.proxmox_nodes|length >= 1 - - results.proxmox_nodes[0].type == 'node' diff --git a/tests/integration/targets/proxmox_pool/tasks/main.yml b/tests/integration/targets/proxmox_pool/tasks/main.yml deleted file mode 100644 index 2b22960f2c..0000000000 --- a/tests/integration/targets/proxmox_pool/tasks/main.yml +++ /dev/null @@ -1,220 +0,0 @@ -#################################################################### -# WARNING: These are designed specifically for Ansible tests # -# and should not be used as examples of how to write Ansible roles # -#################################################################### - -# Copyright (c) 2023, Sergei Antipov -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -- name: Proxmox VE pool and pool membership management - tags: ["pool"] - block: - - name: Make sure poolid parameter is not missing - proxmox_pool: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - validate_certs: "{{ validate_certs }}" - ignore_errors: true - register: result - - - assert: - that: - - result is failed - - "'missing required arguments: poolid' in result.msg" - - - name: Create pool (Check) - proxmox_pool: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - validate_certs: "{{ validate_certs }}" - poolid: "{{ poolid }}" - check_mode: true - register: result - - - assert: - that: - - result is changed - - result is success - - - name: Create pool - proxmox_pool: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - validate_certs: "{{ validate_certs }}" - poolid: "{{ poolid }}" - register: result - - - assert: - that: - - result is changed - - result is success - - result.poolid == "{{ poolid }}" - - - name: Delete pool (Check) - proxmox_pool: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - validate_certs: "{{ validate_certs }}" - poolid: "{{ poolid }}" - state: absent - check_mode: true - register: result - - - assert: - that: - - result is changed - - result is success - - - name: Delete non-existing pool should do nothing - proxmox_pool: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - validate_certs: "{{ validate_certs }}" - poolid: "non-existing-poolid" - state: absent - register: result - - - assert: - that: - - result is not changed - - result is success - - - name: Deletion of non-empty pool fails - block: - - name: Add storage into pool - proxmox_pool_member: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain 
}}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - validate_certs: "{{ validate_certs }}" - poolid: "{{ poolid }}" - member: "{{ member }}" - type: "{{ member_type }}" - diff: true - register: result - - - assert: - that: - - result is changed - - result is success - - "'{{ member }}' in result.diff.after.members" - - - name: Add non-existing storage into pool should fail - proxmox_pool_member: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - validate_certs: "{{ validate_certs }}" - poolid: "{{ poolid }}" - member: "non-existing-storage" - type: "{{ member_type }}" - ignore_errors: true - register: result - - - assert: - that: - - result is failed - - "'Storage non-existing-storage doesn\\'t exist in the cluster' in result.msg" - - - name: Delete non-empty pool - proxmox_pool: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - validate_certs: "{{ validate_certs }}" - poolid: "{{ poolid }}" - state: absent - ignore_errors: true - register: result - - - assert: - that: - - result is failed - - "'Please remove members from pool first.' in result.msg" - - - name: Delete storage from the pool - proxmox_pool_member: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - validate_certs: "{{ validate_certs }}" - poolid: "{{ poolid }}" - member: "{{ member }}" - type: "{{ member_type }}" - state: absent - register: result - - - assert: - that: - - result is success - - result is changed - - rescue: - - name: Delete storage from the pool if it is added - proxmox_pool_member: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - validate_certs: "{{ validate_certs }}" - poolid: "{{ poolid }}" - member: "{{ member }}" - type: "{{ member_type }}" - state: absent - ignore_errors: true - - - name: Delete pool - proxmox_pool: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - validate_certs: "{{ validate_certs }}" - poolid: "{{ poolid }}" - state: absent - register: result - - - assert: - that: - - result is changed - - result is success - - result.poolid == "{{ poolid }}" - - rescue: - - name: Delete test pool if it is created - proxmox_pool: - api_host: "{{ api_host }}" - api_user: "{{ user }}@{{ domain }}" - api_password: "{{ api_password | default(omit) }}" - api_token_id: "{{ api_token_id | default(omit) }}" - api_token_secret: "{{ api_token_secret | default(omit) }}" - validate_certs: "{{ validate_certs }}" - poolid: "{{ poolid }}" - state: absent - ignore_errors: true diff --git a/tests/integration/targets/proxmox_template/tasks/main.yml 
b/tests/integration/targets/proxmox_template/tasks/main.yml deleted file mode 100644 index 2d1187e890..0000000000 --- a/tests/integration/targets/proxmox_template/tasks/main.yml +++ /dev/null @@ -1,136 +0,0 @@ -#################################################################### -# WARNING: These are designed specifically for Ansible tests # -# and should not be used as examples of how to write Ansible roles # -#################################################################### - -# Copyright (c) 2023, Sergei Antipov -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -- name: Proxmox VE virtual machines templates management - tags: ['template'] - vars: - filename: /tmp/dummy.iso - block: - - name: Create dummy ISO file - ansible.builtin.command: - cmd: 'truncate -s 300M {{ filename }}' - - - name: Delete requests_toolbelt module if it is installed - ansible.builtin.pip: - name: requests_toolbelt - state: absent - - - name: Install latest proxmoxer - ansible.builtin.pip: - name: proxmoxer - state: latest - - - name: Upload ISO as template to Proxmox VE cluster should fail - proxmox_template: - api_host: '{{ api_host }}' - api_user: '{{ user }}@{{ domain }}' - api_password: '{{ api_password | default(omit) }}' - api_token_id: '{{ api_token_id | default(omit) }}' - api_token_secret: '{{ api_token_secret | default(omit) }}' - validate_certs: '{{ validate_certs }}' - node: '{{ node }}' - src: '{{ filename }}' - content_type: iso - force: true - register: result - ignore_errors: true - - - assert: - that: - - result is failed - - result.msg is match('\'requests_toolbelt\' module is required to upload files larger than 256MB') - - - name: Install old (1.1.1) version of proxmoxer - ansible.builtin.pip: - name: proxmoxer==1.1.1 - state: present - - - name: Upload ISO as template to Proxmox VE cluster should be successful - proxmox_template: - api_host: '{{ api_host }}' - api_user: '{{ user }}@{{ domain }}' - api_password: '{{ api_password | default(omit) }}' - api_token_id: '{{ api_token_id | default(omit) }}' - api_token_secret: '{{ api_token_secret | default(omit) }}' - validate_certs: '{{ validate_certs }}' - node: '{{ node }}' - src: '{{ filename }}' - content_type: iso - force: true - register: result - - - assert: - that: - - result is changed - - result is success - - result.msg is match('template with volid=local:iso/dummy.iso uploaded') - - - name: Install latest proxmoxer - ansible.builtin.pip: - name: proxmoxer - state: latest - - - name: Make smaller dummy file - ansible.builtin.command: - cmd: 'truncate -s 128M {{ filename }}' - - - name: Upload ISO as template to Proxmox VE cluster should be successful - proxmox_template: - api_host: '{{ api_host }}' - api_user: '{{ user }}@{{ domain }}' - api_password: '{{ api_password | default(omit) }}' - api_token_id: '{{ api_token_id | default(omit) }}' - api_token_secret: '{{ api_token_secret | default(omit) }}' - validate_certs: '{{ validate_certs }}' - node: '{{ node }}' - src: '{{ filename }}' - content_type: iso - force: true - register: result - - - assert: - that: - - result is changed - - result is success - - result.msg is match('template with volid=local:iso/dummy.iso uploaded') - - - name: Install requests_toolbelt - ansible.builtin.pip: - name: requests_toolbelt - state: present - - - name: Make big dummy file - ansible.builtin.command: - cmd: 'truncate -s 300M {{ filename }}' - - - name: Upload ISO as template to Proxmox 
VE cluster should be successful - proxmox_template: - api_host: '{{ api_host }}' - api_user: '{{ user }}@{{ domain }}' - api_password: '{{ api_password | default(omit) }}' - api_token_id: '{{ api_token_id | default(omit) }}' - api_token_secret: '{{ api_token_secret | default(omit) }}' - validate_certs: '{{ validate_certs }}' - node: '{{ node }}' - src: '{{ filename }}' - content_type: iso - force: true - register: result - - - assert: - that: - - result is changed - - result is success - - result.msg is match('template with volid=local:iso/dummy.iso uploaded') - - always: - - name: Delete ISO file from host - ansible.builtin.file: - path: '{{ filename }}' - state: absent diff --git a/tests/integration/targets/python_requirements_info/tasks/main.yml b/tests/integration/targets/python_requirements_info/tasks/main.yml index 24a7d1366a..cf0b9ad1d0 100644 --- a/tests/integration/targets/python_requirements_info/tasks/main.yml +++ b/tests/integration/targets/python_requirements_info/tasks/main.yml @@ -8,6 +8,12 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later +- name: Make sure setuptools is installed + pip: + name: setuptools + state: present + when: ansible_facts.distribution == 'MacOSX' and ansible_facts.distribution_version is version('15', '>=') + - name: run python_requirements_info module python_requirements_info: register: basic_info diff --git a/tests/integration/targets/read_csv/tasks/main.yml b/tests/integration/targets/read_csv/tasks/main.yml index c09349dd5b..f8b46a3e62 100644 --- a/tests/integration/targets/read_csv/tasks/main.yml +++ b/tests/integration/targets/read_csv/tasks/main.yml @@ -26,14 +26,14 @@ - assert: that: - - users_unique.dict.dag.name == 'dag' - - users_unique.dict.dag.gecos == 'Dag Wieërs' - - users_unique.dict.dag.uid == '500' - - users_unique.dict.dag.gid == '500' - - users_unique.dict.jeroen.name == 'jeroen' - - users_unique.dict.jeroen.gecos == 'Jeroen Hoekx' - - users_unique.dict.jeroen.uid == '501' - - users_unique.dict.jeroen.gid == '500' + - users_unique.dict.dag.name == 'dag' + - users_unique.dict.dag.gecos == 'Dag Wieërs' + - users_unique.dict.dag.uid == '500' + - users_unique.dict.dag.gid == '500' + - users_unique.dict.jeroen.name == 'jeroen' + - users_unique.dict.jeroen.gecos == 'Jeroen Hoekx' + - users_unique.dict.jeroen.uid == '501' + - users_unique.dict.jeroen.gid == '500' # Read a CSV file and access the first item - name: Read users from CSV file and return a list @@ -43,14 +43,14 @@ - assert: that: - - users_unique.list.0.name == 'dag' - - users_unique.list.0.gecos == 'Dag Wieërs' - - users_unique.list.0.uid == '500' - - users_unique.list.0.gid == '500' - - users_unique.list.1.name == 'jeroen' - - users_unique.list.1.gecos == 'Jeroen Hoekx' - - users_unique.list.1.uid == '501' - - users_unique.list.1.gid == '500' + - users_unique.list.0.name == 'dag' + - users_unique.list.0.gecos == 'Dag Wieërs' + - users_unique.list.0.uid == '500' + - users_unique.list.0.gid == '500' + - users_unique.list.1.name == 'jeroen' + - users_unique.list.1.gecos == 'Jeroen Hoekx' + - users_unique.list.1.uid == '501' + - users_unique.list.1.gid == '500' # Create basic CSV file using semi-colon @@ -74,14 +74,14 @@ - assert: that: - - users_nonunique.dict.dag.name == 'dag' - - users_nonunique.dict.dag.gecos == 'Dag Wieers' - - users_nonunique.dict.dag.uid == '502' - - users_nonunique.dict.dag.gid == '500' - - users_nonunique.dict.jeroen.name == 'jeroen' - - 
users_nonunique.dict.jeroen.gecos == 'Jeroen Hoekx' - - users_nonunique.dict.jeroen.uid == '501' - - users_nonunique.dict.jeroen.gid == '500' + - users_nonunique.dict.dag.name == 'dag' + - users_nonunique.dict.dag.gecos == 'Dag Wieers' + - users_nonunique.dict.dag.uid == '502' + - users_nonunique.dict.dag.gid == '500' + - users_nonunique.dict.jeroen.name == 'jeroen' + - users_nonunique.dict.jeroen.gecos == 'Jeroen Hoekx' + - users_nonunique.dict.jeroen.uid == '501' + - users_nonunique.dict.jeroen.gid == '500' # Read a CSV file using an non-existing dialect @@ -94,8 +94,8 @@ - assert: that: - - users_placebo is failed - - users_placebo.msg == "Dialect 'placebo' is not supported by your version of python." + - users_placebo is failed + - users_placebo.msg == "Dialect 'placebo' is not supported by your version of python." # Create basic CSV file without header @@ -116,14 +116,14 @@ - assert: that: - - users_noheader.dict.dag.name == 'dag' - - users_noheader.dict.dag.gecos == 'Dag Wieërs' - - users_noheader.dict.dag.uid == '500' - - users_noheader.dict.dag.gid == '500' - - users_noheader.dict.jeroen.name == 'jeroen' - - users_noheader.dict.jeroen.gecos == 'Jeroen Hoekx' - - users_noheader.dict.jeroen.uid == '501' - - users_noheader.dict.jeroen.gid == '500' + - users_noheader.dict.dag.name == 'dag' + - users_noheader.dict.dag.gecos == 'Dag Wieërs' + - users_noheader.dict.dag.uid == '500' + - users_noheader.dict.dag.gid == '500' + - users_noheader.dict.jeroen.name == 'jeroen' + - users_noheader.dict.jeroen.gecos == 'Jeroen Hoekx' + - users_noheader.dict.jeroen.uid == '501' + - users_noheader.dict.jeroen.gid == '500' # Create broken file @@ -146,8 +146,8 @@ - assert: that: - - users_broken is failed - - "'Unable to process file' in users_broken.msg" + - users_broken is failed + - "'Unable to process file' in users_broken.msg" # Create basic CSV file with BOM - name: Create unique CSV file with BOM @@ -166,11 +166,11 @@ - assert: that: - - users_bom.list.0.name == 'dag' - - users_bom.list.0.gecos == 'Dag Wieërs' - - users_bom.list.0.uid == '500' - - users_bom.list.0.gid == '500' - - users_bom.list.1.name == 'jeroen' - - users_bom.list.1.gecos == 'Jeroen Hoekx' - - users_bom.list.1.uid == '501' - - users_bom.list.1.gid == '500' + - users_bom.list.0.name == 'dag' + - users_bom.list.0.gecos == 'Dag Wieërs' + - users_bom.list.0.uid == '500' + - users_bom.list.0.gid == '500' + - users_bom.list.1.name == 'jeroen' + - users_bom.list.1.gecos == 'Jeroen Hoekx' + - users_bom.list.1.uid == '501' + - users_bom.list.1.gid == '500' diff --git a/tests/integration/targets/redis_info/meta/main.yml b/tests/integration/targets/redis_info/meta/main.yml index cd516fd239..404b9ba31c 100644 --- a/tests/integration/targets/redis_info/meta/main.yml +++ b/tests/integration/targets/redis_info/meta/main.yml @@ -4,4 +4,4 @@ # SPDX-License-Identifier: GPL-3.0-or-later dependencies: -- setup_redis_replication + - setup_redis_replication diff --git a/tests/integration/targets/redis_info/tasks/main.yml b/tests/integration/targets/redis_info/tasks/main.yml index 4a11de3650..52263ecc4f 100644 --- a/tests/integration/targets/redis_info/tasks/main.yml +++ b/tests/integration/targets/redis_info/tasks/main.yml @@ -14,10 +14,10 @@ - assert: that: - - result is not changed - - result.info is defined - - result.info.tcp_port == master_port - - result.info.role == 'master' + - result is not changed + - result.info is defined + - result.info.tcp_port == master_port + - result.info.role == 'master' - name: redis_info - connect to master 
(check) community.general.redis_info: @@ -29,10 +29,10 @@ - assert: that: - - result is not changed - - result.info is defined - - result.info.tcp_port == master_port - - result.info.role == 'master' + - result is not changed + - result.info is defined + - result.info.tcp_port == master_port + - result.info.role == 'master' - name: redis_info - connect to replica community.general.redis_info: @@ -42,7 +42,7 @@ - assert: that: - - result is not changed - - result.info is defined - - result.info.tcp_port == replica_port - - result.info.role == 'slave' + - result is not changed + - result.info is defined + - result.info.tcp_port == replica_port + - result.info.role == 'slave' diff --git a/tests/integration/targets/rundeck/defaults/main.yml b/tests/integration/targets/rundeck/defaults/main.yml index 4d7ea31468..503f627857 100644 --- a/tests/integration/targets/rundeck/defaults/main.yml +++ b/tests/integration/targets/rundeck/defaults/main.yml @@ -6,3 +6,32 @@ rundeck_url: http://localhost:4440 rundeck_api_version: 39 rundeck_job_id: 3b8a6e54-69fb-42b7-b98f-f82e59238478 + +system_acl_policy: | + description: Test ACL + context: + application: 'rundeck' + for: + project: + - allow: + - read + by: + group: + - users + +project_acl_policy: | + description: Test project acl + for: + resource: + - equals: + kind: node + allow: [read,refresh] + - equals: + kind: event + allow: [read] + job: + - allow: [run,kill] + node: + - allow: [read,run] + by: + group: users diff --git a/tests/integration/targets/rundeck/files/test_job.yaml b/tests/integration/targets/rundeck/files/test_job.yaml index baa852ecce..073e11fd19 100644 --- a/tests/integration/targets/rundeck/files/test_job.yaml +++ b/tests/integration/targets/rundeck/files/test_job.yaml @@ -11,18 +11,18 @@ name: test_job nodeFilterEditable: false options: - - label: Exit Code - name: exit_code - value: '0' - - label: Sleep - name: sleep - value: '1' + - label: Exit Code + name: exit_code + value: '0' + - label: Sleep + name: sleep + value: '1' plugins: ExecutionLifecycle: null scheduleEnabled: true sequence: commands: - - exec: sleep $RD_OPTION_SLEEP && echo "Test done!" && exit $RD_OPTION_EXIT_CODE + - exec: sleep $RD_OPTION_SLEEP && echo "Test done!" 
&& exit $RD_OPTION_EXIT_CODE keepgoing: false strategy: node-first uuid: 3b8a6e54-69fb-42b7-b98f-f82e59238478 diff --git a/tests/integration/targets/rundeck/meta/main.yml b/tests/integration/targets/rundeck/meta/main.yml index c125e4046a..31dd1cfb9d 100644 --- a/tests/integration/targets/rundeck/meta/main.yml +++ b/tests/integration/targets/rundeck/meta/main.yml @@ -4,4 +4,4 @@ # SPDX-License-Identifier: GPL-3.0-or-later dependencies: -- setup_rundeck + - setup_rundeck diff --git a/tests/integration/targets/rundeck/tasks/main.yml b/tests/integration/targets/rundeck/tasks/main.yml index e42780b9b7..7762832d10 100644 --- a/tests/integration/targets/rundeck/tasks/main.yml +++ b/tests/integration/targets/rundeck/tasks/main.yml @@ -15,6 +15,9 @@ RD_USER: admin RD_PASSWORD: admin register: rundeck_api_token + retries: 3 + until: rundeck_api_token.rc == 0 + changed_when: true - name: Create a Rundeck project community.general.rundeck_project: @@ -24,6 +27,71 @@ token: "{{ rundeck_api_token.stdout_lines[-1] }}" state: present +- name: Create a system ACL + community.general.rundeck_acl_policy: + name: test_acl + api_version: "{{ rundeck_api_version }}" + url: "{{ rundeck_url }}" + token: "{{ rundeck_api_token.stdout_lines[-1] }}" + state: present + policy: "{{ system_acl_policy }}" + +- name: Create a project ACL + community.general.rundeck_acl_policy: + name: test_acl + api_version: "{{ rundeck_api_version }}" + url: "{{ rundeck_url }}" + token: "{{ rundeck_api_token.stdout_lines[-1] }}" + state: present + policy: "{{ project_acl_policy }}" + project: test_project + +- name: Retrieve ACLs + ansible.builtin.uri: + url: "{{ rundeck_url }}/api/{{ rundeck_api_version }}/{{ item }}" + headers: + accept: application/json + x-rundeck-auth-token: "{{ rundeck_api_token.stdout_lines[-1] }}" + register: acl_policy_check + loop: + - system/acl/test_acl.aclpolicy + - project/test_project/acl/test_acl.aclpolicy + +- name: Assert ACL content is correct + ansible.builtin.assert: + that: + - acl_policy_check['results'][0]['json']['contents'] == system_acl_policy + - acl_policy_check['results'][1]['json']['contents'] == project_acl_policy + +- name: Remove system ACL + community.general.rundeck_acl_policy: + name: test_acl + api_version: "{{ rundeck_api_version }}" + url: "{{ rundeck_url }}" + token: "{{ rundeck_api_token.stdout_lines[-1] }}" + state: absent + +- name: Remove project ACL + community.general.rundeck_acl_policy: + name: test_acl + api_version: "{{ rundeck_api_version }}" + url: "{{ rundeck_url }}" + token: "{{ rundeck_api_token.stdout_lines[-1] }}" + state: absent + project: test_project + +- name: Check that ACLs have been removed + ansible.builtin.uri: + url: "{{ rundeck_url }}/api/{{ rundeck_api_version }}/{{ item }}" + headers: + accept: application/json + x-rundeck-auth-token: "{{ rundeck_api_token.stdout_lines[-1] }}" + status_code: + - 404 + loop: + - system/acl/test_acl.aclpolicy + - project/test_project/acl/test_acl.aclpolicy + - name: Copy test_job definition to /tmp copy: src: test_job.yaml diff --git a/tests/integration/targets/scaleway_compute/tasks/security_group.yml b/tests/integration/targets/scaleway_compute/tasks/security_group.yml index 59f81e6af1..971fae6076 100644 --- a/tests/integration/targets/scaleway_compute/tasks/security_group.yml +++ b/tests/integration/targets/scaleway_compute/tasks/security_group.yml @@ -19,105 +19,105 @@ - debug: var=security_group - block: - - name: Create a server with security_group (Check) - check_mode: true - scaleway_compute: - name: '{{ 
scaleway_name }}' - state: present - image: '{{ scaleway_image_id }}' - organization: '{{ scaleway_organization }}' - region: '{{ scaleway_region }}' - commercial_type: '{{ scaleway_commerial_type }}' - security_group: '{{ security_group.scaleway_security_group.id }}' + - name: Create a server with security_group (Check) + check_mode: true + scaleway_compute: + name: '{{ scaleway_name }}' + state: present + image: '{{ scaleway_image_id }}' + organization: '{{ scaleway_organization }}' + region: '{{ scaleway_region }}' + commercial_type: '{{ scaleway_commerial_type }}' + security_group: '{{ security_group.scaleway_security_group.id }}' - register: server_creation_check_task + register: server_creation_check_task - - debug: var=server_creation_check_task + - debug: var=server_creation_check_task - - assert: - that: - - server_creation_check_task is success - - server_creation_check_task is changed + - assert: + that: + - server_creation_check_task is success + - server_creation_check_task is changed - - name: Create a server - scaleway_compute: - name: '{{ scaleway_name }}' - state: present - image: '{{ scaleway_image_id }}' - organization: '{{ scaleway_organization }}' - region: '{{ scaleway_region }}' - commercial_type: '{{ scaleway_commerial_type }}' - security_group: '{{ security_group.scaleway_security_group.id }}' - wait: true + - name: Create a server + scaleway_compute: + name: '{{ scaleway_name }}' + state: present + image: '{{ scaleway_image_id }}' + organization: '{{ scaleway_organization }}' + region: '{{ scaleway_region }}' + commercial_type: '{{ scaleway_commerial_type }}' + security_group: '{{ security_group.scaleway_security_group.id }}' + wait: true - register: server_creation_task + register: server_creation_task - - debug: var=server_creation_task + - debug: var=server_creation_task - - assert: - that: - - server_creation_task is success - - server_creation_task is changed + - assert: + that: + - server_creation_task is success + - server_creation_task is changed - - name: Create a server with security_group (Confirmation) - scaleway_compute: - name: '{{ scaleway_name }}' - state: present - image: '{{ scaleway_image_id }}' - organization: '{{ scaleway_organization }}' - region: '{{ scaleway_region }}' - commercial_type: '{{ scaleway_commerial_type }}' - security_group: '{{ security_group.scaleway_security_group.id }}' - wait: true + - name: Create a server with security_group (Confirmation) + scaleway_compute: + name: '{{ scaleway_name }}' + state: present + image: '{{ scaleway_image_id }}' + organization: '{{ scaleway_organization }}' + region: '{{ scaleway_region }}' + commercial_type: '{{ scaleway_commerial_type }}' + security_group: '{{ security_group.scaleway_security_group.id }}' + wait: true - register: server_creation_confirmation_task + register: server_creation_confirmation_task - - debug: var=server_creation_confirmation_task + - debug: var=server_creation_confirmation_task - - assert: - that: - - server_creation_confirmation_task is success - - server_creation_confirmation_task is not changed + - assert: + that: + - server_creation_confirmation_task is success + - server_creation_confirmation_task is not changed - - name: Keep current security_group (Check) - check_mode: true - scaleway_compute: - name: '{{ scaleway_name }}' - state: present - image: '{{ scaleway_image_id }}' - organization: '{{ scaleway_organization }}' - region: '{{ scaleway_region }}' - commercial_type: '{{ scaleway_commerial_type }}' - security_group: '{{ 
security_group.scaleway_security_group.id }}' - wait: true + - name: Keep current security_group (Check) + check_mode: true + scaleway_compute: + name: '{{ scaleway_name }}' + state: present + image: '{{ scaleway_image_id }}' + organization: '{{ scaleway_organization }}' + region: '{{ scaleway_region }}' + commercial_type: '{{ scaleway_commerial_type }}' + security_group: '{{ security_group.scaleway_security_group.id }}' + wait: true - register: server_creation_confirmation_task + register: server_creation_confirmation_task - - debug: var=server_creation_confirmation_task + - debug: var=server_creation_confirmation_task - - assert: - that: - - server_creation_confirmation_task is success - - server_creation_confirmation_task is not changed + - assert: + that: + - server_creation_confirmation_task is success + - server_creation_confirmation_task is not changed - - name: Keep current security_group - scaleway_compute: - name: '{{ scaleway_name }}' - state: present - image: '{{ scaleway_image_id }}' - organization: '{{ scaleway_organization }}' - region: '{{ scaleway_region }}' - commercial_type: '{{ scaleway_commerial_type }}' - wait: true + - name: Keep current security_group + scaleway_compute: + name: '{{ scaleway_name }}' + state: present + image: '{{ scaleway_image_id }}' + organization: '{{ scaleway_organization }}' + region: '{{ scaleway_region }}' + commercial_type: '{{ scaleway_commerial_type }}' + wait: true - register: server_creation_confirmation_task + register: server_creation_confirmation_task - - debug: var=server_creation_confirmation_task + - debug: var=server_creation_confirmation_task - - assert: - that: - - server_creation_confirmation_task is success - - server_creation_confirmation_task is not changed + - assert: + that: + - server_creation_confirmation_task is success + - server_creation_confirmation_task is not changed always: - name: Destroy it diff --git a/tests/integration/targets/scaleway_container_namespace/defaults/main.yml b/tests/integration/targets/scaleway_container_namespace/defaults/main.yml index 876f8b7a63..de5cb3005a 100644 --- a/tests/integration/targets/scaleway_container_namespace/defaults/main.yml +++ b/tests/integration/targets/scaleway_container_namespace/defaults/main.yml @@ -10,6 +10,6 @@ updated_description: Container namespace used for testing scaleway_container_nam environment_variables: MY_VAR: my_value secret_environment_variables: - MY_SECRET_VAR: my_secret_value + MY_SECRET_VAR: my_secret_value updated_secret_environment_variables: MY_SECRET_VAR: my_other_secret_value \ No newline at end of file diff --git a/tests/integration/targets/scaleway_security_group/tasks/main.yml b/tests/integration/targets/scaleway_security_group/tasks/main.yml index cab972ae50..40140e0b1f 100644 --- a/tests/integration/targets/scaleway_security_group/tasks/main.yml +++ b/tests/integration/targets/scaleway_security_group/tasks/main.yml @@ -31,69 +31,69 @@ - security_group_creation is changed - block: - - name: Create security group - scaleway_security_group: - state: present - region: '{{ scaleway_region }}' - name: security_group - description: 'my security group description' - organization: '{{ scaleway_organization }}' - stateful: false - inbound_default_policy: accept - outbound_default_policy: accept - organization_default: false - register: security_group_creation + - name: Create security group + scaleway_security_group: + state: present + region: '{{ scaleway_region }}' + name: security_group + description: 'my security group description' + 
organization: '{{ scaleway_organization }}' + stateful: false + inbound_default_policy: accept + outbound_default_policy: accept + organization_default: false + register: security_group_creation - - debug: var=security_group_creation + - debug: var=security_group_creation - - name: Ensure security groups facts is success - assert: - that: - - security_group_creation is success - - security_group_creation is changed + - name: Ensure security groups facts is success + assert: + that: + - security_group_creation is success + - security_group_creation is changed - - name: Create security group duplicate - scaleway_security_group: - state: present - region: '{{ scaleway_region }}' - name: security_group - description: 'my security group description' - organization: '{{ scaleway_organization }}' - stateful: false - inbound_default_policy: accept - outbound_default_policy: accept - organization_default: false - register: security_group_creation + - name: Create security group duplicate + scaleway_security_group: + state: present + region: '{{ scaleway_region }}' + name: security_group + description: 'my security group description' + organization: '{{ scaleway_organization }}' + stateful: false + inbound_default_policy: accept + outbound_default_policy: accept + organization_default: false + register: security_group_creation - - debug: var=security_group_creation + - debug: var=security_group_creation - - name: Ensure security groups duplicate facts is success - assert: - that: - - security_group_creation is success - - security_group_creation is not changed + - name: Ensure security groups duplicate facts is success + assert: + that: + - security_group_creation is success + - security_group_creation is not changed - - name: Delete security group check - check_mode: true - scaleway_security_group: - state: absent - region: '{{ scaleway_region }}' - name: security_group - description: 'my security group description' - organization: '{{ scaleway_organization }}' - stateful: false - inbound_default_policy: accept - outbound_default_policy: accept - organization_default: false - register: security_group_deletion + - name: Delete security group check + check_mode: true + scaleway_security_group: + state: absent + region: '{{ scaleway_region }}' + name: security_group + description: 'my security group description' + organization: '{{ scaleway_organization }}' + stateful: false + inbound_default_policy: accept + outbound_default_policy: accept + organization_default: false + register: security_group_deletion - - debug: var=security_group_deletion + - debug: var=security_group_deletion - - name: Ensure security groups delete check facts is success - assert: - that: - - security_group_deletion is success - - security_group_deletion is changed + - name: Ensure security groups delete check facts is success + assert: + that: + - security_group_deletion is success + - security_group_deletion is changed always: - name: Delete security group diff --git a/tests/integration/targets/scaleway_security_group_rule/tasks/main.yml b/tests/integration/targets/scaleway_security_group_rule/tasks/main.yml index 3839421955..a438e4be57 100644 --- a/tests/integration/targets/scaleway_security_group_rule/tasks/main.yml +++ b/tests/integration/targets/scaleway_security_group_rule/tasks/main.yml @@ -44,83 +44,83 @@ - security_group_rule_creation_task is changed - block: - - name: Create security_group_rule check - scaleway_security_group_rule: - state: present - region: '{{ scaleway_region }}' - protocol: '{{ protocol }}' - port: 
'{{ port }}' - ip_range: '{{ ip_range }}' - direction: '{{ direction }}' - action: '{{ action }}' - security_group: '{{ security_group.scaleway_security_group.id }}' - register: security_group_rule_creation_task + - name: Create security_group_rule check + scaleway_security_group_rule: + state: present + region: '{{ scaleway_region }}' + protocol: '{{ protocol }}' + port: '{{ port }}' + ip_range: '{{ ip_range }}' + direction: '{{ direction }}' + action: '{{ action }}' + security_group: '{{ security_group.scaleway_security_group.id }}' + register: security_group_rule_creation_task - - debug: var=security_group_rule_creation_task + - debug: var=security_group_rule_creation_task - - assert: - that: - - security_group_rule_creation_task is success - - security_group_rule_creation_task is changed + - assert: + that: + - security_group_rule_creation_task is success + - security_group_rule_creation_task is changed - - name: Create security_group_rule duplicate - scaleway_security_group_rule: - state: present - region: '{{ scaleway_region }}' - protocol: '{{ protocol }}' - port: '{{ port }}' - ip_range: '{{ ip_range }}' - direction: '{{ direction }}' - action: '{{ action }}' - security_group: '{{ security_group.scaleway_security_group.id }}' - register: security_group_rule_creation_task + - name: Create security_group_rule duplicate + scaleway_security_group_rule: + state: present + region: '{{ scaleway_region }}' + protocol: '{{ protocol }}' + port: '{{ port }}' + ip_range: '{{ ip_range }}' + direction: '{{ direction }}' + action: '{{ action }}' + security_group: '{{ security_group.scaleway_security_group.id }}' + register: security_group_rule_creation_task - - debug: var=security_group_rule_creation_task + - debug: var=security_group_rule_creation_task - - assert: - that: - - security_group_rule_creation_task is success - - security_group_rule_creation_task is not changed + - assert: + that: + - security_group_rule_creation_task is success + - security_group_rule_creation_task is not changed - - name: Delete security_group_rule check - check_mode: true - scaleway_security_group_rule: - state: absent - region: '{{ scaleway_region }}' - protocol: '{{ protocol }}' - port: '{{ port }}' - ip_range: '{{ ip_range }}' - direction: '{{ direction }}' - action: '{{ action }}' - security_group: '{{ security_group.scaleway_security_group.id }}' - register: security_group_rule_deletion_task + - name: Delete security_group_rule check + check_mode: true + scaleway_security_group_rule: + state: absent + region: '{{ scaleway_region }}' + protocol: '{{ protocol }}' + port: '{{ port }}' + ip_range: '{{ ip_range }}' + direction: '{{ direction }}' + action: '{{ action }}' + security_group: '{{ security_group.scaleway_security_group.id }}' + register: security_group_rule_deletion_task - - debug: var=security_group_rule_deletion_task + - debug: var=security_group_rule_deletion_task - - assert: - that: - - security_group_rule_deletion_task is success - - security_group_rule_deletion_task is changed + - assert: + that: + - security_group_rule_deletion_task is success + - security_group_rule_deletion_task is changed always: - - name: Delete security_group_rule check - scaleway_security_group_rule: - state: absent - region: '{{ scaleway_region }}' - protocol: '{{ protocol }}' - port: '{{ port }}' - ip_range: '{{ ip_range }}' - direction: '{{ direction }}' - action: '{{ action }}' - security_group: '{{ security_group.scaleway_security_group.id }}' - register: security_group_rule_deletion_task + - name: Delete 
security_group_rule check + scaleway_security_group_rule: + state: absent + region: '{{ scaleway_region }}' + protocol: '{{ protocol }}' + port: '{{ port }}' + ip_range: '{{ ip_range }}' + direction: '{{ direction }}' + action: '{{ action }}' + security_group: '{{ security_group.scaleway_security_group.id }}' + register: security_group_rule_deletion_task - - debug: var=security_group_rule_deletion_task + - debug: var=security_group_rule_deletion_task - - assert: - that: - - security_group_rule_deletion_task is success - - security_group_rule_deletion_task is changed + - assert: + that: + - security_group_rule_deletion_task is success + - security_group_rule_deletion_task is changed - name: Delete security_group_rule check scaleway_security_group_rule: @@ -142,83 +142,83 @@ - security_group_rule_deletion_task is not changed - block: - - name: Create security_group_rule with null check - scaleway_security_group_rule: - state: present - region: '{{ scaleway_region }}' - protocol: '{{ protocol }}' - port: null - ip_range: '{{ ip_range }}' - direction: '{{ direction }}' - action: '{{ action }}' - security_group: '{{ security_group.scaleway_security_group.id }}' - register: security_group_rule_creation_task + - name: Create security_group_rule with null check + scaleway_security_group_rule: + state: present + region: '{{ scaleway_region }}' + protocol: '{{ protocol }}' + port: null + ip_range: '{{ ip_range }}' + direction: '{{ direction }}' + action: '{{ action }}' + security_group: '{{ security_group.scaleway_security_group.id }}' + register: security_group_rule_creation_task - - debug: var=security_group_rule_creation_task + - debug: var=security_group_rule_creation_task - - assert: - that: - - security_group_rule_creation_task is success - - security_group_rule_creation_task is changed + - assert: + that: + - security_group_rule_creation_task is success + - security_group_rule_creation_task is changed - - name: Create security_group_rule with null duplicate - scaleway_security_group_rule: - state: present - region: '{{ scaleway_region }}' - protocol: '{{ protocol }}' - port: null - ip_range: '{{ ip_range }}' - direction: '{{ direction }}' - action: '{{ action }}' - security_group: '{{ security_group.scaleway_security_group.id }}' - register: security_group_rule_creation_task + - name: Create security_group_rule with null duplicate + scaleway_security_group_rule: + state: present + region: '{{ scaleway_region }}' + protocol: '{{ protocol }}' + port: null + ip_range: '{{ ip_range }}' + direction: '{{ direction }}' + action: '{{ action }}' + security_group: '{{ security_group.scaleway_security_group.id }}' + register: security_group_rule_creation_task - - debug: var=security_group_rule_creation_task + - debug: var=security_group_rule_creation_task - - assert: - that: - - security_group_rule_creation_task is success - - security_group_rule_creation_task is not changed + - assert: + that: + - security_group_rule_creation_task is success + - security_group_rule_creation_task is not changed - - name: Delete security_group_rule with null check - check_mode: true - scaleway_security_group_rule: - state: absent - region: '{{ scaleway_region }}' - protocol: '{{ protocol }}' - port: null - ip_range: '{{ ip_range }}' - direction: '{{ direction }}' - action: '{{ action }}' - security_group: '{{ security_group.scaleway_security_group.id }}' - register: security_group_rule_deletion_task + - name: Delete security_group_rule with null check + check_mode: true + scaleway_security_group_rule: + state: absent + 
region: '{{ scaleway_region }}' + protocol: '{{ protocol }}' + port: null + ip_range: '{{ ip_range }}' + direction: '{{ direction }}' + action: '{{ action }}' + security_group: '{{ security_group.scaleway_security_group.id }}' + register: security_group_rule_deletion_task - - debug: var=security_group_rule_deletion_task + - debug: var=security_group_rule_deletion_task - - assert: - that: - - security_group_rule_deletion_task is success - - security_group_rule_deletion_task is changed + - assert: + that: + - security_group_rule_deletion_task is success + - security_group_rule_deletion_task is changed always: - - name: Delete security_group_rule with null check - scaleway_security_group_rule: - state: absent - region: '{{ scaleway_region }}' - protocol: '{{ protocol }}' - port: null - ip_range: '{{ ip_range }}' - direction: '{{ direction }}' - action: '{{ action }}' - security_group: '{{ security_group.scaleway_security_group.id }}' - register: security_group_rule_deletion_task + - name: Delete security_group_rule with null check + scaleway_security_group_rule: + state: absent + region: '{{ scaleway_region }}' + protocol: '{{ protocol }}' + port: null + ip_range: '{{ ip_range }}' + direction: '{{ direction }}' + action: '{{ action }}' + security_group: '{{ security_group.scaleway_security_group.id }}' + register: security_group_rule_deletion_task - - debug: var=security_group_rule_deletion_task + - debug: var=security_group_rule_deletion_task - - assert: - that: - - security_group_rule_deletion_task is success - - security_group_rule_deletion_task is changed + - assert: + that: + - security_group_rule_deletion_task is success + - security_group_rule_deletion_task is changed - name: Delete security_group_rule with null check scaleway_security_group_rule: diff --git a/tests/integration/targets/sefcontext/tasks/sefcontext.yml b/tests/integration/targets/sefcontext/tasks/sefcontext.yml index 258f1ace91..ae815a22f8 100644 --- a/tests/integration/targets/sefcontext/tasks/sefcontext.yml +++ b/tests/integration/targets/sefcontext/tasks/sefcontext.yml @@ -38,8 +38,8 @@ - assert: that: - - first is changed - - first.setype == 'httpd_sys_content_t' + - first is changed + - first.setype == 'httpd_sys_content_t' - name: Set SELinux file context of foo/bar (again) sefcontext: @@ -51,8 +51,8 @@ - assert: that: - - second is not changed - - second.setype == 'httpd_sys_content_t' + - second is not changed + - second.setype == 'httpd_sys_content_t' - name: Change SELinux file context of foo/bar sefcontext: @@ -64,8 +64,8 @@ - assert: that: - - third is changed - - third.setype == 'unlabeled_t' + - third is changed + - third.setype == 'unlabeled_t' - name: Change SELinux file context of foo/bar (again) sefcontext: @@ -77,8 +77,8 @@ - assert: that: - - fourth is not changed - - fourth.setype == 'unlabeled_t' + - fourth is not changed + - fourth.setype == 'unlabeled_t' - name: Delete SELinux file context of foo/bar sefcontext: @@ -90,8 +90,8 @@ - assert: that: - - fifth is changed - - fifth.setype == 'httpd_sys_content_t' + - fifth is changed + - fifth.setype == 'httpd_sys_content_t' - name: Delete SELinux file context of foo/bar (again) sefcontext: @@ -103,8 +103,8 @@ - assert: that: - - sixth is not changed - - sixth.setype == 'unlabeled_t' + - sixth is not changed + - sixth.setype == 'unlabeled_t' - name: Set SELinux file context path substitution of foo sefcontext: @@ -116,8 +116,8 @@ - assert: that: - - subst_first is changed - - subst_first.substitute == '/home' + - subst_first is changed + - 
subst_first.substitute == '/home' - name: Set SELinux file context path substitution of foo (again) sefcontext: @@ -129,8 +129,8 @@ - assert: that: - - subst_second is not changed - - subst_second.substitute == '/home' + - subst_second is not changed + - subst_second.substitute == '/home' - name: Change SELinux file context path substitution of foo sefcontext: @@ -142,8 +142,8 @@ - assert: that: - - subst_third is changed - - subst_third.substitute == '/boot' + - subst_third is changed + - subst_third.substitute == '/boot' - name: Change SELinux file context path substitution of foo (again) sefcontext: @@ -155,8 +155,8 @@ - assert: that: - - subst_fourth is not changed - - subst_fourth.substitute == '/boot' + - subst_fourth is not changed + - subst_fourth.substitute == '/boot' - name: Try to delete non-existing SELinux file context path substitution of foo sefcontext: @@ -168,8 +168,8 @@ - assert: that: - - subst_fifth is not changed - - subst_fifth.substitute == '/dev' + - subst_fifth is not changed + - subst_fifth.substitute == '/dev' - name: Delete SELinux file context path substitution of foo sefcontext: @@ -181,8 +181,8 @@ - assert: that: - - subst_sixth is changed - - subst_sixth.substitute == '/boot' + - subst_sixth is changed + - subst_sixth.substitute == '/boot' - name: Delete SELinux file context path substitution of foo (again) sefcontext: @@ -194,8 +194,8 @@ - assert: that: - - subst_seventh is not changed - - subst_seventh.substitute == '/boot' + - subst_seventh is not changed + - subst_seventh.substitute == '/boot' - name: Set SELinux file context path substitution of foo sefcontext: @@ -207,8 +207,8 @@ - assert: that: - - subst_eighth is changed - - subst_eighth.substitute == '/home' + - subst_eighth is changed + - subst_eighth.substitute == '/home' - name: Delete SELinux file context path substitution of foo sefcontext: @@ -219,7 +219,7 @@ - assert: that: - - subst_ninth is changed + - subst_ninth is changed - name: Delete SELinux file context path substitution of foo (again) sefcontext: @@ -230,4 +230,4 @@ - assert: that: - - subst_tenth is not changed + - subst_tenth is not changed diff --git a/tests/integration/targets/setup_apache2/tasks/main.yml b/tests/integration/targets/setup_apache2/tasks/main.yml new file mode 100644 index 0000000000..58651d2ce8 --- /dev/null +++ b/tests/integration/targets/setup_apache2/tasks/main.yml @@ -0,0 +1,30 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Install apache via apt + apt: + name: "{{item}}" + state: present + when: "ansible_os_family == 'Debian'" + with_items: + - apache2 + - libapache2-mod-evasive + +- name: Install apache via zypper + community.general.zypper: + name: apache2 + state: present + when: "ansible_os_family == 'Suse'" + +- name: Enable mod_slotmem_shm on SuSE + apache2_module: + name: slotmem_shm + state: present + when: "ansible_os_family == 'Suse'" diff --git a/tests/integration/targets/setup_cron/tasks/main.yml b/tests/integration/targets/setup_cron/tasks/main.yml index cca7071a3a..92d2893403 100644 --- a/tests/integration/targets/setup_cron/tasks/main.yml +++ 
b/tests/integration/targets/setup_cron/tasks/main.yml @@ -11,65 +11,65 @@ - when: - not (ansible_os_family == 'Alpine' and ansible_distribution_version is version('3.15', '<')) # TODO block: - - name: Include distribution specific variables - include_vars: '{{ lookup(''first_found'', search) }}' - vars: - search: - files: - - '{{ ansible_distribution | lower }}.yml' - - '{{ ansible_os_family | lower }}.yml' - - '{{ ansible_system | lower }}.yml' - - default.yml - paths: - - vars - - name: install cron package - package: - name: '{{ cron_pkg }}' - when: cron_pkg | default(false, true) - register: cron_package_installed - until: cron_package_installed is success - - when: faketime_pkg | default(false, true) - block: - - name: install cron and faketime packages + - name: Include distribution specific variables + include_vars: '{{ lookup(''first_found'', search) }}' + vars: + search: + files: + - '{{ ansible_distribution | lower }}.yml' + - '{{ ansible_os_family | lower }}.yml' + - '{{ ansible_system | lower }}.yml' + - default.yml + paths: + - vars + - name: install cron package package: - name: '{{ faketime_pkg }}' - register: faketime_package_installed - until: faketime_package_installed is success - - name: Find libfaketime path - shell: '{{ list_pkg_files }} {{ faketime_pkg }} | grep -F libfaketime.so.1' - register: libfaketime_path - - when: ansible_service_mgr == 'systemd' + name: '{{ cron_pkg }}' + when: (cron_pkg | default(false, true)) is truthy + register: cron_package_installed + until: cron_package_installed is success + - when: (faketime_pkg | default(false, true)) is truthy block: - - name: create directory for cron drop-in file - file: - path: /etc/systemd/system/{{ cron_service }}.service.d - state: directory - owner: root - group: root - mode: '0755' - - name: Use faketime with cron service - copy: - content: '[Service] + - name: install cron and faketime packages + package: + name: '{{ faketime_pkg }}' + register: faketime_package_installed + until: faketime_package_installed is success + - name: Find libfaketime path + shell: '{{ list_pkg_files }} {{ faketime_pkg }} | grep -F libfaketime.so.1' + register: libfaketime_path + - when: ansible_service_mgr == 'systemd' + block: + - name: create directory for cron drop-in file + file: + path: /etc/systemd/system/{{ cron_service }}.service.d + state: directory + owner: root + group: root + mode: '0755' + - name: Use faketime with cron service + copy: + content: '[Service] - Environment=LD_PRELOAD={{ libfaketime_path.stdout_lines[0].strip() }} + Environment=LD_PRELOAD={{ libfaketime_path.stdout_lines[0].strip() }} - Environment="FAKETIME=+0y x10" + Environment="FAKETIME=+0y x10" - Environment=RANDOM_DELAY=0' - dest: /etc/systemd/system/{{ cron_service }}.service.d/faketime.conf - owner: root - group: root - mode: '0644' - - when: ansible_system == 'FreeBSD' - name: Use faketime with cron service - copy: - content: cron_env='LD_PRELOAD={{ libfaketime_path.stdout_lines[0].strip() }} FAKETIME="+0y x10"' - dest: /etc/rc.conf.d/cron - owner: root - group: wheel - mode: '0644' - - name: enable cron service - service: - daemon-reload: '{{ (ansible_service_mgr == ''systemd'') | ternary(true, omit) }}' - name: '{{ cron_service }}' - state: restarted + Environment=RANDOM_DELAY=0' + dest: /etc/systemd/system/{{ cron_service }}.service.d/faketime.conf + owner: root + group: root + mode: '0644' + - when: ansible_system == 'FreeBSD' + name: Use faketime with cron service + copy: + content: cron_env='LD_PRELOAD={{ 
libfaketime_path.stdout_lines[0].strip() }} FAKETIME="+0y x10"' + dest: /etc/rc.conf.d/cron + owner: root + group: wheel + mode: '0644' + - name: enable cron service + service: + daemon-reload: '{{ (ansible_service_mgr == ''systemd'') | ternary(true, omit) }}' + name: '{{ cron_service }}' + state: restarted diff --git a/tests/integration/targets/setup_docker/tasks/main.yml b/tests/integration/targets/setup_docker/tasks/main.yml index 19bc7aa8c3..0c1b86951a 100644 --- a/tests/integration/targets/setup_docker/tasks/main.yml +++ b/tests/integration/targets/setup_docker/tasks/main.yml @@ -47,7 +47,7 @@ become: true ansible.builtin.file: path: /var/run/docker.sock - mode: 0666 + mode: "0666" - name: Install python "requests" ansible.builtin.pip: diff --git a/tests/integration/targets/setup_etcd3/defaults/main.yml b/tests/integration/targets/setup_etcd3/defaults/main.yml index f185ef0c25..3ca15a6220 100644 --- a/tests/integration/targets/setup_etcd3/defaults/main.yml +++ b/tests/integration/targets/setup_etcd3/defaults/main.yml @@ -9,7 +9,7 @@ # etcd3_ver: "v3.2.14" etcd3_download_server: "https://storage.googleapis.com/etcd" -#etcd3_download_server: "https://github.com/coreos/etcd/releases/download" +# etcd3_download_server: "https://github.com/coreos/etcd/releases/download" etcd3_download_url: "{{ etcd3_download_server }}/{{ etcd3_ver }}/etcd-{{ etcd3_ver }}-linux-amd64.tar.gz" etcd3_download_location: /tmp/etcd-download-test etcd3_path: "{{ etcd3_download_location }}/etcd-{{ etcd3_ver }}-linux-amd64" diff --git a/tests/integration/targets/setup_etcd3/tasks/main.yml b/tests/integration/targets/setup_etcd3/tasks/main.yml index 1da52e225f..72252cfe51 100644 --- a/tests/integration/targets/setup_etcd3/tasks/main.yml +++ b/tests/integration/targets/setup_etcd3/tasks/main.yml @@ -15,90 +15,90 @@ # setup etcd3 for supported distros - block: - - name: python 2 - set_fact: - python_suffix: "" - when: ansible_python_version is version('3', '<') + - name: python 2 + set_fact: + python_suffix: "" + when: ansible_python_version is version('3', '<') - - name: python 3 - set_fact: - python_suffix: "-py3" - when: ansible_python_version is version('3', '>=') + - name: python 3 + set_fact: + python_suffix: "-py3" + when: ansible_python_version is version('3', '>=') - - include_vars: '{{ item }}' - with_first_found: - - files: - - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}{{ python_suffix }}.yml' - - '{{ ansible_distribution }}-{{ ansible_distribution_version }}{{ python_suffix }}.yml' - - '{{ ansible_os_family }}-{{ ansible_distribution_major_version }}{{ python_suffix }}.yml' - - '{{ ansible_os_family }}{{ python_suffix }}.yml' - - 'default{{ python_suffix }}.yml' - - 'default.yml' - paths: '../vars' + - include_vars: '{{ item }}' + with_first_found: + - files: + - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}{{ python_suffix }}.yml' + - '{{ ansible_distribution }}-{{ ansible_distribution_version }}{{ python_suffix }}.yml' + - '{{ ansible_os_family }}-{{ ansible_distribution_major_version }}{{ python_suffix }}.yml' + - '{{ ansible_os_family }}{{ python_suffix }}.yml' + - 'default{{ python_suffix }}.yml' + - 'default.yml' + paths: '../vars' - - name: Upgrade setuptools python2 module - pip: - name: setuptools<45 - extra_args: --upgrade - state: present - when: python_suffix == '' + - name: Upgrade setuptools python2 module + pip: + name: setuptools<45 + extra_args: --upgrade + state: present + when: python_suffix == '' - - name: Install etcd3 python modules - 
pip: - name: "{{ etcd3_pip_module }}" - extra_args: --only-binary grpcio - state: present + - name: Install etcd3 python modules + pip: + name: "{{ etcd3_pip_module }}" + extra_args: --only-binary grpcio + state: present - # Check if re-installing etcd3 is required - - name: Check if etcd3ctl exists for reuse. - shell: "ETCDCTL_API=3 {{ etcd3_path }}/etcdctl --endpoints=localhost:2379 get foo" - args: - executable: /bin/bash - changed_when: false - failed_when: false - register: _testetcd3ctl + # Check if re-installing etcd3 is required + - name: Check if etcd3ctl exists for reuse. + shell: "ETCDCTL_API=3 {{ etcd3_path }}/etcdctl --endpoints=localhost:2379 get foo" + args: + executable: /bin/bash + changed_when: false + failed_when: false + register: _testetcd3ctl + + - block: + # Installing etcd3 + - name: If can't reuse, prepare download folder + file: + path: "{{ etcd3_download_location }}" + state: directory + register: _etcddownloadexists + when: + - _testetcd3ctl.rc != 0 + + - name: Delete download folder if already exists (to start clean) + file: + path: "{{ etcd3_download_location }}" + state: absent + when: + - _etcddownloadexists is not changed + + - name: Recreate download folder if purged + file: + path: "{{ etcd3_download_location }}" + state: directory + when: + - _etcddownloadexists is not changed + + - name: Download etcd3 + unarchive: + src: "{{ etcd3_download_url }}" + dest: "{{ etcd3_download_location }}" + remote_src: true + + # Running etcd3 and kill afterwards if it wasn't running before. + - name: Run etcd3 + shell: "{{ etcd3_path }}/etcd &" + register: _etcd3run + changed_when: true + + # - name: kill etcd3 + # command: "pkill etcd" - - block: - # Installing etcd3 - - name: If can't reuse, prepare download folder - file: - path: "{{ etcd3_download_location }}" - state: directory - register: _etcddownloadexists when: - _testetcd3ctl.rc != 0 - - name: Delete download folder if already exists (to start clean) - file: - path: "{{ etcd3_download_location }}" - state: absent - when: - - _etcddownloadexists is not changed - - - name: Recreate download folder if purged - file: - path: "{{ etcd3_download_location }}" - state: directory - when: - - _etcddownloadexists is not changed - - - name: Download etcd3 - unarchive: - src: "{{ etcd3_download_url }}" - dest: "{{ etcd3_download_location }}" - remote_src: true - - # Running etcd3 and kill afterwards if it wasn't running before. 
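#
# The re-indented hunk above preserves this role's probe-and-guard pattern: an
# etcdctl probe runs first, and the download/start block only executes when the
# probe returned a non-zero rc. A minimal standalone sketch of the same pattern
# (the _probe register name is illustrative; the etcd3_* variables come from
# this role's defaults):
#
# - name: Probe for an already-running etcd
#   shell: "ETCDCTL_API=3 {{ etcd3_path }}/etcdctl --endpoints=localhost:2379 get foo"
#   changed_when: false
#   failed_when: false
#   register: _probe
#
# - name: Download and start etcd only when the probe failed
#   when: _probe.rc != 0
#   block:
#     - name: Download etcd
#       unarchive:
#         src: "{{ etcd3_download_url }}"
#         dest: "{{ etcd3_download_location }}"
#         remote_src: true
#     - name: Run etcd in the background
#       shell: "{{ etcd3_path }}/etcd &"
#       changed_when: true
#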
- - name: Run etcd3 - shell: "{{ etcd3_path }}/etcd &" - register: _etcd3run - changed_when: true - -# - name: kill etcd3 -# command: "pkill etcd" - - when: - - _testetcd3ctl.rc != 0 - when: - - ansible_distribution | lower ~ "-" ~ ansible_distribution_major_version | lower != 'centos-6' + - ansible_distribution | lower ~ "-" ~ ansible_distribution_major_version | lower != 'centos-6' diff --git a/tests/integration/targets/setup_flatpak_remote/meta/main.yaml b/tests/integration/targets/setup_flatpak_remote/meta/main.yaml index 1b3d5b8758..982de6eb03 100644 --- a/tests/integration/targets/setup_flatpak_remote/meta/main.yaml +++ b/tests/integration/targets/setup_flatpak_remote/meta/main.yaml @@ -4,4 +4,4 @@ # SPDX-License-Identifier: GPL-3.0-or-later dependencies: - - setup_remote_tmp_dir + - setup_remote_tmp_dir diff --git a/tests/integration/targets/setup_flatpak_remote/tasks/main.yaml b/tests/integration/targets/setup_flatpak_remote/tasks/main.yaml index 037784738a..e63cb379a0 100644 --- a/tests/integration/targets/setup_flatpak_remote/tasks/main.yaml +++ b/tests/integration/targets/setup_flatpak_remote/tasks/main.yaml @@ -13,20 +13,20 @@ ansible_distribution == 'Fedora' or ansible_distribution == 'Ubuntu' and not ansible_distribution_major_version | int < 16 block: - - name: Copy repo into place - unarchive: - src: repo.tar.xz - dest: '{{ remote_tmp_dir }}' - owner: root - group: root - mode: '0644' - - name: Create deterministic link to temp directory - file: - state: link - src: '{{ remote_tmp_dir }}/' - path: /tmp/flatpak - owner: root - group: root - mode: '0644' - notify: remove temporary flatpak link + - name: Copy repo into place + unarchive: + src: repo.tar.xz + dest: '{{ remote_tmp_dir }}' + owner: root + group: root + mode: '0644' + - name: Create deterministic link to temp directory + file: + state: link + src: '{{ remote_tmp_dir }}/' + path: /tmp/flatpak + owner: root + group: root + mode: '0644' + notify: remove temporary flatpak link become: true diff --git a/tests/integration/targets/setup_java_keytool/tasks/main.yml b/tests/integration/targets/setup_java_keytool/tasks/main.yml index 9f156425d8..56ba7a9581 100644 --- a/tests/integration/targets/setup_java_keytool/tasks/main.yml +++ b/tests/integration/targets/setup_java_keytool/tasks/main.yml @@ -22,6 +22,7 @@ files: - '{{ ansible_distribution }}-{{ ansible_distribution_version }}.yml' - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml' + - '{{ ansible_distribution }}.yml' - '{{ ansible_os_family }}.yml' paths: - '{{ role_path }}/vars' diff --git a/tests/integration/targets/setup_java_keytool/vars/Fedora.yml b/tests/integration/targets/setup_java_keytool/vars/Fedora.yml new file mode 100644 index 0000000000..5f77ea9bba --- /dev/null +++ b/tests/integration/targets/setup_java_keytool/vars/Fedora.yml @@ -0,0 +1,7 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +keytool_package_names: + - java-21-openjdk-headless diff --git a/tests/integration/targets/setup_openssl/tasks/main.yml b/tests/integration/targets/setup_openssl/tasks/main.yml index b8e003710a..6490e65b41 100644 --- a/tests/integration/targets/setup_openssl/tasks/main.yml +++ b/tests/integration/targets/setup_openssl/tasks/main.yml @@ -13,13 +13,13 @@ vars: search: files: - - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml' - - '{{ ansible_distribution }}-{{ 
ansible_distribution_version }}.yml' - - '{{ ansible_distribution }}.yml' - - '{{ ansible_os_family }}.yml' - - default.yml + - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml' + - '{{ ansible_distribution }}-{{ ansible_distribution_version }}.yml' + - '{{ ansible_distribution }}.yml' + - '{{ ansible_os_family }}.yml' + - default.yml paths: - - vars + - vars - name: Install OpenSSL become: true @@ -29,24 +29,24 @@ - when: ansible_facts.distribution ~ ansible_facts.distribution_major_version not in ['CentOS6', 'RedHat6'] block: - - name: Install cryptography (Python 3) - become: true - package: - name: '{{ cryptography_package_name_python3 }}' - when: not cryptography_from_pip and ansible_python_version is version('3.0', '>=') + - name: Install cryptography (Python 3) + become: true + package: + name: '{{ cryptography_package_name_python3 }}' + when: not cryptography_from_pip and ansible_python_version is version('3.0', '>=') - - name: Install cryptography (Python 2) - become: true - package: - name: '{{ cryptography_package_name }}' - when: not cryptography_from_pip and ansible_python_version is version('3.0', '<') + - name: Install cryptography (Python 2) + become: true + package: + name: '{{ cryptography_package_name }}' + when: not cryptography_from_pip and ansible_python_version is version('3.0', '<') - - name: Install cryptography (pip) - become: true - pip: - name: cryptography>=3.3 - extra_args: "-c {{ remote_constraints }}" - when: cryptography_from_pip + - name: Install cryptography (pip) + become: true + pip: + name: cryptography>=3.3 + extra_args: "-c {{ remote_constraints }}" + when: cryptography_from_pip - name: Install pyOpenSSL (Python 3) become: true diff --git a/tests/integration/targets/setup_openssl/vars/RedHat-10.yml b/tests/integration/targets/setup_openssl/vars/RedHat-10.yml new file mode 100644 index 0000000000..ac9b3344eb --- /dev/null +++ b/tests/integration/targets/setup_openssl/vars/RedHat-10.yml @@ -0,0 +1,9 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +cryptography_package_name: python-cryptography +cryptography_package_name_python3: python3-cryptography +openssl_package_name: openssl +cryptography_from_pip: false diff --git a/tests/integration/targets/setup_postgresql_db/defaults/main.yml b/tests/integration/targets/setup_postgresql_db/defaults/main.yml index 1a33ecafab..8480995959 100644 --- a/tests/integration/targets/setup_postgresql_db/defaults/main.yml +++ b/tests/integration/targets/setup_postgresql_db/defaults/main.yml @@ -6,8 +6,8 @@ postgresql_service: postgresql postgresql_packages: - - postgresql-server - - python-psycopg2 + - postgresql-server + - python-psycopg2 pg_user: postgres pg_group: root diff --git a/tests/integration/targets/setup_postgresql_db/tasks/main.yml b/tests/integration/targets/setup_postgresql_db/tasks/main.yml index 99668ebc95..d6b8701e9a 100644 --- a/tests/integration/targets/setup_postgresql_db/tasks/main.yml +++ b/tests/integration/targets/setup_postgresql_db/tasks/main.yml @@ -35,12 +35,12 @@ vars: params: files: - - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}{{ python_suffix }}.yml' - - '{{ ansible_distribution }}-{{ ansible_distribution_version }}{{ python_suffix }}.yml' - - '{{ ansible_os_family }}{{ python_suffix }}.yml' - - default{{ python_suffix }}.yml + - '{{ ansible_distribution }}-{{ 
ansible_distribution_major_version }}{{ python_suffix }}.yml' + - '{{ ansible_distribution }}-{{ ansible_distribution_version }}{{ python_suffix }}.yml' + - '{{ ansible_os_family }}{{ python_suffix }}.yml' + - default{{ python_suffix }}.yml paths: - - '{{ role_path }}/vars' + - '{{ role_path }}/vars' - name: make sure the dbus service is started under systemd systemd: @@ -79,8 +79,8 @@ ignore_errors: true when: ansible_os_family == "Debian" loop: - - /etc/postgresql - - /var/lib/postgresql + - /etc/postgresql + - /var/lib/postgresql loop_control: loop_var: loop_item @@ -170,38 +170,38 @@ name: '{{ item }}' state: present with_items: - - pt_BR - - es_ES + - pt_BR + - es_ES when: ansible_os_family == 'Debian' - block: - - name: Install langpacks (RHEL8) - yum: - name: - - glibc-langpack-es - - glibc-langpack-pt - - glibc-all-langpacks - state: present - when: ansible_distribution_major_version is version('8', '>=') + - name: Install langpacks (RHEL8) + yum: + name: + - glibc-langpack-es + - glibc-langpack-pt + - glibc-all-langpacks + state: present + when: ansible_distribution_major_version is version('8', '>=') - - name: Check if locales need to be generated (RedHat) - shell: localedef --list-archive | grep -a -q '^{{ locale }}$' - register: locale_present - ignore_errors: true - with_items: - - es_ES - - pt_BR - loop_control: - loop_var: locale + - name: Check if locales need to be generated (RedHat) + shell: localedef --list-archive | grep -a -q '^{{ locale }}$' + register: locale_present + ignore_errors: true + with_items: + - es_ES + - pt_BR + loop_control: + loop_var: locale - - name: Reinstall internationalization files - shell: yum -y reinstall glibc-common || yum -y install glibc-common - when: locale_present is failed + - name: Reinstall internationalization files + shell: yum -y reinstall glibc-common || yum -y install glibc-common + when: locale_present is failed - - name: Generate locale (RedHat) - command: localedef -f ISO-8859-1 -i {{ item.locale }} {{ item.locale }} - when: item is failed - with_items: '{{ locale_present.results }}' + - name: Generate locale (RedHat) + command: localedef -f ISO-8859-1 -i {{ item.locale }} {{ item.locale }} + when: item is failed + with_items: '{{ locale_present.results }}' when: ansible_os_family == 'RedHat' and ansible_distribution != 'Fedora' - name: Install glibc langpacks (Fedora >= 24) @@ -209,8 +209,8 @@ name: '{{ item }}' state: latest with_items: - - glibc-langpack-es - - glibc-langpack-pt + - glibc-langpack-es + - glibc-langpack-pt when: ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('24', '>=') - name: enable postgresql service (FreeBSD) @@ -257,9 +257,9 @@ dest: /usr/share/postgresql/{{ pg_ver }}/extension/{{ item }} mode: '0444' with_items: - - dummy--1.0.sql - - dummy--2.0.sql - - dummy--3.0.sql + - dummy--1.0.sql + - dummy--2.0.sql + - dummy--3.0.sql when: ansible_os_family == 'Debian' - name: add update paths @@ -268,8 +268,8 @@ mode: '0444' state: touch with_items: - - dummy--1.0--2.0.sql - - dummy--2.0--3.0.sql + - dummy--1.0--2.0.sql + - dummy--2.0--3.0.sql when: ansible_os_family == 'Debian' - name: Get PostgreSQL version diff --git a/tests/integration/targets/setup_redis_replication/defaults/main.yml b/tests/integration/targets/setup_redis_replication/defaults/main.yml index 46dae9898a..cc2d632b85 100644 --- a/tests/integration/targets/setup_redis_replication/defaults/main.yml +++ b/tests/integration/targets/setup_redis_replication/defaults/main.yml @@ -6,21 +6,31 @@ # General 
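#
# The defaults below key package names, service users, and server binaries by
# distribution so that tasks can index them with ansible_distribution. A
# minimal sketch of that lookup pattern, reusing the dict names defined in this
# file (the task names are illustrative; master_datadir is the variable used
# elsewhere in this role):
#
# - name: Install the distribution's redis (or valkey) packages
#   package:
#     name: "{{ redis_packages[ansible_distribution] }}"
#     state: present
#
# - name: Create a data directory owned by the service user
#   file:
#     path: "{{ master_datadir }}"
#     state: directory
#     owner: "{{ redis_user[ansible_distribution] }}"
#     group: "{{ redis_user[ansible_distribution] }}"
#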
redis_packages: Alpine: - - redis + - redis Archlinux: - - redis + - valkey Debian: - - redis-server + - redis-server Ubuntu: - - redis-server + - redis-server openSUSE Leap: - - redis + - redis Fedora: - - redis + - redis CentOS: - - redis + - redis FreeBSD: - - redis + - redis + +redis_user: + Alpine: redis + Archlinux: valkey + Debian: redis + Ubuntu: redis + openSUSE Leap: redis + Fedora: "{{ '998' if ansible_distribution_major_version is version('41', '>=') else 'redis' }}" + CentOS: redis + FreeBSD: redis redis_bin: Alpine: /usr/bin/redis-server @@ -28,7 +38,7 @@ redis_bin: Debian: /usr/bin/redis-server Ubuntu: /usr/bin/redis-server openSUSE Leap: /usr/sbin/redis-server - Fedora: /usr/bin/redis-server + Fedora: "/usr/bin/{{ 'valkey-server' if ansible_distribution_major_version is version('41', '>=') else 'redis-server' }}" CentOS: /usr/bin/redis-server FreeBSD: /usr/local/bin/redis-server diff --git a/tests/integration/targets/setup_redis_replication/handlers/main.yml b/tests/integration/targets/setup_redis_replication/handlers/main.yml index a0595cbe30..46c7571da1 100644 --- a/tests/integration/targets/setup_redis_replication/handlers/main.yml +++ b/tests/integration/targets/setup_redis_replication/handlers/main.yml @@ -28,12 +28,12 @@ path: "{{ item }}" state: absent loop: - - "{{ master_conf }}" - - "{{ master_datadir }}" - - "{{ master_logdir }}" - - /var/run/redis_{{ master_port }}.pid - - "{{ replica_conf }}" - - "{{ replica_datadir }}" - - "{{ replica_logdir }}" - - /var/run/redis_{{ replica_port }}.pid + - "{{ master_conf }}" + - "{{ master_datadir }}" + - "{{ master_logdir }}" + - /var/run/redis_{{ master_port }}.pid + - "{{ replica_conf }}" + - "{{ replica_datadir }}" + - "{{ replica_logdir }}" + - /var/run/redis_{{ replica_port }}.pid listen: cleanup redis diff --git a/tests/integration/targets/setup_redis_replication/meta/main.yml b/tests/integration/targets/setup_redis_replication/meta/main.yml index db2617f4ce..2d6cafb56f 100644 --- a/tests/integration/targets/setup_redis_replication/meta/main.yml +++ b/tests/integration/targets/setup_redis_replication/meta/main.yml @@ -4,5 +4,5 @@ # SPDX-License-Identifier: GPL-3.0-or-later dependencies: -- setup_pkg_mgr -- setup_remote_constraints + - setup_pkg_mgr + - setup_remote_constraints diff --git a/tests/integration/targets/setup_redis_replication/tasks/main.yml b/tests/integration/targets/setup_redis_replication/tasks/main.yml index 076a473594..92ac4fe750 100644 --- a/tests/integration/targets/setup_redis_replication/tasks/main.yml +++ b/tests/integration/targets/setup_redis_replication/tasks/main.yml @@ -9,4 +9,4 @@ - import_tasks: setup_redis_cluster.yml when: - - ansible_distribution in ['CentOS', 'Fedora', 'FreeBSD', 'openSUSE Leap', 'Ubuntu', 'Debian', 'Archlinux', 'Alpine'] + - ansible_distribution in ['CentOS', 'Fedora', 'FreeBSD', 'openSUSE Leap', 'Ubuntu', 'Debian', 'Archlinux', 'Alpine'] diff --git a/tests/integration/targets/setup_redis_replication/tasks/setup_redis_cluster.yml b/tests/integration/targets/setup_redis_replication/tasks/setup_redis_cluster.yml index dd48bf2b64..3c9c62ba00 100644 --- a/tests/integration/targets/setup_redis_replication/tasks/setup_redis_cluster.yml +++ b/tests/integration/targets/setup_redis_replication/tasks/setup_redis_cluster.yml @@ -21,16 +21,16 @@ notify: cleanup redis - name: Create redis directories - file: + file: path: "{{ item }}" - state: directory - owner: redis - group: redis + state: directory + owner: "{{ redis_user[ansible_distribution] }}" + group: "{{ 
redis_user[ansible_distribution] }}" loop: - - "{{ master_datadir }}" - - "{{ master_logdir }}" - - "{{ replica_datadir }}" - - "{{ replica_logdir }}" + - "{{ master_datadir }}" + - "{{ master_logdir }}" + - "{{ replica_datadir }}" + - "{{ replica_logdir }}" - name: Create redis configs copy: @@ -44,20 +44,20 @@ requirepass {{ redis_password }} masterauth {{ redis_password }} loop: - - file: "{{ master_conf }}" - port: "{{ master_port }}" - logdir: "{{ master_logdir }}" - datadir: "{{ master_datadir }}" - - file: "{{ replica_conf }}" - port: "{{ replica_port }}" - logdir: "{{ replica_logdir }}" - datadir: "{{ replica_datadir }}" + - file: "{{ master_conf }}" + port: "{{ master_port }}" + logdir: "{{ master_logdir }}" + datadir: "{{ master_datadir }}" + - file: "{{ replica_conf }}" + port: "{{ replica_port }}" + logdir: "{{ replica_logdir }}" + datadir: "{{ replica_datadir }}" - name: Start redis master - shell: "{{ redis_bin[ansible_distribution] }} {{ master_conf }}" + ansible.builtin.command: "{{ redis_bin[ansible_distribution] }} {{ master_conf }}" - name: Start redis replica - shell: "{{ redis_bin[ansible_distribution] }} {{ replica_conf }} --{% if old_redis %}slaveof{% else %}replicaof{% endif %} 127.0.0.1 {{ master_port }}" + ansible.builtin.command: "{{ redis_bin[ansible_distribution] }} {{ replica_conf }} --{% if old_redis %}slaveof{% else %}replicaof{% endif %} 127.0.0.1 {{ master_port }}" - name: Wait for redis master to be started ansible.builtin.wait_for: diff --git a/tests/integration/targets/setup_rundeck/defaults/main.yml b/tests/integration/targets/setup_rundeck/defaults/main.yml index c842901c0f..1c9858ad25 100644 --- a/tests/integration/targets/setup_rundeck/defaults/main.yml +++ b/tests/integration/targets/setup_rundeck/defaults/main.yml @@ -3,5 +3,13 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -rundeck_war_url: https://packagecloud.io/pagerduty/rundeck/packages/java/org.rundeck/rundeck-3.4.4-20210920.war/artifacts/rundeck-3.4.4-20210920.war/download -rundeck_cli_url: https://github.com/rundeck/rundeck-cli/releases/download/v1.3.10/rundeck-cli-1.3.10-all.jar +rundeck_version: 5.11.1-20250415 +rundeck_cli_version: "2.0.8" + +rundeck_war_url: + "https://packagecloud.io/pagerduty/rundeck/packages/java/org.rundeck/\ + rundeck-{{ rundeck_version }}.war/artifacts/rundeck-{{ rundeck_version }}.war/download" + +rundeck_cli_url: + "https://github.com/rundeck/rundeck-cli/releases/download/\ + v{{ rundeck_cli_version }}/rundeck-cli-{{ rundeck_cli_version }}-all.jar" diff --git a/tests/integration/targets/setup_rundeck/vars/RedHat.yml b/tests/integration/targets/setup_rundeck/vars/RedHat.yml index 314f0ef415..bba076aecd 100644 --- a/tests/integration/targets/setup_rundeck/vars/RedHat.yml +++ b/tests/integration/targets/setup_rundeck/vars/RedHat.yml @@ -3,4 +3,4 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -openjdk_pkg: java-1.8.0-openjdk +openjdk_pkg: java-11-openjdk-headless diff --git a/tests/integration/targets/setup_snap/tasks/D-RedHat-10.yml b/tests/integration/targets/setup_snap/tasks/D-RedHat-10.yml new file mode 120000 index 0000000000..0b06951496 --- /dev/null +++ b/tests/integration/targets/setup_snap/tasks/D-RedHat-10.yml @@ -0,0 +1 @@ +nothing.yml \ No newline at end of file diff --git 
a/tests/integration/targets/setup_snap/tasks/D-RedHat-9.3.yml b/tests/integration/targets/setup_snap/tasks/D-RedHat-9.3.yml deleted file mode 100644 index 5bbfaff128..0000000000 --- a/tests/integration/targets/setup_snap/tasks/D-RedHat-9.3.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -# Do nothing diff --git a/tests/integration/targets/setup_snap/tasks/D-RedHat-9.3.yml b/tests/integration/targets/setup_snap/tasks/D-RedHat-9.3.yml new file mode 120000 index 0000000000..0b06951496 --- /dev/null +++ b/tests/integration/targets/setup_snap/tasks/D-RedHat-9.3.yml @@ -0,0 +1 @@ +nothing.yml \ No newline at end of file diff --git a/tests/integration/targets/setup_snap/tasks/D-RedHat-9.5.yml b/tests/integration/targets/setup_snap/tasks/D-RedHat-9.5.yml new file mode 120000 index 0000000000..0b06951496 --- /dev/null +++ b/tests/integration/targets/setup_snap/tasks/D-RedHat-9.5.yml @@ -0,0 +1 @@ +nothing.yml \ No newline at end of file diff --git a/tests/integration/targets/setup_snap/tasks/D-RedHat-9.6.yml b/tests/integration/targets/setup_snap/tasks/D-RedHat-9.6.yml new file mode 120000 index 0000000000..0b06951496 --- /dev/null +++ b/tests/integration/targets/setup_snap/tasks/D-RedHat-9.6.yml @@ -0,0 +1 @@ +nothing.yml \ No newline at end of file diff --git a/tests/integration/targets/setup_wildfly_server/handlers/main.yml b/tests/integration/targets/setup_wildfly_server/handlers/main.yml index 1383b15753..38522db30a 100644 --- a/tests/integration/targets/setup_wildfly_server/handlers/main.yml +++ b/tests/integration/targets/setup_wildfly_server/handlers/main.yml @@ -14,5 +14,5 @@ path: '{{ item }}' state: absent loop: - - '{{ wf_service_file_path }}' - - '{{ default_deploy_root }}' + - '{{ wf_service_file_path }}' + - '{{ default_deploy_root }}' diff --git a/tests/integration/targets/setup_wildfly_server/meta/main.yml b/tests/integration/targets/setup_wildfly_server/meta/main.yml index 2d29ebb672..ca1915e05c 100644 --- a/tests/integration/targets/setup_wildfly_server/meta/main.yml +++ b/tests/integration/targets/setup_wildfly_server/meta/main.yml @@ -4,5 +4,5 @@ # SPDX-License-Identifier: GPL-3.0-or-later dependencies: -- setup_pkg_mgr -- setup_remote_tmp_dir + - setup_pkg_mgr + - setup_remote_tmp_dir diff --git a/tests/integration/targets/shutdown/tasks/main.yml b/tests/integration/targets/shutdown/tasks/main.yml index 2c9bc6bd6d..2903248407 100644 --- a/tests/integration/targets/shutdown/tasks/main.yml +++ b/tests/integration/targets/shutdown/tasks/main.yml @@ -44,7 +44,7 @@ - name: Verify shutdown command is present in Alpine except systemd assert: that: '"poweroff" in shutdown_result["shutdown_command"]' - when: + when: - "ansible_os_family == 'Alpine'" - '"systemctl" not in shutdown_result["shutdown_command"]' @@ -52,7 +52,7 @@ - name: Verify shutdown command is present in VMKernel except systemd assert: that: '"halt" in shutdown_result["shutdown_command"]' - when: + when: - "ansible_system == 'VMKernel'" - '"systemctl" not in shutdown_result["shutdown_command"]' @@ -111,7 +111,7 @@ community.general.shutdown: register: shutdown_result check_mode: true - when: + when: - "(ansible_distribution == 'Ubuntu' and ansible_distribution_major_version is version('18', '>=')) or (ansible_distribution == 'Debian')" - '"systemd-sysv" not in ansible_facts.packages' diff --git a/tests/integration/targets/snap/aliases 
b/tests/integration/targets/snap/aliases index b209bbc015..34c378b729 100644 --- a/tests/integration/targets/snap/aliases +++ b/tests/integration/targets/snap/aliases @@ -11,3 +11,4 @@ skip/freebsd skip/osx skip/macos skip/docker +skip/rhel8.8 # TODO: fix diff --git a/tests/integration/targets/snap_alias/aliases b/tests/integration/targets/snap_alias/aliases index b209bbc015..34c378b729 100644 --- a/tests/integration/targets/snap_alias/aliases +++ b/tests/integration/targets/snap_alias/aliases @@ -11,3 +11,4 @@ skip/freebsd skip/osx skip/macos skip/docker +skip/rhel8.8 # TODO: fix diff --git a/tests/integration/targets/snap_alias/tasks/test.yml b/tests/integration/targets/snap_alias/tasks/test.yml index 50e6e33b49..87a7419358 100644 --- a/tests/integration/targets/snap_alias/tasks/test.yml +++ b/tests/integration/targets/snap_alias/tasks/test.yml @@ -43,12 +43,12 @@ - name: assert single alias assert: that: - - alias_single_0 is changed - - alias_single_1 is changed - - alias_single_2 is not changed - - alias_single_3 is not changed - - 'alias_single_1.snap_aliases["hello-world"] == ["hw"]' - - 'alias_single_3.snap_aliases["hello-world"] == ["hw"]' + - alias_single_0 is changed + - alias_single_1 is changed + - alias_single_2 is not changed + - alias_single_3 is not changed + - 'alias_single_1.snap_aliases["hello-world"] == ["hw"]' + - 'alias_single_3.snap_aliases["hello-world"] == ["hw"]' - name: Create multiple aliases (check mode) community.general.snap_alias: @@ -79,12 +79,12 @@ - name: assert multi alias assert: that: - - alias_multi_0 is changed - - alias_multi_1 is changed - - alias_multi_2 is not changed - - alias_multi_3 is not changed - - 'alias_multi_1.snap_aliases["hello-world"] == ["hw", "hw2", "hw3"]' - - 'alias_multi_3.snap_aliases["hello-world"] == ["hw", "hw2", "hw3"]' + - alias_multi_0 is changed + - alias_multi_1 is changed + - alias_multi_2 is not changed + - alias_multi_3 is not changed + - 'alias_multi_1.snap_aliases["hello-world"] == ["hw", "hw2", "hw3"]' + - 'alias_multi_3.snap_aliases["hello-world"] == ["hw", "hw2", "hw3"]' - name: Remove one specific alias (check mode) community.general.snap_alias: @@ -115,12 +115,12 @@ - name: assert remove alias assert: that: - - alias_remove_0 is changed - - alias_remove_1 is changed - - alias_remove_2 is not changed - - alias_remove_3 is not changed - - 'alias_remove_1.snap_aliases["hello-world"] == ["hw2", "hw3"]' - - 'alias_remove_3.snap_aliases["hello-world"] == ["hw2", "hw3"]' + - alias_remove_0 is changed + - alias_remove_1 is changed + - alias_remove_2 is not changed + - alias_remove_3 is not changed + - 'alias_remove_1.snap_aliases["hello-world"] == ["hw2", "hw3"]' + - 'alias_remove_3.snap_aliases["hello-world"] == ["hw2", "hw3"]' - name: Remove all aliases for snap (check mode) community.general.snap_alias: @@ -151,9 +151,9 @@ - name: assert remove_all alias assert: that: - - alias_remove_all_0 is changed - - alias_remove_all_1 is changed - - alias_remove_all_2 is not changed - - alias_remove_all_3 is not changed - - 'alias_remove_all_1.snap_aliases["hello-world"] == []' - - 'alias_remove_all_3.snap_aliases["hello-world"] == []' + - alias_remove_all_0 is changed + - alias_remove_all_1 is changed + - alias_remove_all_2 is not changed + - alias_remove_all_3 is not changed + - 'alias_remove_all_1.snap_aliases["hello-world"] == []' + - 'alias_remove_all_3.snap_aliases["hello-world"] == []' diff --git a/tests/integration/targets/spectrum_model_attrs/tasks/main.yml 
b/tests/integration/targets/spectrum_model_attrs/tasks/main.yml index 42e53d7d7d..ecc1eb6005 100644 --- a/tests/integration/targets/spectrum_model_attrs/tasks/main.yml +++ b/tests/integration/targets/spectrum_model_attrs/tasks/main.yml @@ -14,65 +14,65 @@ or oneclick_url is not defined - block: - - name: "001: Enforce maintenance mode for {{ model_name }} with a note about why [check_mode test]" - spectrum_model_attrs: &mm_enabled_args - url: "{{ oneclick_url }}" - username: "{{ oneclick_username }}" - password: "{{ oneclick_password }}" - name: "{{ model_name }}" - type: "{{ model_type }}" - validate_certs: false - attributes: - - name: "isManaged" - value: "false" - - name: "Notes" - value: "{{ note_mm_enabled }}" - check_mode: true - register: mm_enabled_check_mode + - name: "001: Enforce maintenance mode for {{ model_name }} with a note about why [check_mode test]" + spectrum_model_attrs: &mm_enabled_args + url: "{{ oneclick_url }}" + username: "{{ oneclick_username }}" + password: "{{ oneclick_password }}" + name: "{{ model_name }}" + type: "{{ model_type }}" + validate_certs: false + attributes: + - name: "isManaged" + value: "false" + - name: "Notes" + value: "{{ note_mm_enabled }}" + check_mode: true + register: mm_enabled_check_mode - - name: "001: assert that changes were made" - assert: - that: - - mm_enabled_check_mode is changed + - name: "001: assert that changes were made" + assert: + that: + - mm_enabled_check_mode is changed - - name: "001: assert that changed_attrs is properly set" - assert: - that: - - mm_enabled_check_mode.changed_attrs.Notes == note_mm_enabled - - mm_enabled_check_mode.changed_attrs.isManaged == "false" + - name: "001: assert that changed_attrs is properly set" + assert: + that: + - mm_enabled_check_mode.changed_attrs.Notes == note_mm_enabled + - mm_enabled_check_mode.changed_attrs.isManaged == "false" - - name: "002: Enforce maintenance mode for {{ model_name }} with a note about why" - spectrum_model_attrs: - <<: *mm_enabled_args - register: mm_enabled - check_mode: false + - name: "002: Enforce maintenance mode for {{ model_name }} with a note about why" + spectrum_model_attrs: + <<: *mm_enabled_args + register: mm_enabled + check_mode: false - - name: "002: assert that changes were made" - assert: - that: - - mm_enabled is changed + - name: "002: assert that changes were made" + assert: + that: + - mm_enabled is changed - - name: "002: assert that changed_attrs is properly set" - assert: - that: - - mm_enabled.changed_attrs.Notes == note_mm_enabled - - mm_enabled.changed_attrs.isManaged == "false" + - name: "002: assert that changed_attrs is properly set" + assert: + that: + - mm_enabled.changed_attrs.Notes == note_mm_enabled + - mm_enabled.changed_attrs.isManaged == "false" - - name: "003: Enforce maintenance mode for {{ model_name }} with a note about why [idempontence test]" - spectrum_model_attrs: - <<: *mm_enabled_args - register: mm_enabled_idp - check_mode: false + - name: "003: Enforce maintenance mode for {{ model_name }} with a note about why [idempotence test]" + spectrum_model_attrs: + <<: *mm_enabled_args + register: mm_enabled_idp + check_mode: false - - name: "003: assert that changes were not made" - assert: - that: - - mm_enabled_idp is not changed + - name: "003: assert that changes were not made" + assert: + that: + - mm_enabled_idp is not changed - - name: "003: assert that changed_attrs is not set" - assert: - that: - - mm_enabled_idp.changed_attrs == {} + - name: "003: assert that changed_attrs is not set" + assert: + that: 
- mm_enabled_idp.changed_attrs == {} vars: note_mm_enabled: "MM set via CO #1234 by OJ Simpson" diff --git a/tests/integration/targets/ssh_config/tasks/options.yml b/tests/integration/targets/ssh_config/tasks/options.yml index 203c782487..2f93b952bd 100644 --- a/tests/integration/targets/ssh_config/tasks/options.yml +++ b/tests/integration/targets/ssh_config/tasks/options.yml @@ -20,8 +20,10 @@ identities_only: true controlmaster: "auto" controlpath: "~/.ssh/sockets/%r@%h-%p" - controlpersist: yes + controlpersist: "yes" dynamicforward: '10080' + other_options: + serveraliveinterval: '30' state: present register: options_add check_mode: true @@ -55,8 +57,10 @@ identities_only: true controlmaster: "auto" controlpath: "~/.ssh/sockets/%r@%h-%p" - controlpersist: yes + controlpersist: "yes" dynamicforward: '10080' + other_options: + serveraliveinterval: '30' state: present register: options_add @@ -79,8 +83,10 @@ identities_only: true controlmaster: "auto" controlpath: "~/.ssh/sockets/%r@%h-%p" - controlpersist: yes + controlpersist: "yes" dynamicforward: '10080' + other_options: + serveraliveinterval: '30' state: present register: options_add_again @@ -109,6 +115,7 @@ - "'controlpath ~/.ssh/sockets/%r@%h-%p' in slurp_ssh_config['content'] | b64decode" - "'controlpersist yes' in slurp_ssh_config['content'] | b64decode" - "'dynamicforward 10080' in slurp_ssh_config['content'] | b64decode" + - "'serveraliveinterval 30' in slurp_ssh_config['content'] | b64decode" - name: Options - Update host community.general.ssh_config: @@ -119,10 +126,12 @@ add_keys_to_agent: false host_key_algorithms: "+ssh-ed25519" identities_only: false - controlmaster: no + controlmaster: "no" controlpath: "~/.ssh/new-sockets/%r@%h-%p" controlpersist: "600" dynamicforward: '11080' + other_options: + serveraliveinterval: '30' state: present register: options_update @@ -145,10 +154,12 @@ add_keys_to_agent: false host_key_algorithms: "+ssh-ed25519" identities_only: false - controlmaster: no + controlmaster: "no" controlpath: "~/.ssh/new-sockets/%r@%h-%p" controlpersist: "600" dynamicforward: '11080' + other_options: + serveraliveinterval: '30' state: present register: options_update @@ -178,6 +189,7 @@ - "'controlpath ~/.ssh/new-sockets/%r@%h-%p' in slurp_ssh_config['content'] | b64decode" - "'controlpersist 600' in slurp_ssh_config['content'] | b64decode" - "'dynamicforward 11080' in slurp_ssh_config['content'] | b64decode" + - "'serveraliveinterval 30' in slurp_ssh_config['content'] | b64decode" - name: Options - Ensure no update in case option exist in ssh_config file but wasn't defined in playbook community.general.ssh_config: @@ -212,6 +224,7 @@ - "'controlpath ~/.ssh/new-sockets/%r@%h-%p' in slurp_ssh_config['content'] | b64decode" - "'controlpersist 600' in slurp_ssh_config['content'] | b64decode" - "'dynamicforward 11080' in slurp_ssh_config['content'] | b64decode" + - "'serveraliveinterval 30' in slurp_ssh_config['content'] | b64decode" - name: Debug debug: @@ -264,6 +277,7 @@ - "'controlpath ~/.ssh/sockets/%r@%h-%p' not in slurp_ssh_config['content'] | b64decode" - "'controlpersist yes' not in slurp_ssh_config['content'] | b64decode" - "'dynamicforward 10080' not in slurp_ssh_config['content'] | b64decode" + - "'serveraliveinterval 30' not in slurp_ssh_config['content'] | b64decode" # Proxycommand and ProxyJump are mutually exclusive. 
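#
# The other_options parameter exercised throughout this file takes a free-form
# dict of additional ssh_config keywords, written lower-case as in the tasks
# above, for settings that have no dedicated module option. A minimal sketch
# (the host and the extra serveralivecountmax keyword are illustrative):
#
# - name: Keep connections to a bastion alive
#   community.general.ssh_config:
#     host: bastion.example.com
#     user: "{{ ansible_user_id }}"
#     other_options:
#       serveraliveinterval: '30'
#       serveralivecountmax: '4'
#     state: present
#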
# Reset ssh_config before testing options with proxyjump @@ -284,8 +298,10 @@ identities_only: true controlmaster: "auto" controlpath: "~/.ssh/sockets/%r@%h-%p" - controlpersist: yes + controlpersist: "yes" dynamicforward: '10080' + other_options: + serveraliveinterval: '30' state: present register: options_add check_mode: true @@ -319,8 +335,10 @@ identities_only: true controlmaster: "auto" controlpath: "~/.ssh/sockets/%r@%h-%p" - controlpersist: yes + controlpersist: "yes" dynamicforward: '10080' + other_options: + serveraliveinterval: '30' state: present register: options_add @@ -343,8 +361,10 @@ identities_only: true controlmaster: "auto" controlpath: "~/.ssh/sockets/%r@%h-%p" - controlpersist: yes + controlpersist: "yes" dynamicforward: '10080' + other_options: + serveraliveinterval: '30' state: present register: options_add_again @@ -373,6 +393,7 @@ - "'controlpath ~/.ssh/sockets/%r@%h-%p' in slurp_ssh_config['content'] | b64decode" - "'controlpersist yes' in slurp_ssh_config['content'] | b64decode" - "'dynamicforward 10080' in slurp_ssh_config['content'] | b64decode" + - "'serveraliveinterval 30' in slurp_ssh_config['content'] | b64decode" - name: Options - Update host community.general.ssh_config: @@ -383,10 +404,12 @@ add_keys_to_agent: false host_key_algorithms: "+ssh-ed25519" identities_only: false - controlmaster: no + controlmaster: "no" controlpath: "~/.ssh/new-sockets/%r@%h-%p" controlpersist: "600" dynamicforward: '11080' + other_options: + serveraliveinterval: '30' state: present register: options_update @@ -409,10 +432,12 @@ add_keys_to_agent: false host_key_algorithms: "+ssh-ed25519" identities_only: false - controlmaster: no + controlmaster: "no" controlpath: "~/.ssh/new-sockets/%r@%h-%p" controlpersist: "600" dynamicforward: '11080' + other_options: + serveraliveinterval: '30' state: present register: options_update @@ -442,6 +467,7 @@ - "'controlpath ~/.ssh/new-sockets/%r@%h-%p' in slurp_ssh_config['content'] | b64decode" - "'controlpersist 600' in slurp_ssh_config['content'] | b64decode" - "'dynamicforward 11080' in slurp_ssh_config['content'] | b64decode" + - "'serveraliveinterval 30' in slurp_ssh_config['content'] | b64decode" - name: Options - Ensure no update in case option exist in ssh_config file but wasn't defined in playbook community.general.ssh_config: @@ -476,6 +502,7 @@ - "'controlpath ~/.ssh/new-sockets/%r@%h-%p' in slurp_ssh_config['content'] | b64decode" - "'controlpersist 600' in slurp_ssh_config['content'] | b64decode" - "'dynamicforward 11080' in slurp_ssh_config['content'] | b64decode" + - "'serveraliveinterval 30' in slurp_ssh_config['content'] | b64decode" - name: Debug debug: @@ -528,3 +555,4 @@ - "'controlpath ~/.ssh/sockets/%r@%h-%p' not in slurp_ssh_config['content'] | b64decode" - "'controlpersist yes' not in slurp_ssh_config['content'] | b64decode" - "'dynamicforward 10080' not in slurp_ssh_config['content'] | b64decode" + - "'serveraliveinterval 30' not in slurp_ssh_config['content'] | b64decode" diff --git a/tests/integration/targets/sudoers/tasks/main.yml b/tests/integration/targets/sudoers/tasks/main.yml index 36397f41ad..fa03b71dac 100644 --- a/tests/integration/targets/sudoers/tasks/main.yml +++ b/tests/integration/targets/sudoers/tasks/main.yml @@ -229,7 +229,7 @@ ignore_errors: true when: ansible_os_family != 'Darwin' register: edge_case_3 - + - name: Revoke non-existing rule community.general.sudoers: name: non-existing-rule @@ -278,12 +278,14 @@ that: - not revoke_rule_1_stat.stat.exists - not 
revoke_non_existing_rule_stat.stat.exists - + - name: Check edge case responses ansible.builtin.assert: that: - edge_case_1 is failed - "'Failed to validate sudoers rule' in edge_case_1.msg" + - edge_case_1.stdout is defined + - edge_case_1.stderr is defined - edge_case_2 is not failed - name: Check missing validation edge case diff --git a/tests/integration/targets/supervisorctl/aliases b/tests/integration/targets/supervisorctl/aliases index 58524f1fb1..e28f8d0919 100644 --- a/tests/integration/targets/supervisorctl/aliases +++ b/tests/integration/targets/supervisorctl/aliases @@ -4,5 +4,7 @@ azp/posix/2 destructive -skip/python3 skip/aix +skip/rhel # TODO executables are installed in /usr/local/bin, which isn't part of $PATH +skip/macos # TODO executables are installed in /Library/Frameworks/Python.framework/Versions/3.11/bin, which isn't part of $PATH +unstable # TODO fix! diff --git a/tests/integration/targets/supervisorctl/tasks/install_pip.yml b/tests/integration/targets/supervisorctl/tasks/install_pip.yml index b1d3bd7796..89c03371d0 100644 --- a/tests/integration/targets/supervisorctl/tasks/install_pip.yml +++ b/tests/integration/targets/supervisorctl/tasks/install_pip.yml @@ -5,5 +5,5 @@ - name: install supervisord pip: - name: supervisor<4.0.0 # supervisor version 4.0.0 fails tests + name: supervisor state: present diff --git a/tests/integration/targets/supervisorctl/tasks/main.yml b/tests/integration/targets/supervisorctl/tasks/main.yml index 6f8c7968c0..dd47971a31 100644 --- a/tests/integration/targets/supervisorctl/tasks/main.yml +++ b/tests/integration/targets/supervisorctl/tasks/main.yml @@ -8,50 +8,52 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -- block: - - tempfile: - state: directory - suffix: supervisorctl-tests - register: supervisord_sock_path +- when: + # setuptools is too old on RHEL/CentOS 6 (https://github.com/Supervisor/meld3/issues/23) + - ansible_os_family != 'RedHat' or ansible_distribution_major_version|int > 6 + # For some reason CentOS 7 and OpenSuSE 15 do not work on ansible-core 2.16 + - ansible_version.minor != 16 or ansible_distribution not in ['CentOS', 'openSUSE Leap'] + block: + - block: + - tempfile: + state: directory + suffix: supervisorctl-tests + register: supervisord_sock_path - - command: 'echo {{ remote_tmp_dir }}' - register: echo - - set_fact: - remote_dir: '{{ echo.stdout }}' + - command: 'echo {{ remote_tmp_dir }}' + register: echo + - set_fact: + remote_dir: '{{ echo.stdout }}' - - include_vars: '{{ item }}' - with_first_found: - - files: - - '{{ ansible_distribution }}.yml' - - '{{ ansible_os_family }}.yml' - - 'defaults.yml' + - include_vars: '{{ item }}' + with_first_found: + - files: + - '{{ ansible_distribution }}.yml' + - '{{ ansible_os_family }}.yml' + - 'defaults.yml' - - include_tasks: '{{ item }}' - with_first_found: - - files: - - 'install_{{ ansible_distribution }}.yml' # CentOS - - 'install_{{ ansible_os_family }}.yml' # RedHat - - 'install_{{ ansible_system }}.yml' # Linux + - include_tasks: '{{ item }}' + with_first_found: + - files: + - 'install_{{ ansible_distribution }}.yml' # CentOS + - 'install_{{ ansible_os_family }}.yml' # RedHat + - 'install_{{ ansible_system }}.yml' # Linux - - include_tasks: test.yml - with_items: - - { username: '', password: '' } - - { username: 'testétest', password: 'passéword' } # non-ASCII credentials - loop_control: - loop_var: credentials + - include_tasks: test.yml + 
with_items: + - { username: '', password: '' } + - { username: 'testétest', password: 'passéword' } # non-ASCII credentials + loop_control: + loop_var: credentials - # setuptools is too old on RHEL/CentOS 6 (https://github.com/Supervisor/meld3/issues/23) - when: ansible_os_family != 'RedHat' or ansible_distribution_major_version|int > 6 + always: + - include_tasks: '{{ item }}' + with_first_found: + - files: + - 'uninstall_{{ ansible_distribution }}.yml' # CentOS + - 'uninstall_{{ ansible_os_family }}.yml' # RedHat + - 'uninstall_{{ ansible_system }}.yml' # Linux - always: - - include_tasks: '{{ item }}' - when: ansible_os_family != 'RedHat' or ansible_distribution_major_version|int > 6 - with_first_found: - - files: - - 'uninstall_{{ ansible_distribution }}.yml' # CentOS - - 'uninstall_{{ ansible_os_family }}.yml' # RedHat - - 'uninstall_{{ ansible_system }}.yml' # Linux - - - file: - path: '{{ supervisord_sock_path.path }}' - state: absent + - file: + path: '{{ supervisord_sock_path.path }}' + state: absent diff --git a/tests/integration/targets/supervisorctl/tasks/test.yml b/tests/integration/targets/supervisorctl/tasks/test.yml index 5d1a867edc..f3fb9b4c81 100644 --- a/tests/integration/targets/supervisorctl/tasks/test.yml +++ b/tests/integration/targets/supervisorctl/tasks/test.yml @@ -7,6 +7,7 @@ template: src: supervisord.conf dest: '{{ remote_dir }}/supervisord.conf' + diff: true - block: - import_tasks: start_supervisord.yml diff --git a/tests/integration/targets/supervisorctl/tasks/test_start.yml b/tests/integration/targets/supervisorctl/tasks/test_start.yml index b814486cdb..da310eb657 100644 --- a/tests/integration/targets/supervisorctl/tasks/test_start.yml +++ b/tests/integration/targets/supervisorctl/tasks/test_start.yml @@ -22,6 +22,8 @@ when: credentials.username != '' - command: "supervisorctl -c {{ remote_dir }}/supervisord.conf {% if credentials.username %}-u {{ credentials.username }} -p {{ credentials.password }}{% endif %} status" + register: result_cmd + failed_when: result_cmd.rc not in [0, 3] - name: check that service is started assert: @@ -30,8 +32,9 @@ - (result is changed and result_with_auth is skip) or (result is skip and result_with_auth is changed) - name: check that service is running (part1) # py1.log content is checked below - script: "files/sendProcessStdin.py 'pys:py1' 2 \ - '{{ credentials.username }}' '{{ credentials.password }}'" + script: + cmd: "files/sendProcessStdin.py 'pys:py1' 2 '{{ credentials.username }}' '{{ credentials.password }}'" + executable: "{{ ansible_facts.python.executable }}" - name: try again to start py1 service (without auth) supervisorctl: diff --git a/tests/integration/targets/supervisorctl/tasks/test_stop.yml b/tests/integration/targets/supervisorctl/tasks/test_stop.yml index 8d8fdd42af..8e8a28b6eb 100644 --- a/tests/integration/targets/supervisorctl/tasks/test_stop.yml +++ b/tests/integration/targets/supervisorctl/tasks/test_stop.yml @@ -24,6 +24,8 @@ when: credentials.username != '' - command: "supervisorctl -c {{ remote_dir }}/supervisord.conf {% if credentials.username %}-u {{ credentials.username }} -p {{ credentials.password }}{% endif %} status" + register: result_cmd + failed_when: result_cmd.rc not in [0, 3] - name: check that service is stopped assert: @@ -32,8 +34,9 @@ - (result is changed and result_with_auth is skip) or (result is skip and result_with_auth is changed) - name: "check that service isn't running" - script: "files/sendProcessStdin.py 'pys:py1' 1 \ - '{{ credentials.username }}' '{{ 
credentials.password }}'" + script: + cmd: "files/sendProcessStdin.py 'pys:py1' 1 '{{ credentials.username }}' '{{ credentials.password }}'" + executable: "{{ ansible_facts.python.executable }}" register: is_py1_alive failed_when: is_py1_alive is success diff --git a/tests/integration/targets/sysrc/files/10394.conf b/tests/integration/targets/sysrc/files/10394.conf new file mode 100644 index 0000000000..fe0bc5b145 --- /dev/null +++ b/tests/integration/targets/sysrc/files/10394.conf @@ -0,0 +1,7 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +k1="v1" +jail_list=" + foo + bar" \ No newline at end of file diff --git a/tests/integration/targets/sysrc/tasks/main.yml b/tests/integration/targets/sysrc/tasks/main.yml index 25d7ca4d59..f1135d488f 100644 --- a/tests/integration/targets/sysrc/tasks/main.yml +++ b/tests/integration/targets/sysrc/tasks/main.yml @@ -140,13 +140,13 @@ - name: Test within jail # # NOTE: currently fails with FreeBSD 12 with minor version less than 4 - # NOTE: currently fails with FreeBSD 13 with minor version less than 2 - # NOTE: currently fails with FreeBSD 14 with minor version less than 1 + # NOTE: currently fails with FreeBSD 13 with minor version less than 4 + # NOTE: currently fails with FreeBSD 14 with minor version less than 2 # when: >- ansible_distribution_version is version('12.4', '>=') and ansible_distribution_version is version('13', '<') - or ansible_distribution_version is version('13.2', '>=') and ansible_distribution_version is version('14', '<') - or ansible_distribution_version is version('14.1', '>=') + or ansible_distribution_version is version('13.4', '>=') and ansible_distribution_version is version('14', '<') + or ansible_distribution_version is version('14.2', '>=') block: - name: Setup testjail include_tasks: setup-testjail.yml @@ -333,13 +333,115 @@ - not sysrc_value_absent_idempotent.changed - "'sysrc_delim=\"t1,t2\"' in sysrc_delim_content.stdout_lines" - "'sysrc_delim_delete' not in sysrc_delim_content.stdout_lines" + + ## + ## sysrc - value contains equals sign + ## + - name: Value contains equals sign + vars: + value_1: "-u spamd -x --allow-tell --max-spare=1 --listen=*" + value_2: "-u spamd -x --allow-tell --max-spare=1 --listen=localhost" + block: + + - name: Add spamd_flags + sysrc: + name: spamd_flags + value: "{{ value_1 }}" + register: sysrc_equals_sign_1 + + - name: Change spamd_flags + sysrc: + name: spamd_flags + value: "{{ value_2 }}" + register: sysrc_equals_sign_2 + + - name: Get file content + command: sysrc -a + register: sysrc_content + + - name: Ensure sysrc did as intended with values that contains equals sign + vars: + conf: "{{ sysrc_content.stdout | from_yaml }}" + assert: + that: + - "value_1 == sysrc_equals_sign_1.value" + - sysrc_equals_sign_2.changed + - "value_2 == sysrc_equals_sign_2.value" + - "value_2 == conf.spamd_flags" + + ## + ## sysrc - #10004 state=absent when using default settings will report `changed=true` + ## + - name: Test that a key from /etc/defaults/rc.conf is not used to mark changed + sysrc: + name: dumpdev + state: absent + path: /tmp/10004.conf + register: sysrc_10004_absent + failed_when: sysrc_10004_absent.changed + + - name: Test that a delimited key from /etc/defaults/rc.conf is not used to mark changed + sysrc: + name: rc_conf_files + state: value_absent + path: /tmp/10004.conf + register: sysrc_10004_value_absent + failed_when: 
+
+    - name: Test that a key from /etc/defaults/rc.conf is not used to mark changed without a path
+      sysrc:
+        name: static_routes
+        state: absent
+      register: sysrc_absent_default
+      failed_when: sysrc_absent_default.changed
+
+    ##
+    ## sysrc - #10394 Ensure that files with multi-line values work
+    ##
+    - name: Copy 10394.conf
+      copy:
+        src: 10394.conf
+        dest: /tmp/10394.conf
+
+    - name: Change value for k1
+      sysrc:
+        name: k1
+        value: v2
+        path: /tmp/10394.conf
+      register: sysrc_10394_changed
+
+    - name: Get file content
+      shell: "cat /tmp/10394.conf"
+      register: sysrc_10394_content
+
+    - name: Ensure sysrc changed k1 from v1 to v2
+      assert:
+        that:
+          - sysrc_10394_changed.changed
+          - >
+            'k1="v2"' in sysrc_10394_content.stdout_lines
+
+    ##
+    ## sysrc - additional tests
+    ##
+    - name: Ensure failure on OID-style names, since sysrc does not support them
+      sysrc:
+        name: not.valid.var
+        value: test
+      register: sysrc_name_check
+      failed_when:
+        - sysrc_name_check is not failed
+        - >
+          'Name may only contain alpha-numeric and underscore characters' != sysrc_name_check.msg
+
+
   always:
+
     - name: Restore /etc/rc.conf
       copy:
-        content: "{{ cached_etc_rcconf_content }}"
+        content: "{{ cached_etc_rcconf_content.stdout }}"
         dest: /etc/rc.conf

     - name: Restore /boot/loader.conf
       copy:
-        content: "{{ cached_boot_loaderconf_content }}"
+        content: "{{ cached_boot_loaderconf_content.stdout }}"
         dest: /boot/loader.conf
diff --git a/tests/integration/targets/systemd_creds_encrypt/tasks/main.yaml b/tests/integration/targets/systemd_creds_encrypt/tasks/main.yaml
index 362fe90bcb..8ed45b2dc3 100644
--- a/tests/integration/targets/systemd_creds_encrypt/tasks/main.yaml
+++ b/tests/integration/targets/systemd_creds_encrypt/tasks/main.yaml
@@ -19,7 +19,7 @@
     - name: Assert encrypted secret output is base64 encoded
       ansible.builtin.assert:
         that:
-          - encrypted_secret.value | b64decode
+          - (encrypted_secret.value | b64decode) is truthy
         fail_msg: "Encrypted secret is not base64 encoded"
         success_msg: "Encrypted secret is base64 encoded"
diff --git a/tests/integration/targets/systemd_info/aliases b/tests/integration/targets/systemd_info/aliases
new file mode 100644
index 0000000000..84a120ca8c
--- /dev/null
+++ b/tests/integration/targets/systemd_info/aliases
@@ -0,0 +1,10 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+needs/root
+azp/posix/1
+skip/aix
+skip/freebsd
+skip/osx
+skip/macos
\ No newline at end of file
diff --git a/tests/integration/targets/systemd_info/tasks/main.yml b/tests/integration/targets/systemd_info/tasks/main.yml
new file mode 100644
index 0000000000..e41816195b
--- /dev/null
+++ b/tests/integration/targets/systemd_info/tasks/main.yml
@@ -0,0 +1,26 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Skip Alpine
+  meta: end_host
+  when: ansible_distribution == 'Alpine'
+
+- name: Check ansible_service_mgr
+  ansible.builtin.assert:
+    that: ansible_service_mgr == 'systemd'
+
+- name: Test systemd_facts
+  block:
+
+    - name: Run tests
+      import_tasks: tests.yml
+
+  when: >
+    (ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux'] and ansible_distribution_major_version is version('7', '>=')) or
+    ansible_distribution == 'Fedora' or
+    (ansible_distribution == 'Ubuntu' and ansible_distribution_version is version('15.04', '>=')) or
+    (ansible_distribution == 'Debian' and ansible_distribution_version is version('8', '>=')) or
+    ansible_os_family == 'Suse' or
+    ansible_distribution == 'Archlinux'
\ No newline at end of file
diff --git a/tests/integration/targets/systemd_info/tasks/tests.yml b/tests/integration/targets/systemd_info/tasks/tests.yml
new file mode 100644
index 0000000000..06c63dbd91
--- /dev/null
+++ b/tests/integration/targets/systemd_info/tasks/tests.yml
@@ -0,0 +1,163 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Gather all units from shell
+  ansible.builtin.command: systemctl list-units --no-pager --type service,target,socket,mount,timer --all --plain --no-legend
+  register: all_units
+
+- name: Assert command ran successfully
+  ansible.builtin.assert:
+    that:
+      - all_units.rc == 0
+
+- name: Gather all units
+  community.general.systemd_info:
+  register: units_all
+
+- name: Check all units exist
+  ansible.builtin.assert:
+    that:
+      - units_all is defined
+      - units_all.units | length == all_units.stdout_lines | length
+    success_msg: "Success: All units collected."
+
+- name: Build all units list
+  set_fact:
+    shell_units: "{{ all_units.stdout_lines | map('split') | list }}"
+
+- name: Check all units properties
+  ansible.builtin.assert:
+    that:
+      - units_all.units[item[0]].name == item[0]
+      - units_all.units[item[0]].loadstate == item[1]
+      - units_all.units[item[0]].activestate == item[2]
+      - units_all.units[item[0]].substate == item[3]
+  loop: "{{ shell_units }}"
+  loop_control:
+    label: "{{ item[0] }}"
+
+- name: Gather systemd-journald.service properties from shell
+  ansible.builtin.command: systemctl show systemd-journald.service -p Id,LoadState,ActiveState,SubState,FragmentPath,MainPID,ExecMainPID,UnitFileState,UnitFilePreset,Description,Restart
+  register: journald_prop
+
+- name: Assert command ran successfully
+  ansible.builtin.assert:
+    that:
+      - journald_prop.rc == 0
+
+- name: Gather systemd-journald.service
+  community.general.systemd_info:
+    unitname:
+      - systemd-journald.service
+  register: journal_unit
+
+- name: Check unit facts and all properties
+  ansible.builtin.assert:
+    that:
+      - journal_unit.units is defined
+      - journal_unit.units['systemd-journald.service'] is defined
+      - journal_unit.units['systemd-journald.service'].name is defined
+      - journal_unit.units['systemd-journald.service'].loadstate is defined
+      - journal_unit.units['systemd-journald.service'].activestate is defined
+      - journal_unit.units['systemd-journald.service'].substate is defined
+      - journal_unit.units['systemd-journald.service'].fragmentpath is defined
+      - journal_unit.units['systemd-journald.service'].mainpid is defined
+      - journal_unit.units['systemd-journald.service'].execmainpid is defined
+      - journal_unit.units['systemd-journald.service'].unitfilestate is defined
+      - journal_unit.units['systemd-journald.service'].unitfilepreset is defined
+    success_msg: "Success: All properties collected."
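+# The assertion above spells out each property. An equivalent, more compact
+# form (left here as a commented sketch only, not executed by this test)
+# would loop over the expected keys instead:
+#
+# - name: Check unit facts and all properties (loop form)
+#   ansible.builtin.assert:
+#     that: journal_unit.units['systemd-journald.service'][item] is defined
+#   loop: [name, loadstate, activestate, substate, fragmentpath,
+#          mainpid, execmainpid, unitfilestate, unitfilepreset]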
+ +- name: Create dict of properties from shell + ansible.builtin.set_fact: + journald_shell: "{{ dict(journald_prop.stdout_lines | map('split', '=', 1) | list) }}" + +- name: Check properties content + ansible.builtin.assert: + that: + - journal_unit.units['systemd-journald.service'].name == journald_shell.Id + - journal_unit.units['systemd-journald.service'].loadstate == journald_shell.LoadState + - journal_unit.units['systemd-journald.service'].activestate == journald_shell.ActiveState + - journal_unit.units['systemd-journald.service'].substate == journald_shell.SubState + - journal_unit.units['systemd-journald.service'].fragmentpath == journald_shell.FragmentPath + - journal_unit.units['systemd-journald.service'].mainpid == journald_shell.MainPID + - journal_unit.units['systemd-journald.service'].execmainpid == journald_shell.ExecMainPID + - journal_unit.units['systemd-journald.service'].unitfilestate == journald_shell.UnitFileState + - journal_unit.units['systemd-journald.service'].unitfilepreset == journald_shell.UnitFilePreset + success_msg: "Success: Property values are correct." + +- name: Gather systemd-journald.service extra properties + community.general.systemd_info: + unitname: + - systemd-journald.service + extra_properties: + - Description + - Restart + register: journal_extra + +- name: Check new properties + ansible.builtin.assert: + that: + - journal_extra.units is defined + - journal_extra.units['systemd-journald.service'] is defined + - journal_extra.units['systemd-journald.service'].description is defined + - journal_extra.units['systemd-journald.service'].restart is defined + - journal_extra.units['systemd-journald.service'].description == journald_shell.Description + - journal_extra.units['systemd-journald.service'].restart == journald_shell.Restart + success_msg: "Success: Extra property values are correct." 
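+# Note on the key names used above: the module returns property names
+# lowercased, so Description (requested via extra_properties) is read back
+# as .description, and AccuracyUSec further below comes back as
+# .accuracyusec. A commented sketch of the pattern, with a hypothetical
+# choice of unit and property:
+#
+# - community.general.systemd_info:
+#     unitname: [sshd.service]
+#     extra_properties: [TriggeredBy]
+#   register: info
+# - ansible.builtin.debug:
+#     msg: "{{ info.units['sshd.service'].triggeredby }}"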
+ +- name: Gather info using wildcard pattern for services + community.general.systemd_info: + unitname: + - '*.service' + extra_properties: + - Description + register: result_wildcards + +- name: Assert that at least one service unit was returned + ansible.builtin.assert: + that: + - result_wildcards.units | length > 0 + +- name: Gather info using multiple wildcard patterns + community.general.systemd_info: + unitname: + - '*.service' + - 'ssh*' + register: result_multi + +- name: Debug multi-wildcard results + ansible.builtin.debug: + var: result_multi.units + +- name: Assert deduplication of units + ansible.builtin.assert: + that: + - unique_keys | length == all_keys | length + vars: + all_keys: "{{ result_multi.units | dict2items | map(attribute='key') | list }}" + unique_keys: "{{ all_keys | unique }}" + +- name: Gather info of systemd-tmpfiles-clean.timer and extra AccuracyUSec + community.general.systemd_info: + unitname: + - systemd-tmpfiles-clean.timer + extra_properties: + - AccuracyUSec + register: result_timer + +- name: Check timer unit properties + ansible.builtin.assert: + that: + - result_timer.units is defined + - result_timer.units['systemd-tmpfiles-clean.timer'] is defined + - result_timer.units['systemd-tmpfiles-clean.timer'].name is defined + - result_timer.units['systemd-tmpfiles-clean.timer'].loadstate is defined + - result_timer.units['systemd-tmpfiles-clean.timer'].activestate is defined + - result_timer.units['systemd-tmpfiles-clean.timer'].substate is defined + - result_timer.units['systemd-tmpfiles-clean.timer'].fragmentpath is defined + - result_timer.units['systemd-tmpfiles-clean.timer'].unitfilestate is defined + - result_timer.units['systemd-tmpfiles-clean.timer'].unitfilepreset is defined + - result_timer.units['systemd-tmpfiles-clean.timer'].accuracyusec is defined + - result_timer.units['systemd-tmpfiles-clean.timer'].accuracyusec | length > 0 + success_msg: "Success: All properties collected." 
\ No newline at end of file diff --git a/tests/integration/targets/terraform/tasks/complex_variables.yml b/tests/integration/targets/terraform/tasks/complex_variables.yml index 9788a3eed1..81c708e34d 100644 --- a/tests/integration/targets/terraform/tasks/complex_variables.yml +++ b/tests/integration/targets/terraform/tasks/complex_variables.yml @@ -7,7 +7,7 @@ ansible.builtin.file: path: "{{ terraform_project_dir }}/complex_vars" state: directory - mode: 0755 + mode: "0755" - name: copy terraform files to work space ansible.builtin.copy: @@ -49,10 +49,10 @@ one two list_of_lists: - - [ 1 ] - - [ 11, 12, 13 ] - - [ 2 ] - - [ 3 ] + - [1] + - [11, 12, 13] + - [2] + - [3] state: present register: terraform_init_result diff --git a/tests/integration/targets/terraform/tasks/main.yml b/tests/integration/targets/terraform/tasks/main.yml index d04757d8e4..513c3ef225 100644 --- a/tests/integration/targets/terraform/tasks/main.yml +++ b/tests/integration/targets/terraform/tasks/main.yml @@ -8,20 +8,20 @@ - name: Check for existing Terraform in path block: - - name: Check if terraform is present in path - ansible.builtin.command: "command -v terraform" - register: terraform_binary_path - ignore_errors: true + - name: Check if terraform is present in path + ansible.builtin.command: "command -v terraform" + register: terraform_binary_path + ignore_errors: true - - name: Check Terraform version - ansible.builtin.command: terraform version - register: terraform_version_output - when: terraform_binary_path.rc == 0 + - name: Check Terraform version + ansible.builtin.command: terraform version + register: terraform_version_output + when: terraform_binary_path.rc == 0 - - name: Set terraform version - ansible.builtin.set_fact: - terraform_version_installed: "{{ terraform_version_output.stdout | regex_search('(?!Terraform.*v)([0-9]+\\.[0-9]+\\.[0-9]+)') }}" - when: terraform_version_output.changed + - name: Set terraform version + ansible.builtin.set_fact: + terraform_version_installed: "{{ terraform_version_output.stdout | regex_search('(?!Terraform.*v)([0-9]+\\.[0-9]+\\.[0-9]+)') }}" + when: terraform_version_output.changed # This block handles the tasks of installing the Terraform binary. This happens if there is no existing # terraform in $PATH OR version does not match `terraform_version`. @@ -29,22 +29,22 @@ - name: Execute Terraform install tasks block: - - name: Install Terraform - ansible.builtin.debug: - msg: "Installing terraform {{ terraform_version }}, found: {{ terraform_version_installed | default('no terraform binary found') }}." + - name: Install Terraform + ansible.builtin.debug: + msg: "Installing terraform {{ terraform_version }}, found: {{ terraform_version_installed | default('no terraform binary found') }}." 
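+    # For reference: `terraform version` prints a banner such as
+    # "Terraform v1.5.7", and the regex_search in "Set terraform version"
+    # above reduces it to the bare "1.5.7" that the when: guard below
+    # compares against terraform_version. A commented sketch of that
+    # extraction (simplified regex, hypothetical v1.5.7 binary):
+    #
+    # - ansible.builtin.assert:
+    #     that: >-
+    #       ('Terraform v1.5.7' | regex_search('([0-9]+\.[0-9]+\.[0-9]+)'))
+    #       == '1.5.7'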
- - name: Ensure unzip is present - ansible.builtin.package: - name: unzip - state: present + - name: Ensure unzip is present + ansible.builtin.package: + name: unzip + state: present - - name: Install Terraform binary - ansible.builtin.unarchive: - src: "{{ terraform_url }}" - dest: "{{ remote_tmp_dir }}" - mode: 0755 - remote_src: true - validate_certs: "{{ validate_certs }}" + - name: Install Terraform binary + ansible.builtin.unarchive: + src: "{{ terraform_url }}" + dest: "{{ remote_tmp_dir }}" + mode: "0755" + remote_src: true + validate_certs: "{{ validate_certs }}" when: terraform_version_installed is not defined or terraform_version_installed != terraform_version diff --git a/tests/integration/targets/terraform/tasks/test_provider_upgrade.yml b/tests/integration/targets/terraform/tasks/test_provider_upgrade.yml index b20182c9f3..28d1d8ea22 100644 --- a/tests/integration/targets/terraform/tasks/test_provider_upgrade.yml +++ b/tests/integration/targets/terraform/tasks/test_provider_upgrade.yml @@ -7,7 +7,7 @@ file: path: "{{ terraform_project_dir }}/{{ item['name'] }}" state: directory - mode: 0755 + mode: "0755" loop: "{{ terraform_provider_versions }}" loop_control: index_var: provider_index diff --git a/tests/integration/targets/terraform/vars/main.yml b/tests/integration/targets/terraform/vars/main.yml index 1032adee4f..17032ea81f 100644 --- a/tests/integration/targets/terraform/vars/main.yml +++ b/tests/integration/targets/terraform/vars/main.yml @@ -24,11 +24,11 @@ terraform_provider_upgrade: true # list of dicts containing Terraform providers that will be tested # The null provider is a good candidate, as it's small and has no external dependencies -terraform_provider_versions: - - name: "null" +terraform_provider_versions: + - name: "null" source: "hashicorp/null" version: ">=2.0.0, < 3.0.0" - - name: "null" + - name: "null" source: "hashicorp/null" version: ">=3.0.0" diff --git a/tests/integration/targets/test_a_module/runme.yml b/tests/integration/targets/test_a_module/runme.yml index 4b7a5ec2ce..6ab0a2f7fa 100644 --- a/tests/integration/targets/test_a_module/runme.yml +++ b/tests/integration/targets/test_a_module/runme.yml @@ -39,4 +39,3 @@ - "'onyx_pfc_interface' is not community.general.a_module" # Tombstoned module - "'community.general.docker_image_facts' is not community.general.a_module" - when: ansible_version.string is version('2.10.0', '>=') diff --git a/tests/integration/targets/test_ansible_type/tasks/tasks.yml b/tests/integration/targets/test_ansible_type/tasks/tasks.yml index 261256c0d4..443b36d36f 100644 --- a/tests/integration/targets/test_ansible_type/tasks/tasks.yml +++ b/tests/integration/targets/test_ansible_type/tasks/tasks.yml @@ -10,20 +10,22 @@ that: data is community.general.ansible_type(dtype) success_msg: '"abc" is {{ dtype }}' fail_msg: '"abc" is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: data: "abc" result: '{{ data | community.general.reveal_ansible_type }}' - dtype: 'AnsibleUnicode' + dtype: + - 'AnsibleUnicode' + - '_AnsibleTaggedStr' - name: String. AnsibleUnicode alias str. 
assert: that: data is community.general.ansible_type(dtype, alias) success_msg: '"abc" is {{ dtype }}' fail_msg: '"abc" is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: - alias: {"AnsibleUnicode": "str"} + alias: {"AnsibleUnicode": "str", "_AnsibleTaggedStr": "str"} data: "abc" result: '{{ data | community.general.reveal_ansible_type(alias) }}' dtype: 'str' @@ -33,22 +35,26 @@ that: data is community.general.ansible_type(dtype) success_msg: '["a", "b", "c"] is {{ dtype }}' fail_msg: '["a", "b", "c"] is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: data: ["a", "b", "c"] result: '{{ data | community.general.reveal_ansible_type }}' - dtype: 'list[AnsibleUnicode]' + dtype: + - 'list[AnsibleUnicode]' + - 'list[_AnsibleTaggedStr]' - name: Dictionary. All keys are AnsibleUnicode. All values are AnsibleUnicode. assert: that: data is community.general.ansible_type(dtype) success_msg: '{"a": "foo", "b": "bar", "c": "baz"} is {{ dtype }}' fail_msg: '{"a": "foo", "b": "bar", "c": "baz"} is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: data: {"a": "foo", "b": "bar", "c": "baz"} result: '{{ data | community.general.reveal_ansible_type }}' - dtype: 'dict[AnsibleUnicode, AnsibleUnicode]' + dtype: + - 'dict[AnsibleUnicode, AnsibleUnicode]' + - 'dict[_AnsibleTaggedStr, _AnsibleTaggedStr]' # No substitution and no alias. Type of strings is str # ---------------------------------------------------- @@ -57,8 +63,8 @@ assert: that: '"abc" is community.general.ansible_type(dtype)' success_msg: '"abc" is {{ dtype }}' - fail_msg: '"abc" is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + fail_msg: '"abc" is {{ result }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: result: '{{ "abc" | community.general.reveal_ansible_type }}' dtype: str @@ -67,8 +73,8 @@ assert: that: '123 is community.general.ansible_type(dtype)' success_msg: '123 is {{ dtype }}' - fail_msg: '123 is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + fail_msg: '123 is {{ result }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: result: '{{ 123 | community.general.reveal_ansible_type }}' dtype: int @@ -77,8 +83,8 @@ assert: that: '123.45 is community.general.ansible_type(dtype)' success_msg: '123.45 is {{ dtype }}' - fail_msg: '123.45 is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + fail_msg: '123.45 is {{ result }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: result: '{{ 123.45 | community.general.reveal_ansible_type }}' dtype: float @@ -87,8 +93,8 @@ assert: that: 'true is community.general.ansible_type(dtype)' success_msg: 'true is {{ dtype }}' - fail_msg: 'true is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + fail_msg: 'true is {{ result }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: result: '{{ true | community.general.reveal_ansible_type }}' dtype: bool @@ -97,8 +103,8 @@ assert: that: '["a", "b", "c"] is community.general.ansible_type(dtype)' success_msg: '["a", "b", "c"] is {{ dtype }}' - fail_msg: '["a", "b", "c"] is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + fail_msg: '["a", "b", "c"] is {{ result }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: result: '{{ ["a", "b", "c"] | community.general.reveal_ansible_type }}' dtype: list[str] @@ -107,8 +113,8 @@ assert: that: '[{"a": 1}, {"b": 2}] is 
community.general.ansible_type(dtype)' success_msg: '[{"a": 1}, {"b": 2}] is {{ dtype }}' - fail_msg: '[{"a": 1}, {"b": 2}] is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + fail_msg: '[{"a": 1}, {"b": 2}] is {{ result }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: result: '{{ [{"a": 1}, {"b": 2}] | community.general.reveal_ansible_type }}' dtype: list[dict] @@ -117,8 +123,8 @@ assert: that: '{"a": 1} is community.general.ansible_type(dtype)' success_msg: '{"a": 1} is {{ dtype }}' - fail_msg: '{"a": 1} is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + fail_msg: '{"a": 1} is {{ result }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: result: '{{ {"a": 1} | community.general.reveal_ansible_type }}' dtype: dict[str, int] @@ -127,8 +133,8 @@ assert: that: '{"a": 1, "b": 2} is community.general.ansible_type(dtype)' success_msg: '{"a": 1, "b": 2} is {{ dtype }}' - fail_msg: '{"a": 1, "b": 2} is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + fail_msg: '{"a": 1, "b": 2} is {{ result }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: result: '{{ {"a": 1, "b": 2} | community.general.reveal_ansible_type }}' dtype: dict[str, int] @@ -141,9 +147,12 @@ that: data is community.general.ansible_type(dtype, alias) success_msg: '{"1": "a", "b": "b"} is {{ dtype }}' fail_msg: '{"1": "a", "b": "b"} is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: - alias: {"AnsibleUnicode": "str"} + alias: + AnsibleUnicode: str + _AnsibleTaggedStr: str + _AnsibleTaggedInt: int data: {1: 'a', 'b': 'b'} result: '{{ data | community.general.reveal_ansible_type(alias) }}' dtype: dict[int|str, str] @@ -153,9 +162,12 @@ that: data is community.general.ansible_type(dtype, alias) success_msg: '{"1": "a", "2": "b"} is {{ dtype }}' fail_msg: '{"1": "a", "2": "b"} is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: - alias: {"AnsibleUnicode": "str"} + alias: + AnsibleUnicode: str + _AnsibleTaggedStr: str + _AnsibleTaggedInt: int data: {1: 'a', 2: 'b'} result: '{{ data | community.general.reveal_ansible_type(alias) }}' dtype: dict[int, str] @@ -165,10 +177,14 @@ that: data is community.general.ansible_type(dtype, alias) success_msg: '{"a": 1, "b": 1.1, "c": "abc", "d": true, "e": ["x", "y", "z"], "f": {"x": 1, "y": 2}} is {{ dtype }}' fail_msg: '{"a": 1, "b": 1.1, "c": "abc", "d": true, "e": ["x", "y", "z"], "f": {"x": 1, "y": 2}} is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: - alias: {"AnsibleUnicode": "str"} - data: {'a': 1, 'b': 1.1, 'c': 'abc', 'd': True, 'e': ['x', 'y', 'z'], 'f': {'x': 1, 'y': 2}} + alias: + AnsibleUnicode: str + _AnsibleTaggedStr: str + _AnsibleTaggedInt: int + _AnsibleTaggedFloat: float + data: {'a': 1, 'b': 1.1, 'c': 'abc', 'd': true, 'e': ['x', 'y', 'z'], 'f': {'x': 1, 'y': 2}} result: '{{ data | community.general.reveal_ansible_type(alias) }}' dtype: dict[str, bool|dict|float|int|list|str] @@ -177,10 +193,14 @@ that: data is community.general.ansible_type(dtype, alias) success_msg: '[1, 2, 1.1, "abc", true, ["x", "y", "z"], {"x": 1, "y": 2}] is {{ dtype }}' fail_msg: '[1, 2, 1.1, "abc", true, ["x", "y", "z"], {"x": 1, "y": 2}] is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: - alias: {"AnsibleUnicode": "str"} - data: [1, 2, 1.1, 'abc', True, ['x', 'y', 'z'], 
{'x': 1, 'y': 2}] + alias: + AnsibleUnicode: str + _AnsibleTaggedStr: str + _AnsibleTaggedInt: int + _AnsibleTaggedFloat: float + data: [1, 2, 1.1, 'abc', true, ['x', 'y', 'z'], {'x': 1, 'y': 2}] result: '{{ data | community.general.reveal_ansible_type(alias) }}' dtype: list[bool|dict|float|int|list|str] @@ -192,33 +212,44 @@ that: data is community.general.ansible_type(dtype) success_msg: '"abc" is {{ dtype }}' fail_msg: '"abc" is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: data: abc result: '{{ data | community.general.reveal_ansible_type }}' - dtype: ['AnsibleUnicode', 'str'] + dtype: + - 'AnsibleUnicode' + - '_AnsibleTaggedStr' + - 'str' - name: float or int assert: that: data is community.general.ansible_type(dtype) success_msg: '123 is {{ dtype }}' fail_msg: '123 is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: data: 123 result: '{{ data | community.general.reveal_ansible_type }}' - dtype: ['float', 'int'] + dtype: + - 'float' + - 'int' + - '_AnsibleTaggedInt' + - '_AnsibleTaggedFloat' - name: float or int assert: that: data is community.general.ansible_type(dtype) success_msg: '123.45 is {{ dtype }}' fail_msg: '123.45 is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: data: 123.45 result: '{{ data | community.general.reveal_ansible_type }}' - dtype: ['float', 'int'] + dtype: + - 'float' + - 'int' + - '_AnsibleTaggedInt' + - '_AnsibleTaggedFloat' # Multiple alias # -------------- @@ -228,9 +259,13 @@ that: data is community.general.ansible_type(dtype, alias) success_msg: '123 is {{ dtype }}' fail_msg: '123 is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: - alias: {"int": "number", "float": "number"} + alias: + int: number + float: number + _AnsibleTaggedInt: number + _AnsibleTaggedFloat: number data: 123 result: '{{ data | community.general.reveal_ansible_type(alias) }}' dtype: number @@ -240,9 +275,13 @@ that: data is community.general.ansible_type(dtype, alias) success_msg: '123.45 is {{ dtype }}' fail_msg: '123.45 is {{ result }}' - quiet: '{{ quiet_test | d(true) | bool }}' + quiet: '{{ quiet_test | default(true) | bool }}' vars: - alias: {"int": "number", "float": "number"} + alias: + int: number + float: number + _AnsibleTaggedInt: number + _AnsibleTaggedFloat: number data: 123.45 result: '{{ data | community.general.reveal_ansible_type(alias) }}' dtype: number diff --git a/tests/integration/targets/test_fqdn_valid/tasks/fqdn_valid_1.yml b/tests/integration/targets/test_fqdn_valid/tasks/fqdn_valid_1.yml index 36cfffad0c..3a839a0e6a 100644 --- a/tests/integration/targets/test_fqdn_valid/tasks/fqdn_valid_1.yml +++ b/tests/integration/targets/test_fqdn_valid/tasks/fqdn_valid_1.yml @@ -6,7 +6,7 @@ - name: Debug ansible_version ansible.builtin.debug: var: ansible_version - when: debug_test|d(false)|bool + when: debug_test|default(false)|bool tags: t0 - name: 1. Test valid hostnames. Default options. @@ -14,7 +14,7 @@ - name: "1. Default min_labels=1, allow_underscores=False" ansible.builtin.debug: msg: "hosts_invalid: {{ hosts_invalid }}" - when: debug_test|d(false)|bool + when: debug_test|default(false)|bool - name: Assert ansible.builtin.assert: that: hosts_invalid|difference(result)|length == 0 @@ -29,7 +29,7 @@ - name: "2. 
allow_underscores=True, default min_labels=1" ansible.builtin.debug: msg: "hosts_invalid: {{ hosts_invalid }}" - when: debug_test|d(false)|bool + when: debug_test|default(false)|bool - name: Assert ansible.builtin.assert: that: hosts_invalid|difference(result)|length == 0 @@ -45,7 +45,7 @@ - name: "3. allow_underscores=True, min_labels=2" ansible.builtin.debug: msg: "hosts_invalid: {{ hosts_invalid }}" - when: debug_test|d(false)|bool + when: debug_test|default(false)|bool - name: Assert ansible.builtin.assert: that: hosts_invalid|difference(result)|length == 0 diff --git a/tests/integration/targets/timezone/tasks/test.yml b/tests/integration/targets/timezone/tasks/test.yml index 975526800e..1a6787484d 100644 --- a/tests/integration/targets/timezone/tasks/test.yml +++ b/tests/integration/targets/timezone/tasks/test.yml @@ -431,7 +431,7 @@ - hwclock_set_local_deleted_adjtime_local.changed - hwclock_set_local_deleted_adjtime_local.diff.after.hwclock == 'local' - hwclock_set_local_deleted_adjtime_local.diff.before.hwclock == 'UTC' - + ## ## test set hwclock with conf file deleted diff --git a/tests/integration/targets/ufw/aliases b/tests/integration/targets/ufw/aliases index b1dbfd2eb1..07227edc3f 100644 --- a/tests/integration/targets/ufw/aliases +++ b/tests/integration/targets/ufw/aliases @@ -14,6 +14,9 @@ skip/rhel9.1 # FIXME skip/rhel9.2 # FIXME skip/rhel9.3 # FIXME skip/rhel9.4 # FIXME +skip/rhel9.5 # FIXME +skip/rhel9.6 # FIXME +skip/rhel10.0 # FIXME skip/docker needs/root needs/target/setup_epel diff --git a/tests/integration/targets/ufw/tasks/main.yml b/tests/integration/targets/ufw/tasks/main.yml index 5fba2fa4d7..83e6a5138a 100644 --- a/tests/integration/targets/ufw/tasks/main.yml +++ b/tests/integration/targets/ufw/tasks/main.yml @@ -27,19 +27,19 @@ # Run the tests - block: - - include_tasks: run-test.yml - with_fileglob: - - "tests/*.yml" + - include_tasks: run-test.yml + with_fileglob: + - "tests/*.yml" become: true # Cleanup always: - - pause: - # ufw creates backups of the rule files with a timestamp; if reset is called - # twice in a row fast enough (so that both timestamps are taken in the same second), - # the second call will notice that the backup files are already there and fail. - # Waiting one second fixes this problem. - seconds: 1 - - name: Reset ufw to factory defaults and disable - ufw: - state: reset + - pause: + # ufw creates backups of the rule files with a timestamp; if reset is called + # twice in a row fast enough (so that both timestamps are taken in the same second), + # the second call will notice that the backup files are already there and fail. + # Waiting one second fixes this problem. 
+ seconds: 1 + - name: Reset ufw to factory defaults and disable + ufw: + state: reset diff --git a/tests/integration/targets/ufw/tasks/tests/basic.yml b/tests/integration/targets/ufw/tasks/tests/basic.yml index 8c179d7aed..3be130da78 100644 --- a/tests/integration/targets/ufw/tasks/tests/basic.yml +++ b/tests/integration/targets/ufw/tasks/tests/basic.yml @@ -27,10 +27,10 @@ register: enable_idem_check - assert: that: - - enable_check is changed - - enable is changed - - enable_idem is not changed - - enable_idem_check is not changed + - enable_check is changed + - enable is changed + - enable_idem is not changed + - enable_idem_check is not changed # ############################################ - name: ipv4 allow (check mode) @@ -61,10 +61,10 @@ register: ipv4_allow_idem_check - assert: that: - - ipv4_allow_check is changed - - ipv4_allow is changed - - ipv4_allow_idem is not changed - - ipv4_allow_idem_check is not changed + - ipv4_allow_check is changed + - ipv4_allow is changed + - ipv4_allow_idem is not changed + - ipv4_allow_idem_check is not changed # ############################################ - name: delete ipv4 allow (check mode) @@ -99,10 +99,10 @@ register: delete_ipv4_allow_idem_check - assert: that: - - delete_ipv4_allow_check is changed - - delete_ipv4_allow is changed - - delete_ipv4_allow_idem is not changed - - delete_ipv4_allow_idem_check is not changed + - delete_ipv4_allow_check is changed + - delete_ipv4_allow is changed + - delete_ipv4_allow_idem is not changed + - delete_ipv4_allow_idem_check is not changed # ############################################ - name: ipv6 allow (check mode) @@ -133,10 +133,10 @@ register: ipv6_allow_idem_check - assert: that: - - ipv6_allow_check is changed - - ipv6_allow is changed - - ipv6_allow_idem is not changed - - ipv6_allow_idem_check is not changed + - ipv6_allow_check is changed + - ipv6_allow is changed + - ipv6_allow_idem is not changed + - ipv6_allow_idem_check is not changed # ############################################ - name: delete ipv6 allow (check mode) @@ -171,10 +171,10 @@ register: delete_ipv6_allow_idem_check - assert: that: - - delete_ipv6_allow_check is changed - - delete_ipv6_allow is changed - - delete_ipv6_allow_idem is not changed - - delete_ipv6_allow_idem_check is not changed + - delete_ipv6_allow_check is changed + - delete_ipv6_allow is changed + - delete_ipv6_allow_idem is not changed + - delete_ipv6_allow_idem_check is not changed # ############################################ @@ -206,10 +206,10 @@ register: ipv4_allow_idem_check - assert: that: - - ipv4_allow_check is changed - - ipv4_allow is changed - - ipv4_allow_idem is not changed - - ipv4_allow_idem_check is not changed + - ipv4_allow_check is changed + - ipv4_allow is changed + - ipv4_allow_idem is not changed + - ipv4_allow_idem_check is not changed # ############################################ - name: delete ipv4 allow (check mode) @@ -244,10 +244,10 @@ register: delete_ipv4_allow_idem_check - assert: that: - - delete_ipv4_allow_check is changed - - delete_ipv4_allow is changed - - delete_ipv4_allow_idem is not changed - - delete_ipv4_allow_idem_check is not changed + - delete_ipv4_allow_check is changed + - delete_ipv4_allow is changed + - delete_ipv4_allow_idem is not changed + - delete_ipv4_allow_idem_check is not changed # ############################################ - name: ipv6 allow (check mode) @@ -278,10 +278,10 @@ register: ipv6_allow_idem_check - assert: that: - - ipv6_allow_check is changed - - ipv6_allow is changed - - 
ipv6_allow_idem is not changed - - ipv6_allow_idem_check is not changed + - ipv6_allow_check is changed + - ipv6_allow is changed + - ipv6_allow_idem is not changed + - ipv6_allow_idem_check is not changed # ############################################ - name: delete ipv6 allow (check mode) @@ -316,10 +316,10 @@ register: delete_ipv6_allow_idem_check - assert: that: - - delete_ipv6_allow_check is changed - - delete_ipv6_allow is changed - - delete_ipv6_allow_idem is not changed - - delete_ipv6_allow_idem_check is not changed + - delete_ipv6_allow_check is changed + - delete_ipv6_allow is changed + - delete_ipv6_allow_idem is not changed + - delete_ipv6_allow_idem_check is not changed # ############################################ - name: Reload ufw @@ -333,8 +333,8 @@ register: reload_check - assert: that: - - reload is changed - - reload_check is changed + - reload is changed + - reload_check is changed # ############################################ - name: Disable (check mode) @@ -357,10 +357,10 @@ register: disable_idem_check - assert: that: - - disable_check is changed - - disable is changed - - disable_idem is not changed - - disable_idem_check is not changed + - disable_check is changed + - disable is changed + - disable_idem is not changed + - disable_idem_check is not changed # ############################################ - name: Re-enable @@ -400,7 +400,7 @@ register: reset_idem_check - assert: that: - - reset_check is changed - - reset is changed - - reset_idem is changed - - reset_idem_check is changed + - reset_check is changed + - reset is changed + - reset_idem is changed + - reset_idem_check is changed diff --git a/tests/integration/targets/ufw/tasks/tests/global-state.yml b/tests/integration/targets/ufw/tasks/tests/global-state.yml index f5f1007510..3913a552f3 100644 --- a/tests/integration/targets/ufw/tasks/tests/global-state.yml +++ b/tests/integration/targets/ufw/tasks/tests/global-state.yml @@ -52,14 +52,14 @@ LC_ALL: C - assert: that: - - logging_check is changed - - logging is changed - - "ufw_logging.stdout == 'Logging: on (low)'" - - logging_idem is not changed - - logging_idem_check is not changed - - "ufw_logging_change.stdout == 'Logging: on (full)'" - - logging_change is changed - - logging_change_check is changed + - logging_check is changed + - logging is changed + - "ufw_logging.stdout == 'Logging: on (low)'" + - logging_idem is not changed + - logging_idem_check is not changed + - "ufw_logging_change.stdout == 'Logging: on (full)'" + - logging_change is changed + - logging_change_check is changed # ############################################ - name: Default (check mode) @@ -138,17 +138,17 @@ register: default_change_implicit_idem - assert: that: - - default_check is changed - - default is changed - - "'reject (incoming)' in ufw_defaults.stdout" - - default_idem is not changed - - default_idem_check is not changed - - default_change_check is changed - - default_change is changed - - "'allow (incoming)' in ufw_defaults_change.stdout" - - default_change_2 is changed - - default_change_implicit_check is changed - - default_change_implicit is changed - - default_change_implicit_idem_check is not changed - - default_change_implicit_idem is not changed - - "'allow (incoming)' in ufw_defaults_change_implicit.stdout" + - default_check is changed + - default is changed + - "'reject (incoming)' in ufw_defaults.stdout" + - default_idem is not changed + - default_idem_check is not changed + - default_change_check is changed + - default_change is changed + - "'allow 
(incoming)' in ufw_defaults_change.stdout" + - default_change_2 is changed + - default_change_implicit_check is changed + - default_change_implicit is changed + - default_change_implicit_idem_check is not changed + - default_change_implicit_idem is not changed + - "'allow (incoming)' in ufw_defaults_change_implicit.stdout" diff --git a/tests/integration/targets/ufw/tasks/tests/insert_relative_to.yml b/tests/integration/targets/ufw/tasks/tests/insert_relative_to.yml index 67328a0e3f..975600036f 100644 --- a/tests/integration/targets/ufw/tasks/tests/insert_relative_to.yml +++ b/tests/integration/targets/ufw/tasks/tests/insert_relative_to.yml @@ -71,14 +71,14 @@ register: ufw_status - assert: that: - - ufw_status.stdout_lines == expected_stdout + - ufw_status.stdout_lines == expected_stdout vars: expected_stdout: - - "0.0.0.0 10" - - "0.0.0.0 22" - - "0.0.0.0 11" - - "0.0.0.0 23" - - ":: 110" - - ":: 122" - - ":: 111" - - ":: 123" + - "0.0.0.0 10" + - "0.0.0.0 22" + - "0.0.0.0 11" + - "0.0.0.0 23" + - ":: 110" + - ":: 122" + - ":: 111" + - ":: 123" diff --git a/tests/integration/targets/wakeonlan/tasks/main.yml b/tests/integration/targets/wakeonlan/tasks/main.yml index 0597480318..22bb0706ef 100644 --- a/tests/integration/targets/wakeonlan/tasks/main.yml +++ b/tests/integration/targets/wakeonlan/tasks/main.yml @@ -28,8 +28,8 @@ - name: Check error message assert: that: - - incorrect_mac_length is failed - - incorrect_mac_length.msg is search('Incorrect MAC address length') + - incorrect_mac_length is failed + - incorrect_mac_length.msg is search('Incorrect MAC address length') - name: Provide an incorrect MAC format wakeonlan: @@ -41,8 +41,8 @@ - name: Check error message assert: that: - - incorrect_mac_format is failed - - incorrect_mac_format.msg is search('Incorrect MAC address format') + - incorrect_mac_format is failed + - incorrect_mac_format.msg is search('Incorrect MAC address format') - name: Cause a socket error wakeonlan: @@ -54,5 +54,5 @@ - name: Check error message assert: that: - - incorrect_broadcast_address is failed - - incorrect_broadcast_address.msg is search('not known|Name does not resolve') + - incorrect_broadcast_address is failed + - incorrect_broadcast_address.msg is search('not known|Name does not resolve') diff --git a/tests/integration/targets/xattr/tasks/test.yml b/tests/integration/targets/xattr/tasks/test.yml index 7fe852d77a..dfc4da60e4 100644 --- a/tests/integration/targets/xattr/tasks/test.yml +++ b/tests/integration/targets/xattr/tasks/test.yml @@ -23,11 +23,11 @@ - assert: that: - - "xattr_set_result.changed" - - "xattr_get_all_result['xattr']['user.foo'] == 'bar'" - - "not xattr_get_all_result.changed" - - "xattr_get_specific_result['xattr']['user.foo'] == 'bar'" - - "not xattr_get_specific_result.changed" + - "xattr_set_result.changed" + - "xattr_get_all_result['xattr']['user.foo'] == 'bar'" + - "not xattr_get_all_result.changed" + - "xattr_get_specific_result['xattr']['user.foo'] == 'bar'" + - "not xattr_get_specific_result.changed" - name: Set attribute again xattr: @@ -39,7 +39,7 @@ - assert: that: - - "not xattr_set_again_result.changed" + - "not xattr_set_again_result.changed" - name: Unset attribute xattr: @@ -55,9 +55,9 @@ - assert: that: - - "xattr_unset_result.changed" - - "xattr_get_after_unset_result['xattr'] == {}" - - "not xattr_get_after_unset_result.changed" + - "xattr_unset_result.changed" + - "xattr_get_after_unset_result['xattr'] == {}" + - "not xattr_get_after_unset_result.changed" - name: Unset attribute again xattr: @@ -69,4 +69,4 @@ 
- assert: that: - - "not xattr_set_again_result.changed" + - "not xattr_set_again_result.changed" diff --git a/tests/integration/targets/xfs_quota/tasks/gquota.yml b/tests/integration/targets/xfs_quota/tasks/gquota.yml index caca1d341d..3fca599221 100644 --- a/tests/integration/targets/xfs_quota/tasks/gquota.yml +++ b/tests/integration/targets/xfs_quota/tasks/gquota.yml @@ -12,136 +12,136 @@ dev: '{{ remote_tmp_dir }}/img-gquota' fstype: xfs - block: - - name: Mount filesystem - become: true - ansible.posix.mount: - fstab: '{{ remote_tmp_dir }}/fstab' - src: '{{ remote_tmp_dir }}/img-gquota' - path: '{{ remote_tmp_dir }}/gquota' - fstype: xfs - opts: gquota - state: mounted - - name: Apply default group limits - xfs_quota: - bsoft: '{{ gquota_default_bsoft }}' - bhard: '{{ gquota_default_bhard }}' - isoft: '{{ gquota_default_isoft }}' - ihard: '{{ gquota_default_ihard }}' - mountpoint: '{{ remote_tmp_dir }}/gquota' - rtbsoft: '{{ gquota_default_rtbsoft }}' - rtbhard: '{{ gquota_default_rtbhard }}' - type: group - become: true - register: test_gquota_default_before - - name: Assert default group limits results - assert: - that: - - test_gquota_default_before.changed - - test_gquota_default_before.bsoft == gquota_default_bsoft|human_to_bytes - - test_gquota_default_before.bhard == gquota_default_bhard|human_to_bytes - - test_gquota_default_before.isoft == gquota_default_isoft - - test_gquota_default_before.ihard == gquota_default_ihard - - test_gquota_default_before.rtbsoft == gquota_default_rtbsoft|human_to_bytes - - test_gquota_default_before.rtbhard == gquota_default_rtbhard|human_to_bytes - - name: Apply group limits - xfs_quota: - bsoft: '{{ gquota_group_bsoft }}' - bhard: '{{ gquota_group_bhard }}' - isoft: '{{ gquota_group_isoft }}' - ihard: '{{ gquota_group_ihard }}' - mountpoint: '{{ remote_tmp_dir }}/gquota' - name: xfsquotauser - rtbsoft: '{{ gquota_group_rtbsoft }}' - rtbhard: '{{ gquota_group_rtbhard }}' - type: group - become: true - register: test_gquota_group_before - - name: Assert group limits results for xfsquotauser - assert: - that: - - test_gquota_group_before.changed - - test_gquota_group_before.bsoft == gquota_group_bsoft|human_to_bytes - - test_gquota_group_before.bhard == gquota_group_bhard|human_to_bytes - - test_gquota_group_before.isoft == gquota_group_isoft - - test_gquota_group_before.ihard == gquota_group_ihard - - test_gquota_group_before.rtbsoft == gquota_group_rtbsoft|human_to_bytes - - test_gquota_group_before.rtbhard == gquota_group_rtbhard|human_to_bytes - - name: Re-apply default group limits - xfs_quota: - bsoft: '{{ gquota_default_bsoft }}' - bhard: '{{ gquota_default_bhard }}' - isoft: '{{ gquota_default_isoft }}' - ihard: '{{ gquota_default_ihard }}' - mountpoint: '{{ remote_tmp_dir }}/gquota' - rtbsoft: '{{ gquota_default_rtbsoft }}' - rtbhard: '{{ gquota_default_rtbhard }}' - type: group - become: true - register: test_gquota_default_after - - name: Assert default group limits results after re-apply - assert: - that: - - not test_gquota_default_after.changed - - name: Re-apply group limits - xfs_quota: - bsoft: '{{ gquota_group_bsoft }}' - bhard: '{{ gquota_group_bhard }}' - isoft: '{{ gquota_group_isoft }}' - ihard: '{{ gquota_group_ihard }}' - mountpoint: '{{ remote_tmp_dir }}/gquota' - name: xfsquotauser - rtbsoft: '{{ gquota_group_rtbsoft }}' - rtbhard: '{{ gquota_group_rtbhard }}' - type: group - become: true - register: test_gquota_group_after - - name: Assert group limits results for xfsquotauser after re-apply - assert: - that: - - not 
test_gquota_group_after.changed - - name: Reset default group limits - xfs_quota: - mountpoint: '{{ remote_tmp_dir }}/gquota' - state: absent - type: group - become: true - register: test_reset_gquota_default - - name: Assert reset of default group limits results - assert: - that: - - test_reset_gquota_default.changed - - test_reset_gquota_default.bsoft == 0 - - test_reset_gquota_default.bhard == 0 - - test_reset_gquota_default.isoft == 0 - - test_reset_gquota_default.ihard == 0 - - test_reset_gquota_default.rtbsoft == 0 - - test_reset_gquota_default.rtbhard == 0 - - name: Reset group limits for xfsquotauser - xfs_quota: - mountpoint: '{{ remote_tmp_dir }}/gquota' - name: xfsquotauser - state: absent - type: group - become: true - register: test_reset_gquota_group - - name: Assert reset of default group limits results - assert: - that: - - test_reset_gquota_group.changed - - test_reset_gquota_group.bsoft == 0 - - test_reset_gquota_group.bhard == 0 - - test_reset_gquota_group.isoft == 0 - - test_reset_gquota_group.ihard == 0 - - test_reset_gquota_group.rtbsoft == 0 - - test_reset_gquota_group.rtbhard == 0 + - name: Mount filesystem + become: true + ansible.posix.mount: + fstab: '{{ remote_tmp_dir }}/fstab' + src: '{{ remote_tmp_dir }}/img-gquota' + path: '{{ remote_tmp_dir }}/gquota' + fstype: xfs + opts: gquota + state: mounted + - name: Apply default group limits + xfs_quota: + bsoft: '{{ gquota_default_bsoft }}' + bhard: '{{ gquota_default_bhard }}' + isoft: '{{ gquota_default_isoft }}' + ihard: '{{ gquota_default_ihard }}' + mountpoint: '{{ remote_tmp_dir }}/gquota' + rtbsoft: '{{ gquota_default_rtbsoft }}' + rtbhard: '{{ gquota_default_rtbhard }}' + type: group + become: true + register: test_gquota_default_before + - name: Assert default group limits results + assert: + that: + - test_gquota_default_before.changed + - test_gquota_default_before.bsoft == gquota_default_bsoft|human_to_bytes + - test_gquota_default_before.bhard == gquota_default_bhard|human_to_bytes + - test_gquota_default_before.isoft == gquota_default_isoft + - test_gquota_default_before.ihard == gquota_default_ihard + - test_gquota_default_before.rtbsoft == gquota_default_rtbsoft|human_to_bytes + - test_gquota_default_before.rtbhard == gquota_default_rtbhard|human_to_bytes + - name: Apply group limits + xfs_quota: + bsoft: '{{ gquota_group_bsoft }}' + bhard: '{{ gquota_group_bhard }}' + isoft: '{{ gquota_group_isoft }}' + ihard: '{{ gquota_group_ihard }}' + mountpoint: '{{ remote_tmp_dir }}/gquota' + name: xfsquotauser + rtbsoft: '{{ gquota_group_rtbsoft }}' + rtbhard: '{{ gquota_group_rtbhard }}' + type: group + become: true + register: test_gquota_group_before + - name: Assert group limits results for xfsquotauser + assert: + that: + - test_gquota_group_before.changed + - test_gquota_group_before.bsoft == gquota_group_bsoft|human_to_bytes + - test_gquota_group_before.bhard == gquota_group_bhard|human_to_bytes + - test_gquota_group_before.isoft == gquota_group_isoft + - test_gquota_group_before.ihard == gquota_group_ihard + - test_gquota_group_before.rtbsoft == gquota_group_rtbsoft|human_to_bytes + - test_gquota_group_before.rtbhard == gquota_group_rtbhard|human_to_bytes + - name: Re-apply default group limits + xfs_quota: + bsoft: '{{ gquota_default_bsoft }}' + bhard: '{{ gquota_default_bhard }}' + isoft: '{{ gquota_default_isoft }}' + ihard: '{{ gquota_default_ihard }}' + mountpoint: '{{ remote_tmp_dir }}/gquota' + rtbsoft: '{{ gquota_default_rtbsoft }}' + rtbhard: '{{ gquota_default_rtbhard }}' + type: group + 
become: true
+      register: test_gquota_default_after
+    - name: Assert default group limits results after re-apply
+      assert:
+        that:
+          - not test_gquota_default_after.changed
+    - name: Re-apply group limits
+      xfs_quota:
+        bsoft: '{{ gquota_group_bsoft }}'
+        bhard: '{{ gquota_group_bhard }}'
+        isoft: '{{ gquota_group_isoft }}'
+        ihard: '{{ gquota_group_ihard }}'
+        mountpoint: '{{ remote_tmp_dir }}/gquota'
+        name: xfsquotauser
+        rtbsoft: '{{ gquota_group_rtbsoft }}'
+        rtbhard: '{{ gquota_group_rtbhard }}'
+        type: group
+      become: true
+      register: test_gquota_group_after
+    - name: Assert group limits results for xfsquotauser after re-apply
+      assert:
+        that:
+          - not test_gquota_group_after.changed
+    - name: Reset default group limits
+      xfs_quota:
+        mountpoint: '{{ remote_tmp_dir }}/gquota'
+        state: absent
+        type: group
+      become: true
+      register: test_reset_gquota_default
+    - name: Assert reset of default group limits results
+      assert:
+        that:
+          - test_reset_gquota_default.changed
+          - test_reset_gquota_default.bsoft == 0
+          - test_reset_gquota_default.bhard == 0
+          - test_reset_gquota_default.isoft == 0
+          - test_reset_gquota_default.ihard == 0
+          - test_reset_gquota_default.rtbsoft == 0
+          - test_reset_gquota_default.rtbhard == 0
+    - name: Reset group limits for xfsquotauser
+      xfs_quota:
+        mountpoint: '{{ remote_tmp_dir }}/gquota'
+        name: xfsquotauser
+        state: absent
+        type: group
+      become: true
+      register: test_reset_gquota_group
+    - name: Assert reset of group limits results for xfsquotauser
+      assert:
+        that:
+          - test_reset_gquota_group.changed
+          - test_reset_gquota_group.bsoft == 0
+          - test_reset_gquota_group.bhard == 0
+          - test_reset_gquota_group.isoft == 0
+          - test_reset_gquota_group.ihard == 0
+          - test_reset_gquota_group.rtbsoft == 0
+          - test_reset_gquota_group.rtbhard == 0
  always:
-  - name: Unmount filesystem
-    become: true
-    ansible.posix.mount:
-      fstab: '{{ remote_tmp_dir }}/fstab'
-      path: '{{ remote_tmp_dir }}/gquota'
-      state: unmounted
-  - name: Remove disk image
-    file:
-      path: '{{ remote_tmp_dir }}/img-gquota'
-      state: absent
+    - name: Unmount filesystem
+      become: true
+      ansible.posix.mount:
+        fstab: '{{ remote_tmp_dir }}/fstab'
+        path: '{{ remote_tmp_dir }}/gquota'
+        state: unmounted
+    - name: Remove disk image
+      file:
+        path: '{{ remote_tmp_dir }}/img-gquota'
+        state: absent
diff --git a/tests/integration/targets/xfs_quota/tasks/pquota.yml b/tests/integration/targets/xfs_quota/tasks/pquota.yml
index db364ffd5f..439452da24 100644
--- a/tests/integration/targets/xfs_quota/tasks/pquota.yml
+++ b/tests/integration/targets/xfs_quota/tasks/pquota.yml
@@ -17,8 +17,8 @@
       state: touch
     become: true
     loop:
-    - projid
-    - projects
+      - projid
+      - projects
  - name: Add test xfs quota project id
    lineinfile:
      path: /etc/projid
@@ -32,153 +32,153 @@
      state: present
    become: true
  - block:
-  - name: Mount filesystem
-    become: true
-    ansible.posix.mount:
-      fstab: '{{ remote_tmp_dir }}/fstab'
-      src: '{{ remote_tmp_dir }}/img-pquota'
-      path: '{{ remote_tmp_dir }}/pquota'
-      fstype: xfs
-      opts: pquota
-      state: mounted
-  - name: Create test directory
-    file:
-      path: '{{ remote_tmp_dir }}/pquota/test'
-      state: directory
-    become: true
-  - name: Apply default project limits
-    xfs_quota:
-      bsoft: '{{ pquota_default_bsoft }}'
-      bhard: '{{ pquota_default_bhard }}'
-      isoft: '{{ pquota_default_isoft }}'
-      ihard: '{{ pquota_default_ihard }}'
-      mountpoint: '{{ remote_tmp_dir }}/pquota'
-      rtbsoft: '{{ pquota_default_rtbsoft }}'
-      rtbhard: '{{ pquota_default_rtbhard }}'
-      type: project
-    become: true
-    register: 
test_pquota_default_before - - name: Assert default project limits results - assert: - that: - - test_pquota_default_before.changed - - test_pquota_default_before.bsoft == pquota_default_bsoft|human_to_bytes - - test_pquota_default_before.bhard == pquota_default_bhard|human_to_bytes - - test_pquota_default_before.isoft == pquota_default_isoft - - test_pquota_default_before.ihard == pquota_default_ihard - - test_pquota_default_before.rtbsoft == pquota_default_rtbsoft|human_to_bytes - - test_pquota_default_before.rtbhard == pquota_default_rtbhard|human_to_bytes - - name: Apply project limits - xfs_quota: - bsoft: '{{ pquota_project_bsoft }}' - bhard: '{{ pquota_project_bhard }}' - isoft: '{{ pquota_project_isoft }}' - ihard: '{{ pquota_project_ihard }}' - mountpoint: '{{ remote_tmp_dir }}/pquota' - name: xft_quotaval - rtbsoft: '{{ pquota_project_rtbsoft }}' - rtbhard: '{{ pquota_project_rtbhard }}' - type: project - become: true - register: test_pquota_project_before - - name: Assert project limits results for xft_quotaval - assert: - that: - - test_pquota_project_before.changed - - test_pquota_project_before.bsoft == pquota_project_bsoft|human_to_bytes - - test_pquota_project_before.bhard == pquota_project_bhard|human_to_bytes - - test_pquota_project_before.isoft == pquota_project_isoft - - test_pquota_project_before.ihard == pquota_project_ihard - - test_pquota_project_before.rtbsoft == pquota_project_rtbsoft|human_to_bytes - - test_pquota_project_before.rtbhard == pquota_project_rtbhard|human_to_bytes - - name: Re-apply default project limits - xfs_quota: - bsoft: '{{ pquota_default_bsoft }}' - bhard: '{{ pquota_default_bhard }}' - isoft: '{{ pquota_default_isoft }}' - ihard: '{{ pquota_default_ihard }}' - mountpoint: '{{ remote_tmp_dir }}/pquota' - rtbsoft: '{{ pquota_default_rtbsoft }}' - rtbhard: '{{ pquota_default_rtbhard }}' - type: project - become: true - register: test_pquota_default_after - - name: Assert default project limits results after re-apply - assert: - that: - - not test_pquota_default_after.changed - - name: Re-apply project limits - xfs_quota: - bsoft: '{{ pquota_project_bsoft }}' - bhard: '{{ pquota_project_bhard }}' - isoft: '{{ pquota_project_isoft }}' - ihard: '{{ pquota_project_ihard }}' - mountpoint: '{{ remote_tmp_dir }}/pquota' - name: xft_quotaval - rtbsoft: '{{ pquota_project_rtbsoft }}' - rtbhard: '{{ pquota_project_rtbhard }}' - type: project - become: true - register: test_pquota_project_after - - name: Assert project limits results for xft_quotaval after re-apply - assert: - that: - - test_pquota_project_after is not changed - - name: Reset default project limits - xfs_quota: - mountpoint: '{{ remote_tmp_dir }}/pquota' - state: absent - type: project - become: true - register: test_reset_pquota_default - - name: Assert reset of default projecy limits results - assert: - that: - - test_reset_pquota_default.changed - - test_reset_pquota_default.bsoft == 0 - - test_reset_pquota_default.bhard == 0 - - test_reset_pquota_default.isoft == 0 - - test_reset_pquota_default.ihard == 0 - - test_reset_pquota_default.rtbsoft == 0 - - test_reset_pquota_default.rtbhard == 0 - - name: Reset project limits for xft_quotaval - xfs_quota: - mountpoint: '{{ remote_tmp_dir }}/pquota' - name: xft_quotaval - state: absent - type: project - become: true - register: test_reset_pquota_project - - name: Assert reset of project limits results for xft_quotaval - assert: - that: - - test_reset_pquota_project.changed - - test_reset_pquota_project.bsoft == 0 - - 
test_reset_pquota_project.bhard == 0 - - test_reset_pquota_project.isoft == 0 - - test_reset_pquota_project.ihard == 0 - - test_reset_pquota_project.rtbsoft == 0 - - test_reset_pquota_project.rtbhard == 0 + - name: Mount filesystem + become: true + ansible.posix.mount: + fstab: '{{ remote_tmp_dir }}/fstab' + src: '{{ remote_tmp_dir }}/img-pquota' + path: '{{ remote_tmp_dir }}/pquota' + fstype: xfs + opts: pquota + state: mounted + - name: Create test directory + file: + path: '{{ remote_tmp_dir }}/pquota/test' + state: directory + become: true + - name: Apply default project limits + xfs_quota: + bsoft: '{{ pquota_default_bsoft }}' + bhard: '{{ pquota_default_bhard }}' + isoft: '{{ pquota_default_isoft }}' + ihard: '{{ pquota_default_ihard }}' + mountpoint: '{{ remote_tmp_dir }}/pquota' + rtbsoft: '{{ pquota_default_rtbsoft }}' + rtbhard: '{{ pquota_default_rtbhard }}' + type: project + become: true + register: test_pquota_default_before + - name: Assert default project limits results + assert: + that: + - test_pquota_default_before.changed + - test_pquota_default_before.bsoft == pquota_default_bsoft|human_to_bytes + - test_pquota_default_before.bhard == pquota_default_bhard|human_to_bytes + - test_pquota_default_before.isoft == pquota_default_isoft + - test_pquota_default_before.ihard == pquota_default_ihard + - test_pquota_default_before.rtbsoft == pquota_default_rtbsoft|human_to_bytes + - test_pquota_default_before.rtbhard == pquota_default_rtbhard|human_to_bytes + - name: Apply project limits + xfs_quota: + bsoft: '{{ pquota_project_bsoft }}' + bhard: '{{ pquota_project_bhard }}' + isoft: '{{ pquota_project_isoft }}' + ihard: '{{ pquota_project_ihard }}' + mountpoint: '{{ remote_tmp_dir }}/pquota' + name: xft_quotaval + rtbsoft: '{{ pquota_project_rtbsoft }}' + rtbhard: '{{ pquota_project_rtbhard }}' + type: project + become: true + register: test_pquota_project_before + - name: Assert project limits results for xft_quotaval + assert: + that: + - test_pquota_project_before.changed + - test_pquota_project_before.bsoft == pquota_project_bsoft|human_to_bytes + - test_pquota_project_before.bhard == pquota_project_bhard|human_to_bytes + - test_pquota_project_before.isoft == pquota_project_isoft + - test_pquota_project_before.ihard == pquota_project_ihard + - test_pquota_project_before.rtbsoft == pquota_project_rtbsoft|human_to_bytes + - test_pquota_project_before.rtbhard == pquota_project_rtbhard|human_to_bytes + - name: Re-apply default project limits + xfs_quota: + bsoft: '{{ pquota_default_bsoft }}' + bhard: '{{ pquota_default_bhard }}' + isoft: '{{ pquota_default_isoft }}' + ihard: '{{ pquota_default_ihard }}' + mountpoint: '{{ remote_tmp_dir }}/pquota' + rtbsoft: '{{ pquota_default_rtbsoft }}' + rtbhard: '{{ pquota_default_rtbhard }}' + type: project + become: true + register: test_pquota_default_after + - name: Assert default project limits results after re-apply + assert: + that: + - not test_pquota_default_after.changed + - name: Re-apply project limits + xfs_quota: + bsoft: '{{ pquota_project_bsoft }}' + bhard: '{{ pquota_project_bhard }}' + isoft: '{{ pquota_project_isoft }}' + ihard: '{{ pquota_project_ihard }}' + mountpoint: '{{ remote_tmp_dir }}/pquota' + name: xft_quotaval + rtbsoft: '{{ pquota_project_rtbsoft }}' + rtbhard: '{{ pquota_project_rtbhard }}' + type: project + become: true + register: test_pquota_project_after + - name: Assert project limits results for xft_quotaval after re-apply + assert: + that: + - test_pquota_project_after is not changed + - name: Reset 
default project limits + xfs_quota: + mountpoint: '{{ remote_tmp_dir }}/pquota' + state: absent + type: project + become: true + register: test_reset_pquota_default + - name: Assert reset of default project limits results + assert: + that: + - test_reset_pquota_default.changed + - test_reset_pquota_default.bsoft == 0 + - test_reset_pquota_default.bhard == 0 + - test_reset_pquota_default.isoft == 0 + - test_reset_pquota_default.ihard == 0 + - test_reset_pquota_default.rtbsoft == 0 + - test_reset_pquota_default.rtbhard == 0 + - name: Reset project limits for xft_quotaval + xfs_quota: + mountpoint: '{{ remote_tmp_dir }}/pquota' + name: xft_quotaval + state: absent + type: project + become: true + register: test_reset_pquota_project + - name: Assert reset of project limits results for xft_quotaval + assert: + that: + - test_reset_pquota_project.changed + - test_reset_pquota_project.bsoft == 0 + - test_reset_pquota_project.bhard == 0 + - test_reset_pquota_project.isoft == 0 + - test_reset_pquota_project.ihard == 0 + - test_reset_pquota_project.rtbsoft == 0 + - test_reset_pquota_project.rtbhard == 0 always: - - name: Unmount filesystem - become: true - ansible.posix.mount: - fstab: '{{ remote_tmp_dir }}/fstab' - path: '{{ remote_tmp_dir }}/pquota' - state: unmounted - - name: Remove disk image - file: - path: '{{ remote_tmp_dir }}/img-pquota' - state: absent - - name: Remove xfs quota project id - lineinfile: - path: /etc/projid - regexp: ^xft_quotaval:99999$ - state: absent - become: true - - name: Remove xfs quota project path - lineinfile: - path: /etc/projects - regexp: ^99999:.*$ - state: absent - become: true + - name: Unmount filesystem + become: true + ansible.posix.mount: + fstab: '{{ remote_tmp_dir }}/fstab' + path: '{{ remote_tmp_dir }}/pquota' + state: unmounted + - name: Remove disk image + file: + path: '{{ remote_tmp_dir }}/img-pquota' + state: absent + - name: Remove xfs quota project id + lineinfile: + path: /etc/projid + regexp: ^xft_quotaval:99999$ + state: absent + become: true + - name: Remove xfs quota project path + lineinfile: + path: /etc/projects + regexp: ^99999:.*$ + state: absent + become: true diff --git a/tests/integration/targets/xfs_quota/tasks/uquota.yml b/tests/integration/targets/xfs_quota/tasks/uquota.yml index 36a7eff766..0fcc0b30e0 100644 --- a/tests/integration/targets/xfs_quota/tasks/uquota.yml +++ b/tests/integration/targets/xfs_quota/tasks/uquota.yml @@ -12,136 +12,136 @@ dev: '{{ remote_tmp_dir }}/img-uquota' fstype: xfs - block: - - name: Mount filesystem - become: true - ansible.posix.mount: - fstab: '{{ remote_tmp_dir }}/fstab' - src: '{{ remote_tmp_dir }}/img-uquota' - path: '{{ remote_tmp_dir }}/uquota' - fstype: xfs - opts: uquota - state: mounted - - name: Apply default user limits - xfs_quota: - bsoft: '{{ uquota_default_bsoft }}' - bhard: '{{ uquota_default_bhard }}' - isoft: '{{ uquota_default_isoft }}' - ihard: '{{ uquota_default_ihard }}' - mountpoint: '{{ remote_tmp_dir }}/uquota' - rtbsoft: '{{ uquota_default_rtbsoft }}' - rtbhard: '{{ uquota_default_rtbhard }}' - type: user - become: true - register: test_uquota_default_before - - name: Assert default user limits results - assert: - that: - - test_uquota_default_before.changed - - test_uquota_default_before.bsoft == uquota_default_bsoft|human_to_bytes - - test_uquota_default_before.bhard == uquota_default_bhard|human_to_bytes - - test_uquota_default_before.isoft == uquota_default_isoft - - test_uquota_default_before.ihard == uquota_default_ihard - - test_uquota_default_before.rtbsoft ==
uquota_default_rtbsoft|human_to_bytes - - test_uquota_default_before.rtbhard == uquota_default_rtbhard|human_to_bytes - - name: Apply user limits - xfs_quota: - bsoft: '{{ uquota_user_bsoft }}' - bhard: '{{ uquota_user_bhard }}' - isoft: '{{ uquota_user_isoft }}' - ihard: '{{ uquota_user_ihard }}' - mountpoint: '{{ remote_tmp_dir }}/uquota' - name: xfsquotauser - rtbsoft: '{{ uquota_user_rtbsoft }}' - rtbhard: '{{ uquota_user_rtbhard }}' - type: user - become: true - register: test_uquota_user_before - - name: Assert user limits results - assert: - that: - - test_uquota_user_before.changed - - test_uquota_user_before.bsoft == uquota_user_bsoft|human_to_bytes - - test_uquota_user_before.bhard == uquota_user_bhard|human_to_bytes - - test_uquota_user_before.isoft == uquota_user_isoft - - test_uquota_user_before.ihard == uquota_user_ihard - - test_uquota_user_before.rtbsoft == uquota_user_rtbsoft|human_to_bytes - - test_uquota_user_before.rtbhard == uquota_user_rtbhard|human_to_bytes - - name: Re-apply default user limits - xfs_quota: - bsoft: '{{ uquota_default_bsoft }}' - bhard: '{{ uquota_default_bhard }}' - isoft: '{{ uquota_default_isoft }}' - ihard: '{{ uquota_default_ihard }}' - mountpoint: '{{ remote_tmp_dir }}/uquota' - rtbsoft: '{{ uquota_default_rtbsoft }}' - rtbhard: '{{ uquota_default_rtbhard }}' - type: user - become: true - register: test_uquota_default_after - - name: Assert default user limits results after re-apply - assert: - that: - - not test_uquota_default_after.changed - - name: Re-apply user limits - xfs_quota: - bsoft: '{{ uquota_user_bsoft }}' - bhard: '{{ uquota_user_bhard }}' - isoft: '{{ uquota_user_isoft }}' - ihard: '{{ uquota_user_ihard }}' - mountpoint: '{{ remote_tmp_dir }}/uquota' - name: xfsquotauser - rtbsoft: '{{ uquota_user_rtbsoft }}' - rtbhard: '{{ uquota_user_rtbhard }}' - type: user - become: true - register: test_uquota_user_after - - name: Assert user limits results for xfsquotauser after re-apply - assert: - that: - - not test_uquota_user_after.changed - - name: Reset default user limits - xfs_quota: - mountpoint: '{{ remote_tmp_dir }}/uquota' - state: absent - type: user - become: true - register: test_reset_uquota_default - - name: Assert reset of default user limits results - assert: - that: - - test_reset_uquota_default.changed - - test_reset_uquota_default.bsoft == 0 - - test_reset_uquota_default.bhard == 0 - - test_reset_uquota_default.isoft == 0 - - test_reset_uquota_default.ihard == 0 - - test_reset_uquota_default.rtbsoft == 0 - - test_reset_uquota_default.rtbhard == 0 - - name: Reset user limits for xfsquotauser - xfs_quota: - mountpoint: '{{ remote_tmp_dir }}/uquota' - name: xfsquotauser - state: absent - type: user - become: true - register: test_reset_uquota_user - - name: Assert reset of default user limits results - assert: - that: - - test_reset_uquota_user.changed - - test_reset_uquota_user.bsoft == 0 - - test_reset_uquota_user.bhard == 0 - - test_reset_uquota_user.isoft == 0 - - test_reset_uquota_user.ihard == 0 - - test_reset_uquota_user.rtbsoft == 0 - - test_reset_uquota_user.rtbhard == 0 + - name: Mount filesystem + become: true + ansible.posix.mount: + fstab: '{{ remote_tmp_dir }}/fstab' + src: '{{ remote_tmp_dir }}/img-uquota' + path: '{{ remote_tmp_dir }}/uquota' + fstype: xfs + opts: uquota + state: mounted + - name: Apply default user limits + xfs_quota: + bsoft: '{{ uquota_default_bsoft }}' + bhard: '{{ uquota_default_bhard }}' + isoft: '{{ uquota_default_isoft }}' + ihard: '{{ uquota_default_ihard }}' + mountpoint: '{{ 
remote_tmp_dir }}/uquota' + rtbsoft: '{{ uquota_default_rtbsoft }}' + rtbhard: '{{ uquota_default_rtbhard }}' + type: user + become: true + register: test_uquota_default_before + - name: Assert default user limits results + assert: + that: + - test_uquota_default_before.changed + - test_uquota_default_before.bsoft == uquota_default_bsoft|human_to_bytes + - test_uquota_default_before.bhard == uquota_default_bhard|human_to_bytes + - test_uquota_default_before.isoft == uquota_default_isoft + - test_uquota_default_before.ihard == uquota_default_ihard + - test_uquota_default_before.rtbsoft == uquota_default_rtbsoft|human_to_bytes + - test_uquota_default_before.rtbhard == uquota_default_rtbhard|human_to_bytes + - name: Apply user limits + xfs_quota: + bsoft: '{{ uquota_user_bsoft }}' + bhard: '{{ uquota_user_bhard }}' + isoft: '{{ uquota_user_isoft }}' + ihard: '{{ uquota_user_ihard }}' + mountpoint: '{{ remote_tmp_dir }}/uquota' + name: xfsquotauser + rtbsoft: '{{ uquota_user_rtbsoft }}' + rtbhard: '{{ uquota_user_rtbhard }}' + type: user + become: true + register: test_uquota_user_before + - name: Assert user limits results + assert: + that: + - test_uquota_user_before.changed + - test_uquota_user_before.bsoft == uquota_user_bsoft|human_to_bytes + - test_uquota_user_before.bhard == uquota_user_bhard|human_to_bytes + - test_uquota_user_before.isoft == uquota_user_isoft + - test_uquota_user_before.ihard == uquota_user_ihard + - test_uquota_user_before.rtbsoft == uquota_user_rtbsoft|human_to_bytes + - test_uquota_user_before.rtbhard == uquota_user_rtbhard|human_to_bytes + - name: Re-apply default user limits + xfs_quota: + bsoft: '{{ uquota_default_bsoft }}' + bhard: '{{ uquota_default_bhard }}' + isoft: '{{ uquota_default_isoft }}' + ihard: '{{ uquota_default_ihard }}' + mountpoint: '{{ remote_tmp_dir }}/uquota' + rtbsoft: '{{ uquota_default_rtbsoft }}' + rtbhard: '{{ uquota_default_rtbhard }}' + type: user + become: true + register: test_uquota_default_after + - name: Assert default user limits results after re-apply + assert: + that: + - not test_uquota_default_after.changed + - name: Re-apply user limits + xfs_quota: + bsoft: '{{ uquota_user_bsoft }}' + bhard: '{{ uquota_user_bhard }}' + isoft: '{{ uquota_user_isoft }}' + ihard: '{{ uquota_user_ihard }}' + mountpoint: '{{ remote_tmp_dir }}/uquota' + name: xfsquotauser + rtbsoft: '{{ uquota_user_rtbsoft }}' + rtbhard: '{{ uquota_user_rtbhard }}' + type: user + become: true + register: test_uquota_user_after + - name: Assert user limits results for xfsquotauser after re-apply + assert: + that: + - not test_uquota_user_after.changed + - name: Reset default user limits + xfs_quota: + mountpoint: '{{ remote_tmp_dir }}/uquota' + state: absent + type: user + become: true + register: test_reset_uquota_default + - name: Assert reset of default user limits results + assert: + that: + - test_reset_uquota_default.changed + - test_reset_uquota_default.bsoft == 0 + - test_reset_uquota_default.bhard == 0 + - test_reset_uquota_default.isoft == 0 + - test_reset_uquota_default.ihard == 0 + - test_reset_uquota_default.rtbsoft == 0 + - test_reset_uquota_default.rtbhard == 0 + - name: Reset user limits for xfsquotauser + xfs_quota: + mountpoint: '{{ remote_tmp_dir }}/uquota' + name: xfsquotauser + state: absent + type: user + become: true + register: test_reset_uquota_user + - name: Assert reset of user limits results for xfsquotauser + assert: + that: + - test_reset_uquota_user.changed + - test_reset_uquota_user.bsoft == 0 + - test_reset_uquota_user.bhard == 0 + -
test_reset_uquota_user.isoft == 0 + - test_reset_uquota_user.ihard == 0 + - test_reset_uquota_user.rtbsoft == 0 + - test_reset_uquota_user.rtbhard == 0 always: - - name: Unmount filesystem - become: true - ansible.posix.mount: - fstab: '{{ remote_tmp_dir }}/fstab' - path: '{{ remote_tmp_dir }}/uquota' - state: unmounted - - name: Remove disk image - file: - path: '{{ remote_tmp_dir }}/img-uquota' - state: absent + - name: Unmount filesystem + become: true + ansible.posix.mount: + fstab: '{{ remote_tmp_dir }}/fstab' + path: '{{ remote_tmp_dir }}/uquota' + state: unmounted + - name: Remove disk image + file: + path: '{{ remote_tmp_dir }}/img-uquota' + state: absent diff --git a/tests/integration/targets/xml/results/test-set-children-elements-value.xml b/tests/integration/targets/xml/results/test-set-children-elements-value.xml new file mode 100644 index 0000000000..53e23c80d6 --- /dev/null +++ b/tests/integration/targets/xml/results/test-set-children-elements-value.xml @@ -0,0 +1,11 @@ + + + Tasty Beverage Co. + + 25 + 10 + + +
http://tastybeverageco.com
+
+
diff --git a/tests/sanity/extra/action-group.json.license b/tests/integration/targets/xml/results/test-set-children-elements-value.xml.license similarity index 100% rename from tests/sanity/extra/action-group.json.license rename to tests/integration/targets/xml/results/test-set-children-elements-value.xml.license diff --git a/tests/integration/targets/xml/tasks/main.yml b/tests/integration/targets/xml/tasks/main.yml index 8235f1a6b6..5c2c01ed53 100644 --- a/tests/integration/targets/xml/tasks/main.yml +++ b/tests/integration/targets/xml/tasks/main.yml @@ -47,40 +47,41 @@ when: lxml_xpath_attribute_result_attrname block: - - include_tasks: test-add-children-elements.yml - - include_tasks: test-add-children-from-groupvars.yml - - include_tasks: test-add-children-insertafter.yml - - include_tasks: test-add-children-insertbefore.yml - - include_tasks: test-add-children-with-attributes.yml - - include_tasks: test-add-element-implicitly.yml - - include_tasks: test-count.yml - - include_tasks: test-mutually-exclusive-attributes.yml - - include_tasks: test-remove-attribute.yml - - include_tasks: test-remove-attribute-nochange.yml - - include_tasks: test-remove-element.yml - - include_tasks: test-remove-element-nochange.yml - - include_tasks: test-set-attribute-value.yml - - include_tasks: test-set-children-elements.yml - - include_tasks: test-set-children-elements-level.yml - - include_tasks: test-set-element-value.yml - - include_tasks: test-set-element-value-empty.yml - - include_tasks: test-pretty-print.yml - - include_tasks: test-pretty-print-only.yml - - include_tasks: test-add-namespaced-children-elements.yml - - include_tasks: test-remove-namespaced-attribute.yml - - include_tasks: test-remove-namespaced-attribute-nochange.yml - - include_tasks: test-set-namespaced-attribute-value.yml - - include_tasks: test-set-namespaced-element-value.yml - - include_tasks: test-set-namespaced-children-elements.yml - - include_tasks: test-get-element-content.yml - - include_tasks: test-xmlstring.yml - - include_tasks: test-children-elements-xml.yml + - include_tasks: test-add-children-elements.yml + - include_tasks: test-add-children-from-groupvars.yml + - include_tasks: test-add-children-insertafter.yml + - include_tasks: test-add-children-insertbefore.yml + - include_tasks: test-add-children-with-attributes.yml + - include_tasks: test-add-element-implicitly.yml + - include_tasks: test-count.yml + - include_tasks: test-mutually-exclusive-attributes.yml + - include_tasks: test-remove-attribute.yml + - include_tasks: test-remove-attribute-nochange.yml + - include_tasks: test-remove-element.yml + - include_tasks: test-remove-element-nochange.yml + - include_tasks: test-set-attribute-value.yml + - include_tasks: test-set-children-elements.yml + - include_tasks: test-set-children-elements-level.yml + - include_tasks: test-set-children-elements-value.yml + - include_tasks: test-set-element-value.yml + - include_tasks: test-set-element-value-empty.yml + - include_tasks: test-pretty-print.yml + - include_tasks: test-pretty-print-only.yml + - include_tasks: test-add-namespaced-children-elements.yml + - include_tasks: test-remove-namespaced-attribute.yml + - include_tasks: test-remove-namespaced-attribute-nochange.yml + - include_tasks: test-set-namespaced-attribute-value.yml + - include_tasks: test-set-namespaced-element-value.yml + - include_tasks: test-set-namespaced-children-elements.yml + - include_tasks: test-get-element-content.yml + - include_tasks: test-xmlstring.yml + - include_tasks: 
test-children-elements-xml.yml - # Unicode tests - - include_tasks: test-add-children-elements-unicode.yml - - include_tasks: test-add-children-with-attributes-unicode.yml - - include_tasks: test-set-attribute-value-unicode.yml - - include_tasks: test-count-unicode.yml - - include_tasks: test-get-element-content.yml - - include_tasks: test-set-children-elements-unicode.yml - - include_tasks: test-set-element-value-unicode.yml + # Unicode tests + - include_tasks: test-add-children-elements-unicode.yml + - include_tasks: test-add-children-with-attributes-unicode.yml + - include_tasks: test-set-attribute-value-unicode.yml + - include_tasks: test-count-unicode.yml + - include_tasks: test-get-element-content-unicode.yml + - include_tasks: test-set-children-elements-unicode.yml + - include_tasks: test-set-element-value-unicode.yml diff --git a/tests/integration/targets/xml/tasks/test-add-children-elements-unicode.yml b/tests/integration/targets/xml/tasks/test-add-children-elements-unicode.yml index e15ac5fd92..0b79cdafa5 100644 --- a/tests/integration/targets/xml/tasks/test-add-children-elements-unicode.yml +++ b/tests/integration/targets/xml/tasks/test-add-children-elements-unicode.yml @@ -3,34 +3,34 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: fixtures/ansible-xml-beers.xml - dest: /tmp/ansible-xml-beers.xml +- name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml - - name: Add child element - xml: - path: /tmp/ansible-xml-beers.xml - xpath: /business/beers - add_children: +- name: Add child element + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/beers + add_children: - beer: Окское - register: add_children_elements_unicode + register: add_children_elements_unicode - - name: Add trailing newline - shell: echo "" >> /tmp/ansible-xml-beers.xml +- name: Add trailing newline + shell: echo "" >> /tmp/ansible-xml-beers.xml - - name: Compare to expected result - copy: - src: results/test-add-children-elements-unicode.xml - dest: /tmp/ansible-xml-beers.xml - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + src: results/test-add-children-elements-unicode.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: true + diff: true + register: comparison - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - add_children_elements_unicode is changed - comparison is not changed # identical - #command: diff -u {{ role_path }}/results/test-add-children-elements-unicode.xml /tmp/ansible-xml-beers.xml + # command: diff -u {{ role_path }}/results/test-add-children-elements-unicode.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-add-children-elements.yml b/tests/integration/targets/xml/tasks/test-add-children-elements.yml index 29467f6d6f..68e295cccf 100644 --- a/tests/integration/targets/xml/tasks/test-add-children-elements.yml +++ b/tests/integration/targets/xml/tasks/test-add-children-elements.yml @@ -3,34 +3,34 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: fixtures/ansible-xml-beers.xml - dest:
/tmp/ansible-xml-beers.xml - - name: Add child element - xml: - path: /tmp/ansible-xml-beers.xml - xpath: /business/beers - add_children: +- name: Add child element + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/beers + add_children: - beer: Old Rasputin - register: add_children_elements + register: add_children_elements - - name: Add trailing newline - shell: echo "" >> /tmp/ansible-xml-beers.xml +- name: Add trailing newline + shell: echo "" >> /tmp/ansible-xml-beers.xml - - name: Compare to expected result - copy: - src: results/test-add-children-elements.xml - dest: /tmp/ansible-xml-beers.xml - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + src: results/test-add-children-elements.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: true + diff: true + register: comparison - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - add_children_elements is changed - comparison is not changed # identical - #command: diff -u {{ role_path }}/results/test-add-children-elements.xml /tmp/ansible-xml-beers.xml + # command: diff -u {{ role_path }}/results/test-add-children-elements.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-add-children-from-groupvars.yml b/tests/integration/targets/xml/tasks/test-add-children-from-groupvars.yml index 2b232b6d0d..d65abd152a 100644 --- a/tests/integration/targets/xml/tasks/test-add-children-from-groupvars.yml +++ b/tests/integration/targets/xml/tasks/test-add-children-from-groupvars.yml @@ -3,33 +3,33 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: fixtures/ansible-xml-beers.xml - dest: /tmp/ansible-xml-beers.xml +- name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml - - name: Add child element - xml: - path: /tmp/ansible-xml-beers.xml - xpath: /business/beers - add_children: '{{ bad_beers }}' - register: add_children_from_groupvars +- name: Add child element + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/beers + add_children: '{{ bad_beers }}' + register: add_children_from_groupvars - - name: Add trailing newline - shell: echo "" >> /tmp/ansible-xml-beers.xml +- name: Add trailing newline + shell: echo "" >> /tmp/ansible-xml-beers.xml - - name: Compare to expected result - copy: - src: results/test-add-children-from-groupvars.xml - dest: /tmp/ansible-xml-beers.xml - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + src: results/test-add-children-from-groupvars.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: true + diff: true + register: comparison - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - add_children_from_groupvars is changed - comparison is not changed # identical - #command: diff -u {{ role_path }}/results/test-add-children-from-groupvars.xml /tmp/ansible-xml-beers.xml + # command: diff -u {{ role_path }}/results/test-add-children-from-groupvars.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-add-children-insertafter.yml b/tests/integration/targets/xml/tasks/test-add-children-insertafter.yml index 7795c89663..4c581556e8 100644 --- a/tests/integration/targets/xml/tasks/test-add-children-insertafter.yml +++ 
b/tests/integration/targets/xml/tasks/test-add-children-insertafter.yml @@ -3,34 +3,34 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: fixtures/ansible-xml-beers.xml - dest: /tmp/ansible-xml-beers.xml +- name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml - - name: Add child element - xml: - path: /tmp/ansible-xml-beers.xml - xpath: '/business/beers/beer[text()="St. Bernardus Abbot 12"]' - insertafter: true - add_children: +- name: Add child element + xml: + path: /tmp/ansible-xml-beers.xml + xpath: '/business/beers/beer[text()="St. Bernardus Abbot 12"]' + insertafter: true + add_children: - beer: Old Rasputin - beer: Old Motor Oil - beer: Old Curmudgeon - pretty_print: true - register: add_children_insertafter + pretty_print: true + register: add_children_insertafter - - name: Compare to expected result - copy: - src: results/test-add-children-insertafter.xml - dest: /tmp/ansible-xml-beers.xml - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + src: results/test-add-children-insertafter.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: true + diff: true + register: comparison - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - add_children_insertafter is changed - comparison is not changed # identical diff --git a/tests/integration/targets/xml/tasks/test-add-children-insertbefore.yml b/tests/integration/targets/xml/tasks/test-add-children-insertbefore.yml index b14c5e06fc..6b7b325e8f 100644 --- a/tests/integration/targets/xml/tasks/test-add-children-insertbefore.yml +++ b/tests/integration/targets/xml/tasks/test-add-children-insertbefore.yml @@ -3,34 +3,34 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: fixtures/ansible-xml-beers.xml - dest: /tmp/ansible-xml-beers.xml +- name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml - - name: Add child element - xml: - path: /tmp/ansible-xml-beers.xml - xpath: '/business/beers/beer[text()="St. Bernardus Abbot 12"]' - insertbefore: true - add_children: +- name: Add child element + xml: + path: /tmp/ansible-xml-beers.xml + xpath: '/business/beers/beer[text()="St. 
Bernardus Abbot 12"]' + insertbefore: true + add_children: - beer: Old Rasputin - beer: Old Motor Oil - beer: Old Curmudgeon - pretty_print: true - register: add_children_insertbefore + pretty_print: true + register: add_children_insertbefore - - name: Compare to expected result - copy: - src: results/test-add-children-insertbefore.xml - dest: /tmp/ansible-xml-beers.xml - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + src: results/test-add-children-insertbefore.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: true + diff: true + register: comparison - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - add_children_insertbefore is changed - comparison is not changed # identical diff --git a/tests/integration/targets/xml/tasks/test-add-children-with-attributes-unicode.yml b/tests/integration/targets/xml/tasks/test-add-children-with-attributes-unicode.yml index 07905aa15c..b09a117e5b 100644 --- a/tests/integration/targets/xml/tasks/test-add-children-with-attributes-unicode.yml +++ b/tests/integration/targets/xml/tasks/test-add-children-with-attributes-unicode.yml @@ -3,36 +3,36 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: fixtures/ansible-xml-beers.xml - dest: /tmp/ansible-xml-beers.xml +- name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml - - name: Add child element - xml: - path: /tmp/ansible-xml-beers.xml - xpath: /business/beers - add_children: +- name: Add child element + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/beers + add_children: - beer: name: Окское type: экстра - register: add_children_with_attributes_unicode + register: add_children_with_attributes_unicode - - name: Add trailing newline - shell: echo "" >> /tmp/ansible-xml-beers.xml +- name: Add trailing newline + shell: echo "" >> /tmp/ansible-xml-beers.xml - - name: Compare to expected result - copy: - src: results/test-add-children-with-attributes-unicode.xml - dest: /tmp/ansible-xml-beers.xml - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + src: results/test-add-children-with-attributes-unicode.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: true + diff: true + register: comparison - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - add_children_with_attributes_unicode is changed - comparison is not changed # identical - #command: diff -u {{ role_path }}/results/test-add-children-with-attributes-unicode.xml /tmp/ansible-xml-beers.xml + # command: diff -u {{ role_path }}/results/test-add-children-with-attributes-unicode.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-add-children-with-attributes.yml b/tests/integration/targets/xml/tasks/test-add-children-with-attributes.yml index fede24395f..3cc1b36875 100644 --- a/tests/integration/targets/xml/tasks/test-add-children-with-attributes.yml +++ b/tests/integration/targets/xml/tasks/test-add-children-with-attributes.yml @@ -3,40 +3,40 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: fixtures/ansible-xml-beers.xml - dest: /tmp/ansible-xml-beers.xml +- name: 
Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml - - name: Add child element - xml: - path: /tmp/ansible-xml-beers.xml - xpath: /business/beers - add_children: +- name: Add child element + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/beers + add_children: - beer: name: Ansible Brew type: light - register: add_children_with_attributes + register: add_children_with_attributes - - name: Add trailing newline - shell: echo "" >> /tmp/ansible-xml-beers.xml +- name: Add trailing newline + shell: echo "" >> /tmp/ansible-xml-beers.xml - - name: Compare to expected result - copy: - src: results/test-add-children-with-attributes.xml - dest: /tmp/ansible-xml-beers.xml - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + src: results/test-add-children-with-attributes.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: true + diff: true + register: comparison - # NOTE: This test may fail if lxml does not support predictable element attribute order - # So we filter the failure out for these platforms (e.g. CentOS 6) - # The module still works fine, we simply are not comparing as smart as we should. - - name: Test expected result - assert: - that: +# NOTE: This test may fail if lxml does not support predictable element attribute order +# So we filter the failure out for these platforms (e.g. CentOS 6) +# The module still works fine, we simply are not comparing as smart as we should. +- name: Test expected result + assert: + that: - add_children_with_attributes is changed - comparison is not changed # identical - when: lxml_predictable_attribute_order - #command: diff -u {{ role_path }}/results/test-add-children-with-attributes.xml /tmp/ansible-xml-beers.xml + when: lxml_predictable_attribute_order + # command: diff -u {{ role_path }}/results/test-add-children-with-attributes.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-add-element-implicitly.yml b/tests/integration/targets/xml/tasks/test-add-element-implicitly.yml index b1718e452e..65cc19ca12 100644 --- a/tests/integration/targets/xml/tasks/test-add-element-implicitly.yml +++ b/tests/integration/targets/xml/tasks/test-add-element-implicitly.yml @@ -44,8 +44,8 @@ - name: Add an attribute with a value xml: - file: /tmp/ansible-xml-beers-implicit.xml - xpath: /business/owner/@dob='1976-04-12' + file: /tmp/ansible-xml-beers-implicit.xml + xpath: /business/owner/@dob='1976-04-12' - name: Add an element with a value, alternate syntax xml: @@ -112,8 +112,8 @@ - name: Test expected result assert: that: - - comparison is not changed # identical - #command: diff -u {{ role_path }}/results/test-add-element-implicitly.xml /tmp/ansible-xml-beers-implicit.xml + - comparison is not changed # identical + # command: diff -u {{ role_path }}/results/test-add-element-implicitly.xml /tmp/ansible-xml-beers-implicit.xml # Now we repeat the same, just to ensure proper use of namespaces @@ -205,7 +205,7 @@ value: xml tag with no special characters pretty_print: true namespaces: - a: http://example.com/some/namespace + a: http://example.com/some/namespace - name: Add an element with dash @@ -215,7 +215,7 @@ value: xml tag with dashes pretty_print: true namespaces: - a: http://example.com/some/namespace + a: http://example.com/some/namespace - name: Add an element with dot xml: @@ -224,7 +224,7 @@ value: xml tag with dashes and dots pretty_print: true namespaces: - a: http://example.com/some/namespace + a: 
http://example.com/some/namespace - name: Add an element with underscore xml: @@ -233,7 +233,7 @@ value: xml tag with dashes, dots and underscores pretty_print: true namespaces: - a: http://example.com/some/namespace + a: http://example.com/some/namespace - name: Pretty Print this! xml: diff --git a/tests/integration/targets/xml/tasks/test-add-namespaced-children-elements.yml b/tests/integration/targets/xml/tasks/test-add-namespaced-children-elements.yml index 2a9daab787..884fc4a917 100644 --- a/tests/integration/targets/xml/tasks/test-add-namespaced-children-elements.yml +++ b/tests/integration/targets/xml/tasks/test-add-namespaced-children-elements.yml @@ -3,37 +3,37 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: fixtures/ansible-xml-namespaced-beers.xml - dest: /tmp/ansible-xml-namespaced-beers.xml +- name: Setup test fixture + copy: + src: fixtures/ansible-xml-namespaced-beers.xml + dest: /tmp/ansible-xml-namespaced-beers.xml - - name: Add namespaced child element - xml: - path: /tmp/ansible-xml-namespaced-beers.xml - xpath: /bus:business/ber:beers - namespaces: - bus: http://test.business - ber: http://test.beers - add_children: +- name: Add namespaced child element + xml: + path: /tmp/ansible-xml-namespaced-beers.xml + xpath: /bus:business/ber:beers + namespaces: + bus: http://test.business + ber: http://test.beers + add_children: - beer: Old Rasputin - register: add_namespaced_children_elements + register: add_namespaced_children_elements - - name: Add trailing newline - shell: echo "" >> /tmp/ansible-xml-namespaced-beers.xml +- name: Add trailing newline + shell: echo "" >> /tmp/ansible-xml-namespaced-beers.xml - - name: Compare to expected result - copy: - src: results/test-add-namespaced-children-elements.xml - dest: /tmp/ansible-xml-namespaced-beers.xml - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + src: results/test-add-namespaced-children-elements.xml + dest: /tmp/ansible-xml-namespaced-beers.xml + check_mode: true + diff: true + register: comparison - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - add_namespaced_children_elements is changed - comparison is not changed # identical - #command: diff -u {{ role_path }}/results/test-add-namespaced-children-elements.xml /tmp/ansible-xml-namespaced-beers.xml + # command: diff -u {{ role_path }}/results/test-add-namespaced-children-elements.xml /tmp/ansible-xml-namespaced-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-children-elements-xml.yml b/tests/integration/targets/xml/tasks/test-children-elements-xml.yml index 1c8c2b804d..630699dd56 100644 --- a/tests/integration/targets/xml/tasks/test-children-elements-xml.yml +++ b/tests/integration/targets/xml/tasks/test-children-elements-xml.yml @@ -3,35 +3,35 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: fixtures/ansible-xml-beers.xml - dest: /tmp/ansible-xml-beers.xml +- name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml - - name: Add child element with xml format - xml: - path: /tmp/ansible-xml-beers.xml - xpath: /business/beers - input_type: xml - add_children: +- name: Add child element with xml 
format + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/beers + input_type: xml + add_children: - '<beer>Old Rasputin</beer>' - register: children_elements + register: children_elements - - name: Add trailing newline - shell: echo "" >> /tmp/ansible-xml-beers.xml +- name: Add trailing newline + shell: echo "" >> /tmp/ansible-xml-beers.xml - - name: Compare to expected result - copy: - src: results/test-add-children-elements.xml - dest: /tmp/ansible-xml-beers.xml - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + src: results/test-add-children-elements.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: true + diff: true + register: comparison - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - children_elements is changed - comparison is not changed # identical - #command: diff -u {{ role_path }}/results/test-add-children-elements.xml /tmp/ansible-xml-beers.xml + # command: diff -u {{ role_path }}/results/test-add-children-elements.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-count-unicode.yml b/tests/integration/targets/xml/tasks/test-count-unicode.yml index 118e2986db..e54e466902 100644 --- a/tests/integration/targets/xml/tasks/test-count-unicode.yml +++ b/tests/integration/targets/xml/tasks/test-count-unicode.yml @@ -3,21 +3,21 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: fixtures/ansible-xml-beers-unicode.xml - dest: /tmp/ansible-xml-beers-unicode.xml +- name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers-unicode.xml + dest: /tmp/ansible-xml-beers-unicode.xml - - name: Count child element - xml: - path: /tmp/ansible-xml-beers-unicode.xml - xpath: /business/beers/beer - count: true - register: beers +- name: Count child element + xml: + path: /tmp/ansible-xml-beers-unicode.xml + xpath: /business/beers/beer + count: true + register: beers - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - beers is not changed - beers.count == 2 diff --git a/tests/integration/targets/xml/tasks/test-count.yml b/tests/integration/targets/xml/tasks/test-count.yml index 79be9402fe..1e4b043a02 100644 --- a/tests/integration/targets/xml/tasks/test-count.yml +++ b/tests/integration/targets/xml/tasks/test-count.yml @@ -3,21 +3,21 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: fixtures/ansible-xml-beers.xml - dest: /tmp/ansible-xml-beers.xml +- name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml - - name: Add child element - xml: - path: /tmp/ansible-xml-beers.xml - xpath: /business/beers/beer - count: true - register: beers +- name: Count child element + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/beers/beer + count: true + register: beers - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - beers is not changed - beers.count == 3 diff --git a/tests/integration/targets/xml/tasks/test-get-element-content-unicode.yml b/tests/integration/targets/xml/tasks/test-get-element-content-unicode.yml index 475f962ebe..f57e5fa33d 100644 ---
a/tests/integration/targets/xml/tasks/test-get-element-content-unicode.yml +++ b/tests/integration/targets/xml/tasks/test-get-element-content-unicode.yml @@ -3,34 +3,34 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: fixtures/ansible-xml-beers-unicode.xml - dest: /tmp/ansible-xml-beers-unicode.xml +- name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers-unicode.xml + dest: /tmp/ansible-xml-beers-unicode.xml - - name: Get element attributes - xml: - path: /tmp/ansible-xml-beers-unicode.xml - xpath: /business/rating - content: attribute - register: get_element_attribute +- name: Get element attributes + xml: + path: /tmp/ansible-xml-beers-unicode.xml + xpath: /business/rating + content: attribute + register: get_element_attribute - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - get_element_attribute is not changed - get_element_attribute.matches[0]['rating'] is defined and get_element_attribute.matches[0]['rating']['subjective'] == 'да' - - name: Get element text - xml: - path: /tmp/ansible-xml-beers-unicode.xml - xpath: /business/rating - content: text - register: get_element_text +- name: Get element text + xml: + path: /tmp/ansible-xml-beers-unicode.xml + xpath: /business/rating + content: text + register: get_element_text - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - get_element_text is not changed - get_element_text.matches[0]['rating'] == 'десять' diff --git a/tests/integration/targets/xml/tasks/test-get-element-content.yml b/tests/integration/targets/xml/tasks/test-get-element-content.yml index c75bdb223a..2bef5fd165 100644 --- a/tests/integration/targets/xml/tasks/test-get-element-content.yml +++ b/tests/integration/targets/xml/tasks/test-get-element-content.yml @@ -3,49 +3,49 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: fixtures/ansible-xml-beers.xml - dest: /tmp/ansible-xml-beers.xml +- name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml - - name: Get element attributes - xml: - path: /tmp/ansible-xml-beers.xml - xpath: /business/rating - content: attribute - register: get_element_attribute +- name: Get element attributes + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/rating + content: attribute + register: get_element_attribute - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - get_element_attribute is not changed - get_element_attribute.matches[0]['rating'] is defined - get_element_attribute.matches[0]['rating']['subjective'] == 'true' - - name: Get element attributes (should fail) - xml: - path: /tmp/ansible-xml-beers.xml - xpath: /business/rating - content: attribute - attribute: subjective - register: get_element_attribute_wrong - ignore_errors: true +- name: Get element attributes (should fail) + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/rating + content: attribute + attribute: subjective + register: get_element_attribute_wrong + ignore_errors: true - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - get_element_attribute_wrong is failed - - name: Get element text - xml: - 
path: /tmp/ansible-xml-beers.xml - xpath: /business/rating - content: text - register: get_element_text +- name: Get element text + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/rating + content: text + register: get_element_text - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - get_element_text is not changed - get_element_text.matches[0]['rating'] == '10' diff --git a/tests/integration/targets/xml/tasks/test-mutually-exclusive-attributes.yml b/tests/integration/targets/xml/tasks/test-mutually-exclusive-attributes.yml index 33f129e2e6..90bd14f7ed 100644 --- a/tests/integration/targets/xml/tasks/test-mutually-exclusive-attributes.yml +++ b/tests/integration/targets/xml/tasks/test-mutually-exclusive-attributes.yml @@ -3,24 +3,24 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: fixtures/ansible-xml-beers.xml - dest: /tmp/ansible-xml-beers.xml +- name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml - - name: Specify both children to add and a value - xml: - path: /tmp/ansible-xml-beers.xml - add_children: - - child01 - - child02 - value: conflict! - register: module_output - ignore_errors: true +- name: Specify both children to add and a value + xml: + path: /tmp/ansible-xml-beers.xml + add_children: + - child01 + - child02 + value: conflict! + register: module_output + ignore_errors: true - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - module_output is not changed - module_output is failed diff --git a/tests/integration/targets/xml/tasks/test-pretty-print-only.yml b/tests/integration/targets/xml/tasks/test-pretty-print-only.yml index 03d3299aa7..bd419e2313 100644 --- a/tests/integration/targets/xml/tasks/test-pretty-print-only.yml +++ b/tests/integration/targets/xml/tasks/test-pretty-print-only.yml @@ -3,31 +3,31 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: fixtures/ansible-xml-beers.xml - dest: /tmp/ansible-xml-beers.xml.orig +- name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml.orig - - name: Remove spaces from test fixture - shell: sed 's/^[ ]*//g' < /tmp/ansible-xml-beers.xml.orig > /tmp/ansible-xml-beers.xml +- name: Remove spaces from test fixture + shell: sed 's/^[ ]*//g' < /tmp/ansible-xml-beers.xml.orig > /tmp/ansible-xml-beers.xml - - name: Pretty print without modification - xml: - path: /tmp/ansible-xml-beers.xml - pretty_print: true - register: pretty_print_only +- name: Pretty print without modification + xml: + path: /tmp/ansible-xml-beers.xml + pretty_print: true + register: pretty_print_only - - name: Compare to expected result - copy: - src: results/test-pretty-print-only.xml - dest: /tmp/ansible-xml-beers.xml - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + src: results/test-pretty-print-only.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: true + diff: true + register: comparison - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - pretty_print_only is changed - comparison is not changed # identical - #command: diff -u {{ role_path 
}}/results/test-pretty-print-only.xml /tmp/ansible-xml-beers.xml + # command: diff -u {{ role_path }}/results/test-pretty-print-only.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-pretty-print.yml b/tests/integration/targets/xml/tasks/test-pretty-print.yml index 51b34502d5..baa8570cf2 100644 --- a/tests/integration/targets/xml/tasks/test-pretty-print.yml +++ b/tests/integration/targets/xml/tasks/test-pretty-print.yml @@ -3,32 +3,32 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: fixtures/ansible-xml-beers.xml - dest: /tmp/ansible-xml-beers.xml +- name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml - - name: Pretty print - xml: - path: /tmp/ansible-xml-beers.xml - xpath: /business/beers - pretty_print: true - add_children: +- name: Pretty print + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/beers + pretty_print: true + add_children: - beer: Old Rasputin - register: pretty_print + register: pretty_print - - name: Compare to expected result - copy: - src: results/test-pretty-print.xml - dest: /tmp/ansible-xml-beers.xml - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + src: results/test-pretty-print.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: true + diff: true + register: comparison - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - pretty_print is changed - comparison is not changed # identical - #command: diff -u {{ role_path }}/results/test-pretty-print.xml /tmp/ansible-xml-beers.xml + # command: diff -u {{ role_path }}/results/test-pretty-print.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-remove-attribute-nochange.yml b/tests/integration/targets/xml/tasks/test-remove-attribute-nochange.yml index 3222bd4368..7e1cc73456 100644 --- a/tests/integration/targets/xml/tasks/test-remove-attribute-nochange.yml +++ b/tests/integration/targets/xml/tasks/test-remove-attribute-nochange.yml @@ -3,30 +3,30 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: results/test-remove-attribute.xml - dest: /tmp/ansible-xml-beers.xml +- name: Setup test fixture + copy: + src: results/test-remove-attribute.xml + dest: /tmp/ansible-xml-beers.xml - - name: Remove non-existing '/business/rating/@subjective' - xml: - path: /tmp/ansible-xml-beers.xml - xpath: /business/rating/@subjective - state: absent - register: remove_attribute +- name: Remove non-existing '/business/rating/@subjective' + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/rating/@subjective + state: absent + register: remove_attribute - - name: Compare to expected result - copy: - src: results/test-remove-attribute.xml - dest: /tmp/ansible-xml-beers.xml - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + src: results/test-remove-attribute.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: true + diff: true + register: comparison - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - remove_attribute is not changed - comparison is not changed # identical - #command: diff -u {{ role_path 
}}/results/test-remove-attribute.xml /tmp/ansible-xml-beers.xml + # command: diff -u {{ role_path }}/results/test-remove-attribute.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-remove-attribute.yml b/tests/integration/targets/xml/tasks/test-remove-attribute.yml index e8952a655e..9b24a37a9f 100644 --- a/tests/integration/targets/xml/tasks/test-remove-attribute.yml +++ b/tests/integration/targets/xml/tasks/test-remove-attribute.yml @@ -3,33 +3,33 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: fixtures/ansible-xml-beers.xml - dest: /tmp/ansible-xml-beers.xml +- name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml - - name: Remove '/business/rating/@subjective' - xml: - path: /tmp/ansible-xml-beers.xml - xpath: /business/rating/@subjective - state: absent - register: remove_attribute +- name: Remove '/business/rating/@subjective' + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/rating/@subjective + state: absent + register: remove_attribute - - name: Add trailing newline - shell: echo "" >> /tmp/ansible-xml-beers.xml +- name: Add trailing newline + shell: echo "" >> /tmp/ansible-xml-beers.xml - - name: Compare to expected result - copy: - src: results/test-remove-attribute.xml - dest: /tmp/ansible-xml-beers.xml - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + src: results/test-remove-attribute.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: true + diff: true + register: comparison - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - remove_attribute is changed - comparison is not changed # identical - #command: diff -u {{ role_path }}/results/test-remove-attribute.xml /tmp/ansible-xml-beers.xml + # command: diff -u {{ role_path }}/results/test-remove-attribute.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-remove-element-nochange.yml b/tests/integration/targets/xml/tasks/test-remove-element-nochange.yml index c1312c5a75..44b1e95a3f 100644 --- a/tests/integration/targets/xml/tasks/test-remove-element-nochange.yml +++ b/tests/integration/targets/xml/tasks/test-remove-element-nochange.yml @@ -3,30 +3,30 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: results/test-remove-element.xml - dest: /tmp/ansible-xml-beers.xml +- name: Setup test fixture + copy: + src: results/test-remove-element.xml + dest: /tmp/ansible-xml-beers.xml - - name: Remove non-existing '/business/rating' - xml: - path: /tmp/ansible-xml-beers.xml - xpath: /business/rating - state: absent - register: remove_element +- name: Remove non-existing '/business/rating' + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/rating + state: absent + register: remove_element - - name: Compare to expected result - copy: - src: results/test-remove-element.xml - dest: /tmp/ansible-xml-beers.xml - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + src: results/test-remove-element.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: true + diff: true + register: comparison - - name: Test expected result - assert: - that: +- name: Test 
expected result + assert: + that: - remove_element is not changed - comparison is not changed # identical - #command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-beers.xml + # command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-remove-element.yml b/tests/integration/targets/xml/tasks/test-remove-element.yml index bea376ba93..c2fc081304 100644 --- a/tests/integration/targets/xml/tasks/test-remove-element.yml +++ b/tests/integration/targets/xml/tasks/test-remove-element.yml @@ -3,33 +3,33 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: fixtures/ansible-xml-beers.xml - dest: /tmp/ansible-xml-beers.xml +- name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml - - name: Remove '/business/rating' - xml: - path: /tmp/ansible-xml-beers.xml - xpath: /business/rating - state: absent - register: remove_element +- name: Remove '/business/rating' + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/rating + state: absent + register: remove_element - - name: Add trailing newline - shell: echo "" >> /tmp/ansible-xml-beers.xml +- name: Add trailing newline + shell: echo "" >> /tmp/ansible-xml-beers.xml - - name: Compare to expected result - copy: - src: results/test-remove-element.xml - dest: /tmp/ansible-xml-beers.xml - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + src: results/test-remove-element.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: true + diff: true + register: comparison - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - remove_element is changed - comparison is not changed # identical - #command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-beers.xml + # command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute-nochange.yml b/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute-nochange.yml index 61b7179ba0..97855ce77b 100644 --- a/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute-nochange.yml +++ b/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute-nochange.yml @@ -3,35 +3,35 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: results/test-remove-namespaced-attribute.xml - dest: /tmp/ansible-xml-namespaced-beers.xml +- name: Setup test fixture + copy: + src: results/test-remove-namespaced-attribute.xml + dest: /tmp/ansible-xml-namespaced-beers.xml - - name: Remove non-existing namespaced '/bus:business/rat:rating/@attr:subjective' - xml: - path: /tmp/ansible-xml-namespaced-beers.xml - xpath: /bus:business/rat:rating/@attr:subjective - namespaces: - bus: http://test.business - ber: http://test.beers - rat: http://test.rating - attr: http://test.attribute - state: absent - register: remove_namespaced_attribute +- name: Remove non-existing namespaced '/bus:business/rat:rating/@attr:subjective' + xml: + path: /tmp/ansible-xml-namespaced-beers.xml + xpath: /bus:business/rat:rating/@attr:subjective + namespaces: 
+ bus: http://test.business + ber: http://test.beers + rat: http://test.rating + attr: http://test.attribute + state: absent + register: remove_namespaced_attribute - - name: Compare to expected result - copy: - src: results/test-remove-namespaced-attribute.xml - dest: /tmp/ansible-xml-namespaced-beers.xml - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + src: results/test-remove-namespaced-attribute.xml + dest: /tmp/ansible-xml-namespaced-beers.xml + check_mode: true + diff: true + register: comparison - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - remove_namespaced_attribute is not changed - comparison is not changed # identical - #command: diff -u {{ role_path }}/results/test-remove-namespaced-attribute.xml /tmp/ansible-xml-namespaced-beers.xml + # command: diff -u {{ role_path }}/results/test-remove-namespaced-attribute.xml /tmp/ansible-xml-namespaced-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute.yml b/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute.yml index a725ee79cf..45e37d41d4 100644 --- a/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute.yml +++ b/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute.yml @@ -3,38 +3,38 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: fixtures/ansible-xml-namespaced-beers.xml - dest: /tmp/ansible-xml-namespaced-beers.xml +- name: Setup test fixture + copy: + src: fixtures/ansible-xml-namespaced-beers.xml + dest: /tmp/ansible-xml-namespaced-beers.xml - - name: Remove namespaced '/bus:business/rat:rating/@attr:subjective' - xml: - path: /tmp/ansible-xml-namespaced-beers.xml - xpath: /bus:business/rat:rating/@attr:subjective - namespaces: - bus: http://test.business - ber: http://test.beers - rat: http://test.rating - attr: http://test.attribute - state: absent - register: remove_namespaced_attribute +- name: Remove namespaced '/bus:business/rat:rating/@attr:subjective' + xml: + path: /tmp/ansible-xml-namespaced-beers.xml + xpath: /bus:business/rat:rating/@attr:subjective + namespaces: + bus: http://test.business + ber: http://test.beers + rat: http://test.rating + attr: http://test.attribute + state: absent + register: remove_namespaced_attribute - - name: Add trailing newline - shell: echo "" >> /tmp/ansible-xml-namespaced-beers.xml +- name: Add trailing newline + shell: echo "" >> /tmp/ansible-xml-namespaced-beers.xml - - name: Compare to expected result - copy: - src: results/test-remove-namespaced-attribute.xml - dest: /tmp/ansible-xml-namespaced-beers.xml - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + src: results/test-remove-namespaced-attribute.xml + dest: /tmp/ansible-xml-namespaced-beers.xml + check_mode: true + diff: true + register: comparison - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - remove_namespaced_attribute is changed - comparison is not changed # identical - #command: diff -u {{ role_path }}/results/test-remove-namespaced-attribute.xml /tmp/ansible-xml-namespaced-beers.xml + # command: diff -u {{ role_path }}/results/test-remove-namespaced-attribute.xml /tmp/ansible-xml-namespaced-beers.xml diff --git 
a/tests/integration/targets/xml/tasks/test-remove-namespaced-element-nochange.yml b/tests/integration/targets/xml/tasks/test-remove-namespaced-element-nochange.yml index fd83c54c32..bda8643643 100644 --- a/tests/integration/targets/xml/tasks/test-remove-namespaced-element-nochange.yml +++ b/tests/integration/targets/xml/tasks/test-remove-namespaced-element-nochange.yml @@ -3,35 +3,35 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: results/test-remove-element.xml - dest: /tmp/ansible-xml-namespaced-beers.xml +- name: Setup test fixture + copy: + src: results/test-remove-element.xml + dest: /tmp/ansible-xml-namespaced-beers.xml - - name: Remove non-existing namespaced '/bus:business/rat:rating' - xml: - path: /tmp/ansible-xml-namespaced-beers.xml - xpath: /bus:business/rat:rating - namespaces: - bus: http://test.business - ber: http://test.beers - rat: http://test.rating - attr: http://test.attribute - state: absent - register: remove_namespaced_element +- name: Remove non-existing namespaced '/bus:business/rat:rating' + xml: + path: /tmp/ansible-xml-namespaced-beers.xml + xpath: /bus:business/rat:rating + namespaces: + bus: http://test.business + ber: http://test.beers + rat: http://test.rating + attr: http://test.attribute + state: absent + register: remove_namespaced_element - - name: Compare to expected result - copy: - src: results/test-remove-element.xml - dest: /tmp/ansible-xml-namespaced-beers.xml - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + src: results/test-remove-element.xml + dest: /tmp/ansible-xml-namespaced-beers.xml + check_mode: true + diff: true + register: comparison - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - remove_namespaced_element is not changed - comparison is not changed # identical - #command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-namespaced-beers.xml + # command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-namespaced-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-remove-namespaced-element.yml b/tests/integration/targets/xml/tasks/test-remove-namespaced-element.yml index c4129f33e2..4bbab437ca 100644 --- a/tests/integration/targets/xml/tasks/test-remove-namespaced-element.yml +++ b/tests/integration/targets/xml/tasks/test-remove-namespaced-element.yml @@ -3,38 +3,38 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: fixtures/ansible-xml-namespaced-beers.xml - dest: /tmp/ansible-xml-namespaced-beers.xml +- name: Setup test fixture + copy: + src: fixtures/ansible-xml-namespaced-beers.xml + dest: /tmp/ansible-xml-namespaced-beers.xml - - name: Remove namespaced '/bus:business/rat:rating' - xml: - path: /tmp/ansible-xml-namespaced-beers.xml - xpath: /bus:business/rat:rating - namespaces: - bus: http://test.business - ber: http://test.beers - rat: http://test.rating - attr: http://test.attribute - state: absent - register: remove_namespaced_element +- name: Remove namespaced '/bus:business/rat:rating' + xml: + path: /tmp/ansible-xml-namespaced-beers.xml + xpath: /bus:business/rat:rating + namespaces: + bus: http://test.business + ber: http://test.beers + rat: 
http://test.rating + attr: http://test.attribute + state: absent + register: remove_namespaced_element - - name: Add trailing newline - shell: echo "" >> /tmp/ansible-xml-namespaced-beers.xml +- name: Add trailing newline + shell: echo "" >> /tmp/ansible-xml-namespaced-beers.xml - - name: Compare to expected result - copy: - src: results/test-remove-element.xml - dest: /tmp/ansible-xml-namespaced-beers.xml - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + src: results/test-remove-element.xml + dest: /tmp/ansible-xml-namespaced-beers.xml + check_mode: true + diff: true + register: comparison - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - remove_namespaced_element is changed - comparison is not changed # identical - #command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-namespaced-beers.xml + # command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-namespaced-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-set-attribute-value-unicode.yml b/tests/integration/targets/xml/tasks/test-set-attribute-value-unicode.yml index bf35bfdd95..a64442a215 100644 --- a/tests/integration/targets/xml/tasks/test-set-attribute-value-unicode.yml +++ b/tests/integration/targets/xml/tasks/test-set-attribute-value-unicode.yml @@ -3,34 +3,34 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: fixtures/ansible-xml-beers.xml - dest: /tmp/ansible-xml-beers.xml +- name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml - - name: Set '/business/rating/@subjective' to 'нет' - xml: - path: /tmp/ansible-xml-beers.xml - xpath: /business/rating - attribute: subjective - value: нет - register: set_attribute_value_unicode +- name: Set '/business/rating/@subjective' to 'нет' + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/rating + attribute: subjective + value: нет + register: set_attribute_value_unicode - - name: Add trailing newline - shell: echo "" >> /tmp/ansible-xml-beers.xml +- name: Add trailing newline + shell: echo "" >> /tmp/ansible-xml-beers.xml - - name: Compare to expected result - copy: - src: results/test-set-attribute-value-unicode.xml - dest: /tmp/ansible-xml-beers.xml - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + src: results/test-set-attribute-value-unicode.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: true + diff: true + register: comparison - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - set_attribute_value_unicode is changed - comparison is not changed # identical - #command: diff -u {{ role_path }}/results/test-set-attribute-value-unicode.xml /tmp/ansible-xml-beers.xml + # command: diff -u {{ role_path }}/results/test-set-attribute-value-unicode.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-set-attribute-value.yml b/tests/integration/targets/xml/tasks/test-set-attribute-value.yml index 2908e00aa3..a5229a61f5 100644 --- a/tests/integration/targets/xml/tasks/test-set-attribute-value.yml +++ b/tests/integration/targets/xml/tasks/test-set-attribute-value.yml @@ -3,34 +3,34 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: fixtures/ansible-xml-beers.xml - dest: /tmp/ansible-xml-beers.xml +- name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml - - name: Set '/business/rating/@subjective' to 'false' - xml: - path: /tmp/ansible-xml-beers.xml - xpath: /business/rating - attribute: subjective - value: 'false' - register: set_attribute_value +- name: Set '/business/rating/@subjective' to 'false' + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/rating + attribute: subjective + value: 'false' + register: set_attribute_value - - name: Add trailing newline - shell: echo "" >> /tmp/ansible-xml-beers.xml +- name: Add trailing newline + shell: echo "" >> /tmp/ansible-xml-beers.xml - - name: Compare to expected result - copy: - src: results/test-set-attribute-value.xml - dest: /tmp/ansible-xml-beers.xml - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + src: results/test-set-attribute-value.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: true + diff: true + register: comparison - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - set_attribute_value is changed - comparison is not changed # identical - #command: diff -u {{ role_path }}/results/test-set-attribute-value.xml /tmp/ansible-xml-beers.xml + # command: diff -u {{ role_path }}/results/test-set-attribute-value.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-set-children-elements-level.yml b/tests/integration/targets/xml/tasks/test-set-children-elements-level.yml index 648f5b25af..48d0de64a3 100644 --- a/tests/integration/targets/xml/tasks/test-set-children-elements-level.yml +++ b/tests/integration/targets/xml/tasks/test-set-children-elements-level.yml @@ -3,79 +3,79 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: fixtures/ansible-xml-beers.xml - dest: /tmp/ansible-xml-beers.xml +- name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml - - name: Set child elements - xml: - path: /tmp/ansible-xml-beers.xml - xpath: /business/beers - set_children: &children - - beer: - alcohol: "0.5" - name: 90 Minute IPA - _: - - Water: - liter: "0.2" - quantity: 200g - - Starch: - quantity: 10g - - Hops: - quantity: 50g - - Yeast: - quantity: 20g - - beer: - alcohol: "0.3" - name: Harvest Pumpkin Ale - _: - - Water: - liter: "0.2" - quantity: 200g - - Hops: - quantity: 25g - - Yeast: - quantity: 20g - register: set_children_elements_level +- name: Set child elements + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/beers + set_children: &children + - beer: + alcohol: "0.5" + name: 90 Minute IPA + _: + - Water: + liter: "0.2" + quantity: 200g + - Starch: + quantity: 10g + - Hops: + quantity: 50g + - Yeast: + quantity: 20g + - beer: + alcohol: "0.3" + name: Harvest Pumpkin Ale + _: + - Water: + liter: "0.2" + quantity: 200g + - Hops: + quantity: 25g + - Yeast: + quantity: 20g + register: set_children_elements_level - - name: Add trailing newline - shell: echo "" >> /tmp/ansible-xml-beers.xml +- name: Add trailing newline + shell: echo "" >> /tmp/ansible-xml-beers.xml - - name: Compare to expected result - copy: - src: 
results/test-set-children-elements-level.xml - dest: /tmp/ansible-xml-beers.xml - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + src: results/test-set-children-elements-level.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: true + diff: true + register: comparison - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - set_children_elements_level is changed - comparison is not changed # identical - #command: diff -u {{ role_path }}/results/test-set-children-elements-level.xml /tmp/ansible-xml-beers.xml + # command: diff -u {{ role_path }}/results/test-set-children-elements-level.xml /tmp/ansible-xml-beers.xml - - name: Set child elements (again) - xml: - path: /tmp/ansible-xml-beers.xml - xpath: /business/beers - set_children: *children - register: set_children_again +- name: Set child elements (again) + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/beers + set_children: *children + register: set_children_again - - name: Compare to expected result - copy: - src: results/test-set-children-elements-level.xml - dest: /tmp/ansible-xml-beers.xml - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + src: results/test-set-children-elements-level.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: true + diff: true + register: comparison - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - set_children_again is not changed - comparison is not changed # identical diff --git a/tests/integration/targets/xml/tasks/test-set-children-elements-unicode.yml b/tests/integration/targets/xml/tasks/test-set-children-elements-unicode.yml index 8c4fc10941..f890c01bc4 100644 --- a/tests/integration/targets/xml/tasks/test-set-children-elements-unicode.yml +++ b/tests/integration/targets/xml/tasks/test-set-children-elements-unicode.yml @@ -3,51 +3,51 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: fixtures/ansible-xml-beers.xml - dest: /tmp/ansible-xml-beers.xml +- name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml - - name: Set child elements - xml: - path: /tmp/ansible-xml-beers.xml - xpath: /business/beers - set_children: &children +- name: Set child elements + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/beers + set_children: &children - beer: Окское - beer: Невское - register: set_children_elements_unicode + register: set_children_elements_unicode - - name: Add trailing newline - shell: echo "" >> /tmp/ansible-xml-beers.xml +- name: Add trailing newline + shell: echo "" >> /tmp/ansible-xml-beers.xml - - name: Compare to expected result - copy: - src: results/test-set-children-elements-unicode.xml - dest: /tmp/ansible-xml-beers.xml - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + src: results/test-set-children-elements-unicode.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: true + diff: true + register: comparison - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - set_children_elements_unicode is changed - comparison is not changed # identical - #command: diff -u {{ role_path }}/results/test-set-children-elements-unicode.xml /tmp/ansible-xml-beers.xml + # command: diff -u {{ 
role_path }}/results/test-set-children-elements-unicode.xml /tmp/ansible-xml-beers.xml - - name: Compare to expected result - copy: - src: results/test-set-children-elements-unicode.xml - dest: /tmp/ansible-xml-beers.xml - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + src: results/test-set-children-elements-unicode.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: true + diff: true + register: comparison - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - set_children_again is not changed - comparison is not changed # identical - #command: diff -u {{ role_path }}/results/test-set-children-elements-unicode.xml /tmp/ansible-xml-beers.xml + # command: diff -u {{ role_path }}/results/test-set-children-elements-unicode.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-set-children-elements-value.yml b/tests/integration/targets/xml/tasks/test-set-children-elements-value.yml new file mode 100644 index 0000000000..17ed24d283 --- /dev/null +++ b/tests/integration/targets/xml/tasks/test-set-children-elements-value.yml @@ -0,0 +1,64 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml + + +- name: Set child elements + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/beers + set_children: &children + - beer: + alcohol: "0.5" + name: 90 Minute IPA + +value: "2" + - beer: + alcohol: "0.3" + name: Harvest Pumpkin Ale + +value: "5" + register: set_children_elements_value + +- name: Add trailing newline + shell: echo "" >> /tmp/ansible-xml-beers.xml + +- name: Compare to expected result + copy: + src: results/test-set-children-elements-value.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: true + diff: true + register: comparison + +- name: Test expected result + assert: + that: + - set_children_elements_value is changed + - comparison is not changed # identical + + +- name: Set child elements (again) + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/beers + set_children: *children + register: set_children_again + +- name: Compare to expected result + copy: + src: results/test-set-children-elements-value.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: true + diff: true + register: comparison + +- name: Test expected result + assert: + that: + - set_children_again is not changed + - comparison is not changed # identical diff --git a/tests/integration/targets/xml/tasks/test-set-children-elements.yml b/tests/integration/targets/xml/tasks/test-set-children-elements.yml index ed9e4a54ee..d2987e83f4 100644 --- a/tests/integration/targets/xml/tasks/test-set-children-elements.yml +++ b/tests/integration/targets/xml/tasks/test-set-children-elements.yml @@ -3,84 +3,84 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: fixtures/ansible-xml-beers.xml - dest: /tmp/ansible-xml-beers.xml +- name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml - - name: Set child elements - empty list - xml: - path: /tmp/ansible-xml-beers.xml - xpath: /business/beers - set_children: [] - register: 
set_children_elements +- name: Set child elements - empty list + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/beers + set_children: [] + register: set_children_elements - - name: Compare to expected result - copy: - src: results/test-set-children-elements-empty-list.xml - dest: /tmp/ansible-xml-beers.xml - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + src: results/test-set-children-elements-empty-list.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: true + diff: true + register: comparison - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - set_children_elements is changed - comparison is not changed # identical - #command: diff -u {{ role_path }}/results/test-set-children-elements.xml /tmp/ansible-xml-beers.xml + # command: diff -u {{ role_path }}/results/test-set-children-elements.xml /tmp/ansible-xml-beers.xml - - name: Setup test fixture - copy: - src: fixtures/ansible-xml-beers.xml - dest: /tmp/ansible-xml-beers.xml +- name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml - - name: Set child elements - xml: - path: /tmp/ansible-xml-beers.xml - xpath: /business/beers - set_children: &children - - beer: 90 Minute IPA - - beer: Harvest Pumpkin Ale - register: set_children_elements +- name: Set child elements + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/beers + set_children: &children + - beer: 90 Minute IPA + - beer: Harvest Pumpkin Ale + register: set_children_elements - - name: Add trailing newline - shell: echo "" >> /tmp/ansible-xml-beers.xml +- name: Add trailing newline + shell: echo "" >> /tmp/ansible-xml-beers.xml - - name: Compare to expected result - copy: - src: results/test-set-children-elements.xml - dest: /tmp/ansible-xml-beers.xml - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + src: results/test-set-children-elements.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: true + diff: true + register: comparison - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - set_children_elements is changed - comparison is not changed # identical - #command: diff -u {{ role_path }}/results/test-set-children-elements.xml /tmp/ansible-xml-beers.xml + # command: diff -u {{ role_path }}/results/test-set-children-elements.xml /tmp/ansible-xml-beers.xml - - name: Set child elements (again) - xml: - path: /tmp/ansible-xml-beers.xml - xpath: /business/beers - set_children: *children - register: set_children_again +- name: Set child elements (again) + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/beers + set_children: *children + register: set_children_again - - name: Compare to expected result - copy: - src: results/test-set-children-elements.xml - dest: /tmp/ansible-xml-beers.xml - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + src: results/test-set-children-elements.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: true + diff: true + register: comparison - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - set_children_again is not changed - comparison is not changed # identical - #command: diff -u {{ role_path }}/results/test-set-children-elements.xml /tmp/ansible-xml-beers.xml + # command: diff -u {{ role_path }}/results/test-set-children-elements.xml /tmp/ansible-xml-beers.xml diff --git 
a/tests/integration/targets/xml/tasks/test-set-element-value-empty.yml b/tests/integration/targets/xml/tasks/test-set-element-value-empty.yml index 4041bf9106..3c5212db45 100644 --- a/tests/integration/targets/xml/tasks/test-set-element-value-empty.yml +++ b/tests/integration/targets/xml/tasks/test-set-element-value-empty.yml @@ -3,33 +3,33 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: fixtures/ansible-xml-beers.xml - dest: /tmp/ansible-xml-beers.xml +- name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml - - name: Set '/business/website/address' to empty string. - xml: - path: /tmp/ansible-xml-beers.xml - xpath: /business/website/address - value: '' - register: set_element_value_empty +- name: Set '/business/website/address' to empty string. + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/website/address + value: '' + register: set_element_value_empty - - name: Add trailing newline - shell: echo "" >> /tmp/ansible-xml-beers.xml +- name: Add trailing newline + shell: echo "" >> /tmp/ansible-xml-beers.xml - - name: Compare to expected result - copy: - src: results/test-set-element-value-empty.xml - dest: /tmp/ansible-xml-beers.xml - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + src: results/test-set-element-value-empty.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: true + diff: true + register: comparison - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - set_element_value_empty is changed - comparison is not changed # identical - #command: diff -u {{ role_path }}/results/test-set-element-value-empty.xml /tmp/ansible-xml-beers.xml + # command: diff -u {{ role_path }}/results/test-set-element-value-empty.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-set-element-value-unicode.yml b/tests/integration/targets/xml/tasks/test-set-element-value-unicode.yml index 616f26ddc8..7dec91b920 100644 --- a/tests/integration/targets/xml/tasks/test-set-element-value-unicode.yml +++ b/tests/integration/targets/xml/tasks/test-set-element-value-unicode.yml @@ -3,48 +3,48 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: fixtures/ansible-xml-beers.xml - dest: /tmp/ansible-xml-beers.xml +- name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml - - name: Add 2nd '/business/rating' with value 'пять' - xml: - path: /tmp/ansible-xml-beers.xml - xpath: /business - add_children: +- name: Add 2nd '/business/rating' with value 'пять' + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business + add_children: - rating: пять - - name: Set '/business/rating' to 'пять' - xml: - path: /tmp/ansible-xml-beers.xml - xpath: /business/rating - value: пять - register: set_element_first_run +- name: Set '/business/rating' to 'пять' + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/rating + value: пять + register: set_element_first_run - - name: Set '/business/rating' to 'false'... again - xml: - path: /tmp/ansible-xml-beers.xml - xpath: /business/rating - value: пять - register: set_element_second_run +- name: Set '/business/rating' to 'пять'...
again + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/rating + value: пять + register: set_element_second_run - - name: Add trailing newline - shell: echo "" >> /tmp/ansible-xml-beers.xml +- name: Add trailing newline + shell: echo "" >> /tmp/ansible-xml-beers.xml - - name: Compare to expected result - copy: - src: results/test-set-element-value-unicode.xml - dest: /tmp/ansible-xml-beers.xml - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + src: results/test-set-element-value-unicode.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: true + diff: true + register: comparison - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - set_element_first_run is changed - set_element_second_run is not changed - comparison is not changed # identical - #command: diff -u {{ role_path }}/results/test-set-element-value-unicode.xml /tmp/ansible-xml-beers.xml + # command: diff -u {{ role_path }}/results/test-set-element-value-unicode.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-set-element-value.yml b/tests/integration/targets/xml/tasks/test-set-element-value.yml index b563b25766..83b0840bac 100644 --- a/tests/integration/targets/xml/tasks/test-set-element-value.yml +++ b/tests/integration/targets/xml/tasks/test-set-element-value.yml @@ -3,48 +3,48 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: fixtures/ansible-xml-beers.xml - dest: /tmp/ansible-xml-beers.xml +- name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml - - name: Add 2nd '/business/rating' with value '5' - xml: - path: /tmp/ansible-xml-beers.xml - xpath: /business - add_children: +- name: Add 2nd '/business/rating' with value '5' + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business + add_children: - rating: '5' - - name: Set '/business/rating' to '5' - xml: - path: /tmp/ansible-xml-beers.xml - xpath: /business/rating - value: '5' - register: set_element_first_run +- name: Set '/business/rating' to '5' + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/rating + value: '5' + register: set_element_first_run - - name: Set '/business/rating' to '5'... again - xml: - path: /tmp/ansible-xml-beers.xml - xpath: /business/rating - value: '5' - register: set_element_second_run +- name: Set '/business/rating' to '5'... 
again + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/rating + value: '5' + register: set_element_second_run - - name: Add trailing newline - shell: echo "" >> /tmp/ansible-xml-beers.xml +- name: Add trailing newline + shell: echo "" >> /tmp/ansible-xml-beers.xml - - name: Compare to expected result - copy: - src: results/test-set-element-value.xml - dest: /tmp/ansible-xml-beers.xml - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + src: results/test-set-element-value.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: true + diff: true + register: comparison - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - set_element_first_run is changed - set_element_second_run is not changed - comparison is not changed # identical - #command: diff -u {{ role_path }}/results/test-set-element-value.xml /tmp/ansible-xml-beers.xml + # command: diff -u {{ role_path }}/results/test-set-element-value.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-set-namespaced-attribute-value.yml b/tests/integration/targets/xml/tasks/test-set-namespaced-attribute-value.yml index 7c1bbd2376..0c1992730d 100644 --- a/tests/integration/targets/xml/tasks/test-set-namespaced-attribute-value.yml +++ b/tests/integration/targets/xml/tasks/test-set-namespaced-attribute-value.yml @@ -3,39 +3,39 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: fixtures/ansible-xml-namespaced-beers.xml - dest: /tmp/ansible-xml-namespaced-beers.xml +- name: Setup test fixture + copy: + src: fixtures/ansible-xml-namespaced-beers.xml + dest: /tmp/ansible-xml-namespaced-beers.xml - - name: Set namespaced '/bus:business/rat:rating/@attr:subjective' to 'false' - xml: - path: /tmp/ansible-xml-namespaced-beers.xml - xpath: /bus:business/rat:rating - namespaces: - bus: http://test.business - ber: http://test.beers - rat: http://test.rating - attr: http://test.attribute - attribute: attr:subjective - value: 'false' - register: set_namespaced_attribute_value +- name: Set namespaced '/bus:business/rat:rating/@attr:subjective' to 'false' + xml: + path: /tmp/ansible-xml-namespaced-beers.xml + xpath: /bus:business/rat:rating + namespaces: + bus: http://test.business + ber: http://test.beers + rat: http://test.rating + attr: http://test.attribute + attribute: attr:subjective + value: 'false' + register: set_namespaced_attribute_value - - name: Add trailing newline - shell: echo "" >> /tmp/ansible-xml-namespaced-beers.xml +- name: Add trailing newline + shell: echo "" >> /tmp/ansible-xml-namespaced-beers.xml - - name: Compare to expected result - copy: - src: results/test-set-namespaced-attribute-value.xml - dest: /tmp/ansible-xml-namespaced-beers.xml - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + src: results/test-set-namespaced-attribute-value.xml + dest: /tmp/ansible-xml-namespaced-beers.xml + check_mode: true + diff: true + register: comparison - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - set_namespaced_attribute_value is changed - comparison is not changed # identical - #command: diff -u {{ role_path }}/results/test-set-namespaced-attribute-value.xml /tmp/ansible-xml-namespaced-beers.xml + # command: diff -u {{ role_path 
}}/results/test-set-namespaced-attribute-value.xml /tmp/ansible-xml-namespaced-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-set-namespaced-children-elements.yml b/tests/integration/targets/xml/tasks/test-set-namespaced-children-elements.yml index e6ed1bdecc..dbda409d07 100644 --- a/tests/integration/targets/xml/tasks/test-set-namespaced-children-elements.yml +++ b/tests/integration/targets/xml/tasks/test-set-namespaced-children-elements.yml @@ -3,59 +3,59 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: fixtures/ansible-xml-namespaced-beers.xml - dest: /tmp/ansible-xml-namespaced-beers-xml.xml +- name: Setup test fixture + copy: + src: fixtures/ansible-xml-namespaced-beers.xml + dest: /tmp/ansible-xml-namespaced-beers-xml.xml - - name: Set child elements - xml: - path: /tmp/ansible-xml-namespaced-beers-xml.xml - xpath: /bus:business/ber:beers - namespaces: - bus: http://test.business - ber: http://test.beers - set_children: +- name: Set child elements + xml: + path: /tmp/ansible-xml-namespaced-beers-xml.xml + xpath: /bus:business/ber:beers + namespaces: + bus: http://test.business + ber: http://test.beers + set_children: - beer: 90 Minute IPA - beer: Harvest Pumpkin Ale - - name: Copy state after first set_children - copy: - src: /tmp/ansible-xml-namespaced-beers.xml - dest: /tmp/ansible-xml-namespaced-beers-1.xml - remote_src: true +- name: Copy state after first set_children + copy: + src: /tmp/ansible-xml-namespaced-beers-xml.xml + dest: /tmp/ansible-xml-namespaced-beers-1.xml + remote_src: true - - name: Set child elements again - xml: - path: /tmp/ansible-xml-namespaced-beers-xml.xml - xpath: /bus:business/ber:beers - namespaces: - bus: http://test.business - ber: http://test.beers - set_children: +- name: Set child elements again + xml: + path: /tmp/ansible-xml-namespaced-beers-xml.xml + xpath: /bus:business/ber:beers + namespaces: + bus: http://test.business + ber: http://test.beers + set_children: - beer: 90 Minute IPA - beer: Harvest Pumpkin Ale - register: set_children_again + register: set_children_again - - name: Copy state after second set_children - copy: - src: /tmp/ansible-xml-namespaced-beers.xml - dest: /tmp/ansible-xml-namespaced-beers-2.xml - remote_src: true +- name: Copy state after second set_children + copy: + src: /tmp/ansible-xml-namespaced-beers-xml.xml + dest: /tmp/ansible-xml-namespaced-beers-2.xml + remote_src: true - - name: Compare to expected result - copy: - src: /tmp/ansible-xml-namespaced-beers-1.xml - dest: /tmp/ansible-xml-namespaced-beers-2.xml - remote_src: true - check_mode: true - diff: true - register: comparison - #command: diff /tmp/ansible-xml-namespaced-beers-1.xml /tmp/ansible-xml-namespaced-beers-2.xml +- name: Compare to expected result + copy: + src: /tmp/ansible-xml-namespaced-beers-1.xml + dest: /tmp/ansible-xml-namespaced-beers-2.xml + remote_src: true + check_mode: true + diff: true + register: comparison + # command: diff /tmp/ansible-xml-namespaced-beers-1.xml /tmp/ansible-xml-namespaced-beers-2.xml - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - set_children_again is not changed # idempotency - set_namespaced_attribute_value is changed - comparison is not changed # identical diff --git a/tests/integration/targets/xml/tasks/test-set-namespaced-element-value.yml
b/tests/integration/targets/xml/tasks/test-set-namespaced-element-value.yml index 9944da8a55..6bdcd2e0e6 100644 --- a/tests/integration/targets/xml/tasks/test-set-namespaced-element-value.yml +++ b/tests/integration/targets/xml/tasks/test-set-namespaced-element-value.yml @@ -3,51 +3,51 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Setup test fixture - copy: - src: fixtures/ansible-xml-namespaced-beers.xml - dest: /tmp/ansible-xml-namespaced-beers.xml +- name: Setup test fixture + copy: + src: fixtures/ansible-xml-namespaced-beers.xml + dest: /tmp/ansible-xml-namespaced-beers.xml - - name: Set namespaced '/bus:business/rat:rating' to '11' - xml: - path: /tmp/ansible-xml-namespaced-beers.xml - namespaces: - bus: http://test.business - ber: http://test.beers - rat: http://test.rating - attr: http://test.attribute - xpath: /bus:business/rat:rating - value: '11' - register: set_element_first_run +- name: Set namespaced '/bus:business/rat:rating' to '11' + xml: + path: /tmp/ansible-xml-namespaced-beers.xml + namespaces: + bus: http://test.business + ber: http://test.beers + rat: http://test.rating + attr: http://test.attribute + xpath: /bus:business/rat:rating + value: '11' + register: set_element_first_run - - name: Set namespaced '/bus:business/rat:rating' to '11' again - xml: - path: /tmp/ansible-xml-namespaced-beers.xml - namespaces: - bus: http://test.business - ber: http://test.beers - rat: http://test.rating - attr: http://test.attribute - xpath: /bus:business/rat:rating - value: '11' - register: set_element_second_run +- name: Set namespaced '/bus:business/rat:rating' to '11' again + xml: + path: /tmp/ansible-xml-namespaced-beers.xml + namespaces: + bus: http://test.business + ber: http://test.beers + rat: http://test.rating + attr: http://test.attribute + xpath: /bus:business/rat:rating + value: '11' + register: set_element_second_run - - name: Add trailing newline - shell: echo "" >> /tmp/ansible-xml-namespaced-beers.xml +- name: Add trailing newline + shell: echo "" >> /tmp/ansible-xml-namespaced-beers.xml - - name: Compare to expected result - copy: - src: results/test-set-namespaced-element-value.xml - dest: /tmp/ansible-xml-namespaced-beers.xml - check_mode: true - diff: true - register: comparison - #command: diff -u {{ role_path }}/results/test-set-namespaced-element-value.xml /tmp/ansible-xml-namespaced-beers.xml +- name: Compare to expected result + copy: + src: results/test-set-namespaced-element-value.xml + dest: /tmp/ansible-xml-namespaced-beers.xml + check_mode: true + diff: true + register: comparison + # command: diff -u {{ role_path }}/results/test-set-namespaced-element-value.xml /tmp/ansible-xml-namespaced-beers.xml - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - set_element_first_run is changed - set_element_second_run is not changed - comparison is not changed # identical diff --git a/tests/integration/targets/xml/tasks/test-xmlstring.yml b/tests/integration/targets/xml/tasks/test-xmlstring.yml index 1c2e4de4a8..c7339742b1 100644 --- a/tests/integration/targets/xml/tasks/test-xmlstring.yml +++ b/tests/integration/targets/xml/tasks/test-xmlstring.yml @@ -3,83 +3,83 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later - - name: Copy expected results to remote - copy: - src: "results/{{ 
item }}" - dest: "/tmp/{{ item }}" - with_items: - - test-pretty-print.xml - - test-pretty-print-only.xml +- name: Copy expected results to remote + copy: + src: "results/{{ item }}" + dest: "/tmp/{{ item }}" + with_items: + - test-pretty-print.xml + - test-pretty-print-only.xml - # NOTE: Jinja2 templating eats trailing newlines - - name: Read from xmlstring (not using pretty_print) - xml: - xmlstring: "{{ lookup('file', '{{ role_path }}/fixtures/ansible-xml-beers.xml') }}" - xpath: . - register: xmlresponse +# NOTE: Jinja2 templating eats trailing newlines +- name: Read from xmlstring (not using pretty_print) + xml: + xmlstring: "{{ lookup('file', '{{ role_path }}/fixtures/ansible-xml-beers.xml') }}" + xpath: . + register: xmlresponse - - name: Compare to expected result - copy: - content: "{{ xmlresponse.xmlstring }}\n" - dest: '/tmp/test-pretty-print-only.xml' - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + content: "{{ xmlresponse.xmlstring }}\n" + dest: '/tmp/test-pretty-print-only.xml' + check_mode: true + diff: true + register: comparison - - name: Test expected result - assert: - that: +- name: Test expected result + assert: + that: - xmlresponse is not changed - comparison is not changed # identical - #command: diff -u {{ role_path }}/results/test-pretty-print-only.xml /tmp/ansible-xml-beers.xml + # command: diff -u {{ role_path }}/results/test-pretty-print-only.xml /tmp/ansible-xml-beers.xml - # NOTE: Jinja2 templating eats trailing newlines - - name: Read from xmlstring (using pretty_print) - xml: - xmlstring: "{{ lookup('file', '{{ role_path }}/fixtures/ansible-xml-beers.xml') }}" - pretty_print: true - register: xmlresponse +# NOTE: Jinja2 templating eats trailing newlines +- name: Read from xmlstring (using pretty_print) + xml: + xmlstring: "{{ lookup('file', '{{ role_path }}/fixtures/ansible-xml-beers.xml') }}" + pretty_print: true + register: xmlresponse - - name: Compare to expected result - copy: - content: '{{ xmlresponse.xmlstring }}' - dest: '/tmp/test-pretty-print-only.xml' - check_mode: true - diff: true - register: comparison +- name: Compare to expected result + copy: + content: '{{ xmlresponse.xmlstring }}' + dest: '/tmp/test-pretty-print-only.xml' + check_mode: true + diff: true + register: comparison - # FIXME: This change is related to the newline added by pretty_print - - name: Test expected result - assert: - that: +# FIXME: This change is related to the newline added by pretty_print +- name: Test expected result + assert: + that: - xmlresponse is changed - comparison is not changed # identical - #command: diff -u {{ role_path }}/results/test-pretty-print-only.xml /tmp/ansible-xml-beers.xml + # command: diff -u {{ role_path }}/results/test-pretty-print-only.xml /tmp/ansible-xml-beers.xml - # NOTE: Jinja2 templating eats trailing newlines - - name: Read from xmlstring - xml: - xmlstring: "{{ lookup('file', '{{ role_path }}/fixtures/ansible-xml-beers.xml') }}" - xpath: /business/beers - pretty_print: true - add_children: +# NOTE: Jinja2 templating eats trailing newlines +- name: Read from xmlstring + xml: + xmlstring: "{{ lookup('file', '{{ role_path }}/fixtures/ansible-xml-beers.xml') }}" + xpath: /business/beers + pretty_print: true + add_children: - beer: Old Rasputin - register: xmlresponse_modification + register: xmlresponse_modification - - name: Compare to expected result - copy: - content: '{{ xmlresponse_modification.xmlstring }}' - dest: '/tmp/test-pretty-print.xml' - check_mode: true - diff: true 
- register: comparison +- name: Compare to expected result + copy: + content: '{{ xmlresponse_modification.xmlstring }}' + dest: '/tmp/test-pretty-print.xml' + check_mode: true + diff: true + register: comparison - # FIXME: This change is related to the newline added by pretty_print - - name: Test expected result - assert: - that: +# FIXME: This change is related to the newline added by pretty_print +- name: Test expected result + assert: + that: - xmlresponse_modification is changed - comparison is not changed # identical - #command: diff -u {{ role_path }}/results/test-pretty-print.xml /tmp/ansible-xml-beers.xml + # command: diff -u {{ role_path }}/results/test-pretty-print.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/vars/main.yml b/tests/integration/targets/xml/vars/main.yml index a8dfc23962..a7b08c3137 100644 --- a/tests/integration/targets/xml/vars/main.yml +++ b/tests/integration/targets/xml/vars/main.yml @@ -6,6 +6,6 @@ # -*- mode: yaml -* bad_beers: -- beer: "Natty Lite" -- beer: "Miller Lite" -- beer: "Coors Lite" + - beer: "Natty Lite" + - beer: "Miller Lite" + - beer: "Coors Lite" diff --git a/tests/integration/targets/yarn/tasks/run.yml b/tests/integration/targets/yarn/tasks/run.yml index 0d7d6fb421..d48eacc4d4 100644 --- a/tests/integration/targets/yarn/tasks/run.yml +++ b/tests/integration/targets/yarn/tasks/run.yml @@ -33,7 +33,7 @@ # Set vars for our test harness - vars: - #node_bin_path: "/usr/local/lib/nodejs/node-v{{nodejs_version}}/bin" + # node_bin_path: "/usr/local/lib/nodejs/node-v{{nodejs_version}}/bin" node_bin_path: "/usr/local/lib/nodejs/{{ nodejs_path }}/bin" yarn_bin_path: "{{ remote_tmp_dir }}/yarn-v{{ yarn_version }}/bin" package: 'iconv-lite' diff --git a/tests/integration/targets/zpool/aliases b/tests/integration/targets/zpool/aliases new file mode 100644 index 0000000000..083656f786 --- /dev/null +++ b/tests/integration/targets/zpool/aliases @@ -0,0 +1,15 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +azp/posix/3 +azp/posix/vm +destructive +needs/privileged +skip/aix +skip/freebsd +skip/osx +skip/macos +skip/rhel +skip/docker +skip/alpine # TODO: figure out what goes wrong diff --git a/tests/integration/targets/zpool/defaults/main.yml b/tests/integration/targets/zpool/defaults/main.yml new file mode 100644 index 0000000000..e55a02c00b --- /dev/null +++ b/tests/integration/targets/zpool/defaults/main.yml @@ -0,0 +1,34 @@ +--- +# Copyright (c) 2025, Tom Hesse +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +zpool_single_disk_config: + - "{{ remote_tmp_dir }}/disk0.img" + +zpool_mirror_disk_config: + - "{{ remote_tmp_dir }}/disk1.img" + - "{{ remote_tmp_dir }}/disk2.img" + +zpool_raidz_disk_config: + - "{{ remote_tmp_dir }}/disk3.img" + - "{{ remote_tmp_dir }}/disk4.img" + +zpool_vdevs_disk_config: + vdev1: + - "{{ remote_tmp_dir }}/disk5.img" + vdev2: + - "{{ remote_tmp_dir }}/disk6.img" + vdev3: + - "{{ remote_tmp_dir }}/disk7.img" + - "{{ remote_tmp_dir }}/disk8.img" + vdev4: + - "{{ remote_tmp_dir }}/disk9.img" + - "{{ remote_tmp_dir }}/disk10.img" + +zpool_disk_configs: "{{ zpool_single_disk_config + zpool_mirror_disk_config + zpool_raidz_disk_config + (zpool_vdevs_disk_config.values() | flatten) }}" + +zpool_single_disk_pool_name: spool 
+zpool_mirror_disk_pool_name: mpool +zpool_raidz_disk_pool_name: rpool +zpool_generic_pool_name: tank diff --git a/tests/integration/targets/zpool/meta/main.yml b/tests/integration/targets/zpool/meta/main.yml new file mode 100644 index 0000000000..33f3a16566 --- /dev/null +++ b/tests/integration/targets/zpool/meta/main.yml @@ -0,0 +1,7 @@ +--- +# Copyright (c) 2025, Tom Hesse +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +dependencies: + - setup_remote_tmp_dir diff --git a/tests/integration/targets/zpool/tasks/add_remove_vdevs.yml b/tests/integration/targets/zpool/tasks/add_remove_vdevs.yml new file mode 100644 index 0000000000..f2cd7c55b9 --- /dev/null +++ b/tests/integration/targets/zpool/tasks/add_remove_vdevs.yml @@ -0,0 +1,147 @@ +--- +# Copyright (c) 2025, Tom Hesse +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Test adding a single disk vdev + block: + - name: Ensure a single disk pool exists + community.general.zpool: + name: "{{ zpool_generic_pool_name }}" + vdevs: + - disks: "{{ zpool_vdevs_disk_config.vdev1 }}" + state: present + + - name: Add a single disk vdev + community.general.zpool: + name: "{{ zpool_generic_pool_name }}" + vdevs: + - disks: "{{ zpool_vdevs_disk_config.vdev1 }}" + - disks: "{{ zpool_vdevs_disk_config.vdev2 }}" + state: present + + - name: Check if vdev was added + ansible.builtin.command: + cmd: "zpool status -P -L {{ zpool_generic_pool_name }}" + register: single_disk_pool_check + changed_when: false + + - name: Assert that added disk is present + ansible.builtin.assert: + that: + - "zpool_vdevs_disk_config.vdev2[0] in single_disk_pool_check.stdout" + + - name: Ensure the single disk pool is absent + community.general.zpool: + name: "{{ zpool_generic_pool_name }}" + state: absent + +- name: Test adding a mirror vdev + block: + - name: Ensure a single disk pool exists + community.general.zpool: + name: "{{ zpool_generic_pool_name }}" + vdevs: + - disks: "{{ zpool_vdevs_disk_config.vdev1 }}" + state: present + + - name: Add a mirror vdev + community.general.zpool: + name: "{{ zpool_generic_pool_name }}" + force: true # This is necessary because of the mismatched replication level + vdevs: + - disks: "{{ zpool_vdevs_disk_config.vdev1 }}" + - type: mirror + disks: "{{ zpool_vdevs_disk_config.vdev3 }}" + state: present + + - name: Check if vdev was added + ansible.builtin.command: + cmd: "zpool status -P -L {{ zpool_generic_pool_name }}" + register: mirror_pool_check + changed_when: false + + - name: Assert that added vdev is present + ansible.builtin.assert: + that: + - "zpool_vdevs_disk_config.vdev3[0] in mirror_pool_check.stdout" + - "zpool_vdevs_disk_config.vdev3[1] in mirror_pool_check.stdout" + + - name: Ensure the single disk pool is absent + community.general.zpool: + name: "{{ zpool_generic_pool_name }}" + state: absent + +- name: Test adding a raidz vdev + block: + - name: Ensure a single disk pool exists + community.general.zpool: + name: "{{ zpool_generic_pool_name }}" + vdevs: + - disks: "{{ zpool_vdevs_disk_config.vdev1 }}" + state: present + + - name: Add a raidz vdev + community.general.zpool: + name: "{{ zpool_generic_pool_name }}" + force: true # This is necessary because of the mismatched replication level + vdevs: + - disks: "{{ zpool_vdevs_disk_config.vdev1 }}" + - type: raidz + disks: "{{ 
zpool_vdevs_disk_config.vdev3 }}" + state: present + + - name: Check if vdev was added + ansible.builtin.command: + cmd: "zpool status -P -L {{ zpool_generic_pool_name }}" + register: raidz_pool_check + changed_when: false + + - name: Assert that added vdev is present + ansible.builtin.assert: + that: + - "zpool_vdevs_disk_config.vdev3[0] in raidz_pool_check.stdout" + - "zpool_vdevs_disk_config.vdev3[1] in raidz_pool_check.stdout" + + - name: Ensure the single disk pool is absent + community.general.zpool: + name: "{{ zpool_generic_pool_name }}" + state: absent + +- name: Test removing an existing vdev + block: + - name: Ensure a pool with two mirrored vdevs exists + community.general.zpool: + name: "{{ zpool_generic_pool_name }}" + vdevs: + - type: mirror + disks: "{{ zpool_vdevs_disk_config.vdev3 }}" + - type: mirror + disks: "{{ zpool_vdevs_disk_config.vdev4 }}" + state: present + + - name: Remove a vdev + community.general.zpool: + name: "{{ zpool_generic_pool_name }}" + vdevs: + - type: mirror + disks: "{{ zpool_vdevs_disk_config.vdev4 }}" + state: present + + - name: Check if vdev was removed + ansible.builtin.command: + cmd: "zpool status -P -L {{ zpool_generic_pool_name }}" + register: remove_vdev_check + changed_when: false + + - name: Assert that removed vdev is absent + ansible.builtin.assert: + that: + - "zpool_vdevs_disk_config.vdev3[0] not in remove_vdev_check.stdout" + - "zpool_vdevs_disk_config.vdev3[1] not in remove_vdev_check.stdout" + - "'Removal of vdev' in remove_vdev_check.stdout" + + - name: Ensure the pool is absent + community.general.zpool: + name: "{{ zpool_generic_pool_name }}" + state: absent diff --git a/tests/integration/targets/zpool/tasks/create_destroy.yml b/tests/integration/targets/zpool/tasks/create_destroy.yml new file mode 100644 index 0000000000..f327a4f908 --- /dev/null +++ b/tests/integration/targets/zpool/tasks/create_destroy.yml @@ -0,0 +1,123 @@ +--- +# Copyright (c) 2025, Tom Hesse +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Test single disk pool creation + block: + - name: Ensure single disk pool exists + community.general.zpool: + name: "{{ zpool_single_disk_pool_name }}" + vdevs: + - disks: "{{ zpool_single_disk_config }}" + + - name: Check if single disk pool exists + ansible.builtin.command: + cmd: "zpool list -H -o name,health {{ zpool_single_disk_pool_name }}" + register: single_disk_pool_check + changed_when: false + + - name: Assert that single disk pool is online + ansible.builtin.assert: + that: + - "zpool_single_disk_pool_name in single_disk_pool_check.stdout" + - "'ONLINE' in single_disk_pool_check.stdout" + +- name: Test mirror disk pool creation + block: + - name: Ensure mirror disk pool exists + community.general.zpool: + name: "{{ zpool_mirror_disk_pool_name }}" + vdevs: + - type: mirror + disks: "{{ zpool_mirror_disk_config }}" + + - name: Check if mirror disk pool exists + ansible.builtin.command: + cmd: "zpool list -H -o name,health {{ zpool_mirror_disk_pool_name }}" + register: mirror_disk_pool_check + changed_when: false + + - name: Assert that mirror disk pool is online + ansible.builtin.assert: + that: + - "zpool_mirror_disk_pool_name in mirror_disk_pool_check.stdout" + - "'ONLINE' in mirror_disk_pool_check.stdout" + +- name: Test raidz disk pool creation + block: + - name: Ensure raidz disk pool exists + community.general.zpool: + name: "{{ zpool_raidz_disk_pool_name }}" + vdevs: + - type: 
raidz + disks: "{{ zpool_raidz_disk_config }}" + + - name: Check if raidz disk pool exists + ansible.builtin.command: + cmd: "zpool list -H -o name,health {{ zpool_raidz_disk_pool_name }}" + register: raidz_disk_pool_check + changed_when: false + + - name: Assert that raidz disk pool is online + ansible.builtin.assert: + that: + - "zpool_raidz_disk_pool_name in raidz_disk_pool_check.stdout" + - "'ONLINE' in raidz_disk_pool_check.stdout" + +- name: Test single disk pool deletion + block: + - name: Ensure single disk pool is absent + community.general.zpool: + name: "{{ zpool_single_disk_pool_name }}" + state: absent + + - name: Check if single disk pool is absent + ansible.builtin.command: + cmd: "zpool list -H -o name,health {{ zpool_single_disk_pool_name }}" + register: single_disk_pool_check + ignore_errors: true + changed_when: false + + - name: Assert that single disk pool is absent + ansible.builtin.assert: + that: + - "'no such pool' in single_disk_pool_check.stderr" + +- name: Test mirror disk pool deletion + block: + - name: Ensure mirror disk pool is absent + community.general.zpool: + name: "{{ zpool_mirror_disk_pool_name }}" + state: absent + + - name: Check if mirror disk pool is absent + ansible.builtin.command: + cmd: "zpool list -H -o name,health {{ zpool_mirror_disk_pool_name }}" + register: mirror_disk_pool_check + ignore_errors: true + changed_when: false + + - name: Assert that mirror disk pool is absent + ansible.builtin.assert: + that: + - "'no such pool' in mirror_disk_pool_check.stderr" + +- name: Test raidz disk pool deletion + block: + - name: Ensure raidz disk pool is absent + community.general.zpool: + name: "{{ zpool_raidz_disk_pool_name }}" + state: absent + + - name: Check if raidz disk pool is absent + ansible.builtin.command: + cmd: "zpool list -H -o name,health {{ zpool_raidz_disk_pool_name }}" + register: raidz_disk_pool_check + ignore_errors: true + changed_when: false + + - name: Assert that raidz disk pool is absent + ansible.builtin.assert: + that: + - "'no such pool' in raidz_disk_pool_check.stderr" diff --git a/tests/integration/targets/zpool/tasks/install_requirements_alpine.yml b/tests/integration/targets/zpool/tasks/install_requirements_alpine.yml new file mode 100644 index 0000000000..a734ed4616 --- /dev/null +++ b/tests/integration/targets/zpool/tasks/install_requirements_alpine.yml @@ -0,0 +1,15 @@ +--- +# Copyright (c) 2025, Tom Hesse +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Install required packages + community.general.apk: + name: + - zfs + - zfs-lts + +- name: Load zfs module + community.general.modprobe: + name: zfs + state: present diff --git a/tests/integration/targets/zpool/tasks/install_requirements_ubuntu.yml b/tests/integration/targets/zpool/tasks/install_requirements_ubuntu.yml new file mode 100644 index 0000000000..435f4752fc --- /dev/null +++ b/tests/integration/targets/zpool/tasks/install_requirements_ubuntu.yml @@ -0,0 +1,10 @@ +--- +# Copyright (c) 2025, Tom Hesse +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Install required packages + ansible.builtin.apt: + name: + - zfsutils-linux + - util-linux diff --git a/tests/integration/targets/zpool/tasks/main.yml b/tests/integration/targets/zpool/tasks/main.yml new file mode 100644 index 0000000000..b5eefc2ffd --- /dev/null +++
b/tests/integration/targets/zpool/tasks/main.yml @@ -0,0 +1,25 @@ +--- +# Copyright (c) 2025, Tom Hesse +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Execute integration tests + become: true + block: + - name: Ensure disk files exist + ansible.builtin.command: + cmd: "dd if=/dev/zero of={{ item }} bs=1M count=256 conv=fsync" + creates: "{{ item }}" + loop: "{{ zpool_disk_configs }}" + + - name: Include distribution specific install_requirements.yml + ansible.builtin.include_tasks: install_requirements_{{ ansible_distribution | lower }}.yml + + - name: Include create_destroy.yml + ansible.builtin.include_tasks: create_destroy.yml + + - name: Include add_remove_vdevs.yml + ansible.builtin.include_tasks: add_remove_vdevs.yml + + - name: Include properties.yml + ansible.builtin.include_tasks: properties.yml diff --git a/tests/integration/targets/zpool/tasks/properties.yml b/tests/integration/targets/zpool/tasks/properties.yml new file mode 100644 index 0000000000..ec7a8c3d7b --- /dev/null +++ b/tests/integration/targets/zpool/tasks/properties.yml @@ -0,0 +1,73 @@ +--- +# Copyright (c) 2025, Tom Hesse +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Ensure pool with two mirrored disks exists + community.general.zpool: + name: "{{ zpool_generic_pool_name }}" + pool_properties: + ashift: 12 + filesystem_properties: + compression: false + vdevs: + - type: mirror + disks: "{{ zpool_vdevs_disk_config.vdev3 }}" + - type: mirror + disks: "{{ zpool_vdevs_disk_config.vdev4 }}" + state: present + +- name: Test changing of a pool property + block: + - name: Change ashift from 12 to 13 + community.general.zpool: + name: "{{ zpool_generic_pool_name }}" + pool_properties: + ashift: 13 + vdevs: + - type: mirror + disks: "{{ zpool_vdevs_disk_config.vdev3 }}" + - type: mirror + disks: "{{ zpool_vdevs_disk_config.vdev4 }}" + state: present + + - name: Check ashift + ansible.builtin.command: + cmd: "zpool get -H -o value ashift {{ zpool_generic_pool_name }}" + changed_when: false + register: ashift_check + + - name: Assert ashift has changed + ansible.builtin.assert: + that: + - "'13' in ashift_check.stdout" + +- name: Test changing of a dataset property + block: + - name: Change compression from off to lz4 + community.general.zpool: + name: "{{ zpool_generic_pool_name }}" + filesystem_properties: + compression: lz4 + vdevs: + - type: mirror + disks: "{{ zpool_vdevs_disk_config.vdev3 }}" + - type: mirror + disks: "{{ zpool_vdevs_disk_config.vdev4 }}" + state: present + + - name: Check compression + ansible.builtin.command: + cmd: "zfs get -H -o value compression {{ zpool_generic_pool_name }}" + changed_when: false + register: compression_check + + - name: Assert compression has changed + ansible.builtin.assert: + that: + - "'lz4' in compression_check.stdout" + +- name: Cleanup pool + community.general.zpool: + name: "{{ zpool_generic_pool_name }}" + state: absent diff --git a/tests/integration/targets/zypper/files/post_error.spec b/tests/integration/targets/zypper/files/post_error.spec new file mode 100644 index 0000000000..af5bc03108 --- /dev/null +++ b/tests/integration/targets/zypper/files/post_error.spec @@ -0,0 +1,15 @@ +Summary: Post error RPM +Name: post_error +Version: 1 +Release: 0 +License: GPLv3 +Group: Applications/System +BuildArch: noarch + +%description
+Post error RPM
+
+%files
+
+%post
+exit 1
diff --git a/tests/sanity/extra/aliases.json.license b/tests/integration/targets/zypper/files/post_error.spec.license
similarity index 100%
rename from tests/sanity/extra/aliases.json.license
rename to tests/integration/targets/zypper/files/post_error.spec.license
diff --git a/tests/integration/targets/zypper/tasks/zypper.yml b/tests/integration/targets/zypper/tasks/zypper.yml
index 3eefddbdfc..ae7dc83b4a 100644
--- a/tests/integration/targets/zypper/tasks/zypper.yml
+++ b/tests/integration/targets/zypper/tasks/zypper.yml
@@ -31,8 +31,8 @@
 - name: verify uninstallation of hello
   assert:
     that:
-    - "zypper_result.rc == 0"
-    - "rpm_result.rc == 1"
+      - "zypper_result.rc == 0"
+      - "rpm_result.rc == 1"

 # UNINSTALL AGAIN
 - name: uninstall hello again
   zypper:
     name: hello
     state: absent
   register: zypper_result
@@ -44,7 +44,7 @@
 - name: verify no change on re-uninstall
   assert:
     that:
-    - "not zypper_result.changed"
+      - "not zypper_result.changed"

 # INSTALL
 - name: install hello
   zypper:
     name: hello
     state: present
   register: zypper_result
@@ -64,9 +64,9 @@
 - name: verify installation of hello
   assert:
     that:
-    - "zypper_result.rc == 0"
-    - "zypper_result.changed"
-    - "rpm_result.rc == 0"
+      - "zypper_result.rc == 0"
+      - "zypper_result.changed"
+      - "rpm_result.rc == 0"

 # INSTALL AGAIN
 - name: install hello again
@@ -78,7 +78,7 @@
 - name: verify no change on second install
   assert:
     that:
-    - "not zypper_result.changed"
+      - "not zypper_result.changed"

 # Multiple packages
 - name: uninstall hello and metamail
@@ -102,8 +102,8 @@
 - name: verify packages uninstalled
   assert:
     that:
-    - "rpm_hello_result.rc != 0"
-    - "rpm_metamail_result.rc != 0"
+      - "rpm_hello_result.rc != 0"
+      - "rpm_metamail_result.rc != 0"

 - name: install hello and metamail
   zypper:
@@ -126,10 +126,10 @@
 - name: verify packages installed
   assert:
     that:
-    - "zypper_result.rc == 0"
-    - "zypper_result.changed"
-    - "rpm_hello_result.rc == 0"
-    - "rpm_metamail_result.rc == 0"
+      - "zypper_result.rc == 0"
+      - "zypper_result.changed"
+      - "rpm_hello_result.rc == 0"
+      - "rpm_metamail_result.rc == 0"

 - name: uninstall hello and metamail
   zypper:
@@ -190,7 +190,7 @@
     state: present

 - name: clean zypper RPM cache
-  file: 
+  file:
     name: /var/cache/zypper/RPMS
     state: absent
@@ -230,9 +230,9 @@
 - name: verify installation of empty
   assert:
     that:
-    - "zypper_result.rc == 0"
-    - "zypper_result.changed"
-    - "rpm_result.rc == 0"
+      - "zypper_result.rc == 0"
+      - "zypper_result.changed"
+      - "rpm_result.rc == 0"

 - name: uninstall empty
   zypper:
@@ -253,8 +253,97 @@
 - name: check that we extract rpm package in testdir folder and folder var is exist
   assert:
     that:
-    - "stat_result.stat.exists == true"
+      - "stat_result.stat.exists == true"

+# Build and install an empty RPM whose %post script fails
+- name: uninstall post_error
+  zypper:
+    name: post_error
+    state: removed
+
+- name: install rpmbuild
+  zypper:
+    name: rpmbuild
+    state: present
+
+- name: clean zypper RPM cache
+  file:
+    name: /var/cache/zypper/RPMS
+    state: absent
+
+- name: create directory
+  file:
+    path: "{{ remote_tmp_dir | expanduser }}/zypper2"
+    state: directory
+
+- name: copy spec file
+  copy:
+    src: post_error.spec
+    dest: "{{ remote_tmp_dir | expanduser }}/zypper2/post_error.spec"
+
+- name: build rpm
+  command: |
+    rpmbuild -bb \
+      --define "_topdir {{ remote_tmp_dir | expanduser }}/zypper2/rpm-build" \
+      --define "_builddir %{_topdir}" \
+      --define "_rpmdir %{_topdir}" \
+      --define "_srcrpmdir %{_topdir}" \
+      --define "_specdir {{ remote_tmp_dir | expanduser }}/zypper2" \
+      --define "_sourcedir %{_topdir}" \
+      {{ remote_tmp_dir | expanduser }}/zypper2/post_error.spec
+  register: rpm_build_result
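+
+# The %post scriptlet in post_error.spec exits 1, so the post-install script of
+# the built package fails. zypper signals this with exit code 107
+# (ZYPPER_EXIT_INF_RPM_SCRIPT_FAILED); the installs below exercise skip_post_errors.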
+
+- name: install post_error rpm with skip_post_errors
+  zypper:
+    name: "{{ remote_tmp_dir | expanduser }}/zypper2/rpm-build/noarch/post_error-1-0.noarch.rpm"
+    disable_gpg_check: true
+    skip_post_errors: true
+  register: zypper_result
+
+- name: check post_error rpm
+  shell: rpm -q post_error
+  failed_when: false
+  register: rpm_result
+
+- name: verify installation of post_error
+  assert:
+    that:
+      - "zypper_result.rc == 0"
+      - "zypper_result.changed"
+      - "rpm_result.rc == 0"
+
+- name: uninstall post_error
+  zypper:
+    name: post_error
+    state: removed
+
+- name: install post_error rpm without skip_post_errors
+  zypper:
+    name: "{{ remote_tmp_dir | expanduser }}/zypper2/rpm-build/noarch/post_error-1-0.noarch.rpm"
+    disable_gpg_check: true
+  register: zypper_result
+  ignore_errors: true
+
+- name: check post_error rpm
+  shell: rpm -q post_error
+  failed_when: false
+  register: rpm_result
+
+- name: verify failed installation of post_error
+  assert:
+    that:
+      - "zypper_result.rc == 107"
+      - "not zypper_result.changed"
+      - "rpm_result.rc == 0"
+
+- name: uninstall post_error
+  zypper:
+    name: post_error
+    state: removed

 # test simultaneous remove and install using +- prefixes
@@ -269,8 +354,8 @@
     state: absent

 - name: install and remove in the same run, with +- prefix
-  zypper: 
-    name: 
+  zypper:
+    name:
     - -hello
     - +metamail
     state: present
@@ -332,13 +417,13 @@
 - name: try rm patch
   zypper:
-    name: openSUSE-2016-128 
+    name: openSUSE-2016-128
     type: patch
     state: absent
   ignore_errors: true
   register: zypper_patch
 - assert:
-    that: 
+    that:
     - zypper_patch is failed
     - zypper_patch.msg.startswith('Can not remove patches.')
@@ -349,7 +434,7 @@
   ignore_errors: true
   register: zypper_rm
 - assert:
-    that: 
+    that:
     - zypper_rm is failed
     - zypper_rm.msg.startswith('Can not remove via URL.')
@@ -365,7 +450,7 @@
     type: pattern
     state: present
   register: zypper_install_pattern1
-  
+
 - name: install pattern update_test again
   zypper:
     name: update_test
@@ -394,7 +479,7 @@
     name: hello
     state: present
   register: zypperin2
-  
+
 - assert:
     that:
     - zypperin1 is succeeded
diff --git a/tests/integration/targets/zypper_repository/tasks/zypper_repository.yml b/tests/integration/targets/zypper_repository/tasks/zypper_repository.yml
index ec362af108..8c322421c4 100644
--- a/tests/integration/targets/zypper_repository/tasks/zypper_repository.yml
+++ b/tests/integration/targets/zypper_repository/tasks/zypper_repository.yml
@@ -218,72 +218,74 @@
 # (Maybe 'Uyuni' needs to be replaced with something else?)
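+# The tasks below only run on openSUSE Leap versions before 15.4 (see the
+# 'when:' condition); the Uyuni .repo URL embeds ansible_distribution_version.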
- when: ansible_distribution_version is version('15.4', '<') block: - - name: add new repository via url to .repo file - community.general.zypper_repository: - repo: http://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/openSUSE_Leap_{{ ansible_distribution_version }}/systemsmanagement:Uyuni:Stable.repo - state: present - register: added_by_repo_file + - name: add new repository via url to .repo file + community.general.zypper_repository: + repo: http://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/openSUSE_Leap_{{ ansible_distribution_version }}/systemsmanagement:Uyuni:Stable.repo + state: present + register: added_by_repo_file - - name: get repository details from zypper - command: zypper lr systemsmanagement_Uyuni_Stable - register: get_repository_details_from_zypper + - name: get repository details from zypper + command: zypper lr systemsmanagement_Uyuni_Stable + register: get_repository_details_from_zypper - - name: verify adding via .repo file was successful - assert: - that: - - "added_by_repo_file is changed" - - "get_repository_details_from_zypper.rc == 0" - - "'/systemsmanagement:/Uyuni:/Stable/' in get_repository_details_from_zypper.stdout" + - name: verify adding via .repo file was successful + assert: + that: + - "added_by_repo_file is changed" + - "get_repository_details_from_zypper.rc == 0" + - "'/systemsmanagement:/Uyuni:/Stable/' in get_repository_details_from_zypper.stdout" - - name: add same repository via url to .repo file again to verify idempotency - community.general.zypper_repository: - repo: http://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/openSUSE_Leap_{{ ansible_distribution_version }}/systemsmanagement:Uyuni:Stable.repo - state: present - register: added_again_by_repo_file + - name: add same repository via url to .repo file again to verify idempotency + community.general.zypper_repository: + repo: http://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/openSUSE_Leap_{{ ansible_distribution_version }}/systemsmanagement:Uyuni:Stable.repo + state: present + register: added_again_by_repo_file - - name: verify nothing was changed adding a repo with the same .repo file - assert: - that: - - added_again_by_repo_file is not changed + - name: verify nothing was changed adding a repo with the same .repo file + assert: + that: + - added_again_by_repo_file is not changed - - name: remove repository via url to .repo file - community.general.zypper_repository: - repo: http://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/openSUSE_Leap_{{ ansible_distribution_version }}/systemsmanagement:Uyuni:Stable.repo - state: absent - register: removed_by_repo_file + - name: remove repository via url to .repo file + community.general.zypper_repository: + repo: http://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/openSUSE_Leap_{{ ansible_distribution_version }}/systemsmanagement:Uyuni:Stable.repo + state: absent + register: removed_by_repo_file - - name: get list of files in /etc/zypp/repos.d/ - command: ls /etc/zypp/repos.d/ - changed_when: false - register: etc_zypp_reposd + - name: get list of files in /etc/zypp/repos.d/ + command: ls /etc/zypp/repos.d/ + changed_when: false + register: etc_zypp_reposd - - name: verify removal via .repo file was successful, including cleanup of local .repo file in /etc/zypp/repos.d/ - assert: - that: - - "removed_by_repo_file" - - "'/systemsmanagement:/Uyuni:/Stable/' not in etc_zypp_reposd.stdout" + - name: verify removal 
via .repo file was successful, including cleanup of local .repo file in /etc/zypp/repos.d/ + assert: + that: + - "removed_by_repo_file" + - "'/systemsmanagement:/Uyuni:/Stable/' not in etc_zypp_reposd.stdout" # FIXME: THIS DOESN'T SEEM TO WORK ANYMORE WITH ANY OPENSUSE VERSION IN CI! - when: false block: - - name: Copy test .repo file - copy: - src: 'files/systemsmanagement_Uyuni_Utils.repo' - dest: '{{ remote_tmp_dir }}' + - name: Copy test .repo file + copy: + src: 'files/systemsmanagement_Uyuni_Utils.repo' + dest: '{{ remote_tmp_dir }}' - - name: add new repository via local path to .repo file - community.general.zypper_repository: - repo: "{{ remote_tmp_dir }}/systemsmanagement_Uyuni_Utils.repo" - state: present - register: added_by_repo_local_file + - name: add new repository via local path to .repo file + community.general.zypper_repository: + repo: "{{ remote_tmp_dir }}/systemsmanagement_Uyuni_Utils.repo" + state: present + register: added_by_repo_local_file - - name: get repository details for systemsmanagement_Uyuni_Utils from zypper - command: zypper lr systemsmanagement_Uyuni_Utils - register: get_repository_details_from_zypper_for_systemsmanagement_Uyuni_Utils + - name: get repository details for systemsmanagement_Uyuni_Utils from zypper + command: zypper lr systemsmanagement_Uyuni_Utils + register: get_repository_details_from_zypper_for_systemsmanagement_Uyuni_Utils - - name: verify adding repository via local .repo file was successful - assert: - that: - - "added_by_repo_local_file is changed" - - "get_repository_details_from_zypper_for_systemsmanagement_Uyuni_Utils.rc == 0" - - "'/systemsmanagement:/Uyuni:/Utils/' in get_repository_details_from_zypper_for_systemsmanagement_Uyuni_Utils.stdout" + - name: verify adding repository via local .repo file was successful + assert: + that: + - "added_by_repo_local_file is changed" + - "get_repository_details_from_zypper_for_systemsmanagement_Uyuni_Utils.rc == 0" + - "'/systemsmanagement:/Uyuni:/Utils/' in get_repository_details_from_zypper_for_systemsmanagement_Uyuni_Utils.stdout" diff --git a/tests/sanity/extra/action-group.json b/tests/sanity/extra/action-group.json deleted file mode 100644 index db6a92bcb7..0000000000 --- a/tests/sanity/extra/action-group.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "include_symlinks": true, - "prefixes": [ - "meta/runtime.yml", - "plugins/modules/", - "tests/sanity/extra/action-group." - ], - "output": "path-message", - "requirements": [ - "pyyaml" - ] -} diff --git a/tests/sanity/extra/action-group.py b/tests/sanity/extra/action-group.py deleted file mode 100755 index 9c82ff8619..0000000000 --- a/tests/sanity/extra/action-group.py +++ /dev/null @@ -1,134 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2024, Felix Fontein -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later -"""Make sure all modules that should show up in the action group.""" - -from __future__ import annotations - -import os -import re -import yaml - - -ACTION_GROUPS = { - # The format is as follows: - # * 'pattern': a regular expression matching all module names potentially belonging to the action group; - # * 'exclusions': a list of modules that are not part of the action group; all other modules matching 'pattern' must be part of it; - # * 'doc_fragment': the docs fragment that documents membership of the action group. 
- 'consul': { - 'pattern': re.compile('^consul_.*$'), - 'exclusions': [ - 'consul_acl_bootstrap', - 'consul_kv', - ], - 'doc_fragment': 'community.general.consul.actiongroup_consul', - }, - 'keycloak': { - 'pattern': re.compile('^keycloak_.*$'), - 'exclusions': [ - 'keycloak_realm_info', - ], - 'doc_fragment': 'community.general.keycloak.actiongroup_keycloak', - }, - 'proxmox': { - 'pattern': re.compile('^proxmox(_.*)?$'), - 'exclusions': [], - 'doc_fragment': 'community.general.proxmox.actiongroup_proxmox', - }, -} - - -def main(): - """Main entry point.""" - - # Load redirects - meta_runtime = 'meta/runtime.yml' - self_path = 'tests/sanity/extra/action-group.py' - try: - with open(meta_runtime, 'rb') as f: - data = yaml.safe_load(f) - action_groups = data['action_groups'] - except Exception as exc: - print(f'{meta_runtime}: cannot load action groups: {exc}') - return - - for action_group in action_groups: - if action_group not in ACTION_GROUPS: - print(f'{meta_runtime}: found unknown action group {action_group!r}; likely {self_path} needs updating') - for action_group, action_group_data in list(ACTION_GROUPS.items()): - if action_group not in action_groups: - print(f'{meta_runtime}: cannot find action group {action_group!r}; likely {self_path} needs updating') - - modules_directory = 'plugins/modules/' - modules_suffix = '.py' - - for file in os.listdir(modules_directory): - if not file.endswith(modules_suffix): - continue - module_name = file[:-len(modules_suffix)] - - for action_group, action_group_data in ACTION_GROUPS.items(): - action_group_content = action_groups.get(action_group) or [] - path = os.path.join(modules_directory, file) - - if not action_group_data['pattern'].match(module_name): - if module_name in action_group_content: - print(f'{path}: module is in action group {action_group!r} despite not matching its pattern as defined in {self_path}') - continue - - should_be_in_action_group = module_name not in action_group_data['exclusions'] - - if should_be_in_action_group: - if module_name not in action_group_content: - print(f'{meta_runtime}: module {module_name!r} is not part of {action_group!r} action group') - else: - action_group_content.remove(module_name) - - documentation = [] - in_docs = False - with open(path, 'r', encoding='utf-8') as f: - for line in f: - if line.startswith('DOCUMENTATION ='): - in_docs = True - elif line.startswith(("'''", '"""')) and in_docs: - in_docs = False - elif in_docs: - documentation.append(line) - if in_docs: - print(f'{path}: cannot find DOCUMENTATION end') - if not documentation: - print(f'{path}: cannot find DOCUMENTATION') - continue - - try: - docs = yaml.safe_load('\n'.join(documentation)) - if not isinstance(docs, dict): - raise Exception('is not a top-level dictionary') - except Exception as exc: - print(f'{path}: cannot load DOCUMENTATION as YAML: {exc}') - continue - - docs_fragments = docs.get('extends_documentation_fragment') or [] - is_in_action_group = action_group_data['doc_fragment'] in docs_fragments - - if should_be_in_action_group != is_in_action_group: - if should_be_in_action_group: - print( - f'{path}: module does not document itself as part of action group {action_group!r}, but it should;' - f' you need to add {action_group_data["doc_fragment"]} to "extends_documentation_fragment" in DOCUMENTATION' - ) - else: - print(f'{path}: module documents itself as part of action group {action_group!r}, but it should not be') - - for action_group, action_group_data in ACTION_GROUPS.items(): - action_group_content = 
action_groups.get(action_group) or [] - for module_name in action_group_content: - print( - f'{meta_runtime}: module {module_name} mentioned in {action_group!r} action group' - f' does not exist or does not match pattern defined in {self_path}' - ) - - -if __name__ == '__main__': - main() diff --git a/tests/sanity/extra/aliases.json b/tests/sanity/extra/aliases.json deleted file mode 100644 index dabdcd6a1d..0000000000 --- a/tests/sanity/extra/aliases.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "include_symlinks": false, - "prefixes": [ - ".azure-pipelines/azure-pipelines.yml", - "tests/integration/targets/" - ], - "output": "path-message", - "requirements": [ - "PyYAML" - ] -} diff --git a/tests/sanity/extra/aliases.py b/tests/sanity/extra/aliases.py old mode 100755 new mode 100644 index c1dcba0df5..8eb2e9aadb --- a/tests/sanity/extra/aliases.py +++ b/tests/sanity/extra/aliases.py @@ -6,6 +6,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +import glob import sys import yaml @@ -13,9 +14,6 @@ import yaml def main(): """Main entry point.""" - paths = sys.argv[1:] or sys.stdin.read().splitlines() - paths = [path for path in paths if path.endswith('/aliases')] - with open('.azure-pipelines/azure-pipelines.yml', 'rb') as f: azp = yaml.safe_load(f) @@ -27,6 +25,9 @@ def main(): for group in job['parameters']['groups']: allowed_targets.add('azp/posix/{0}'.format(group)) + paths = glob.glob("tests/integration/targets/*/aliases") + + has_errors = False for path in paths: targets = [] skip = False @@ -56,10 +57,14 @@ def main(): if 'targets/setup_' in path: continue print('%s: %s' % (path, 'found no targets')) + has_errors = True for target in targets: if target not in allowed_targets: print('%s: %s' % (path, 'found invalid target "{0}"'.format(target))) + has_errors = True + + return 1 if has_errors else 0 if __name__ == '__main__': - main() + sys.exit(main()) diff --git a/tests/sanity/extra/botmeta.json b/tests/sanity/extra/botmeta.json deleted file mode 100644 index c546ab5fd7..0000000000 --- a/tests/sanity/extra/botmeta.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "include_symlinks": false, - "output": "path-line-column-message", - "requirements": [ - "PyYAML", - "voluptuous==0.12.1" - ] -} diff --git a/tests/sanity/extra/botmeta.py b/tests/sanity/extra/botmeta.py old mode 100755 new mode 100644 index 9f7e977ea2..b9cdbd8a6c --- a/tests/sanity/extra/botmeta.py +++ b/tests/sanity/extra/botmeta.py @@ -54,184 +54,194 @@ IGNORE_NO_MAINTAINERS = [ 'plugins/filter/random_mac.py', ] -FILENAME = '.github/BOTMETA.yml' -LIST_ENTRIES = frozenset(('supershipit', 'maintainers', 'labels', 'keywords', 'notify', 'ignore')) +class BotmetaCheck: + def __init__(self): + self.errors: list[str] = [] + self.botmeta_filename = '.github/BOTMETA.yml' + self.list_entries = frozenset(('supershipit', 'maintainers', 'labels', 'keywords', 'notify', 'ignore')) + self.author_regex = re.compile(r'^\w.*\(@([\w-]+)\)(?![\w.])') -AUTHOR_REGEX = re.compile(r'^\w.*\(@([\w-]+)\)(?![\w.])') + def report_error(self, error: str) -> None: + self.errors.append(error) + def read_authors(self, filename: str) -> list[str]: + data = {} + try: + documentation = [] + in_docs = False + with open(filename, 'r', encoding='utf-8') as f: + for line in f: + if line.startswith('DOCUMENTATION ='): + in_docs = True + elif line.startswith(("'''", '"""')) and in_docs: + in_docs = False + elif in_docs: + documentation.append(line) + if in_docs: + self.report_error(f'{filename}: cannot find DOCUMENTATION end') + return 
[]
+            if not documentation:
+                self.report_error(f'{filename}: cannot find DOCUMENTATION')
+                return []
-def read_authors(filename):
-    data = {}
-    try:
-        documentation = []
-        in_docs = False
-        with open(filename, 'r', encoding='utf-8') as f:
-            for line in f:
-                if line.startswith('DOCUMENTATION ='):
-                    in_docs = True
-                elif line.startswith(("'''", '"""')) and in_docs:
-                    in_docs = False
-                elif in_docs:
-                    documentation.append(line)
-        if in_docs:
-            print(f'{filename}: cannot find DOCUMENTATION end')
-            return []
-        if not documentation:
-            print(f'{filename}: cannot find DOCUMENTATION')
+            data = yaml.safe_load('\n'.join(documentation))
+
+        except Exception as e:
+            self.report_error(f'{filename}:0:0: Cannot load DOCUMENTATION: {e}')
             return []
-        data = yaml.safe_load('\n'.join(documentation))
+        author = data.get('author') or []
+        if isinstance(author, str):
+            author = [author]
+        return author
-    except Exception as e:
-        print(f'{filename}:0:0: Cannot load DOCUMENTATION: {e}')
-        return []
+    def extract_author_name(self, author: str) -> str | None:
+        m = self.author_regex.match(author)
+        if m:
+            return m.group(1)
+        if author == 'Ansible Core Team':
+            return '$team_ansible_core'
+        return None
-    author = data.get('author') or []
-    if isinstance(author, str):
-        author = [author]
-    return author
+    def validate(self, filename: str, filedata: dict) -> None:
+        if not filename.startswith('plugins/'):
+            return
+        if filename.startswith(('plugins/doc_fragments/', 'plugins/module_utils/')):
+            return
+        # Compile list of all active and inactive maintainers
+        all_maintainers = filedata['maintainers'] + filedata['ignore']
+        if not filename.startswith(('plugins/action/', 'plugins/doc_fragments/', 'plugins/filter/', 'plugins/module_utils/', 'plugins/plugin_utils/')):
+            maintainers = self.read_authors(filename)
+            for maintainer in maintainers:
+                maintainer = self.extract_author_name(maintainer)
+                if maintainer is not None and maintainer not in all_maintainers:
+                    others = ', '.join(all_maintainers)
+                    msg = f'Author {maintainer} not mentioned as active or inactive maintainer for {filename} (mentioned are: {others})'
+                    self.report_error(f'{self.botmeta_filename}:0:0: {msg}')
+        should_have_no_maintainer = filename in IGNORE_NO_MAINTAINERS
+        if not all_maintainers and not should_have_no_maintainer:
+            self.report_error(f'{self.botmeta_filename}:0:0: No (active or inactive) maintainer mentioned for {filename}')
+        if all_maintainers and should_have_no_maintainer:
+            own_path = os.path.relpath(__file__, os.getcwd())
+            self.report_error(f'{self.botmeta_filename}:0:0: Please remove {filename} from the ignore list of {own_path}')
+
+    def run(self) -> None:
+        try:
+            with open(self.botmeta_filename, 'rb') as f:
+                botmeta = yaml.safe_load(f)
+        except yaml.error.MarkedYAMLError as ex:
+            msg = re.sub(r'\s+', ' ', str(ex))
+            self.report_error(f'{self.botmeta_filename}:{ex.context_mark.line + 1}:{ex.context_mark.column + 1}: YAML load failed: {msg}')
+            return
+        except Exception as ex:  # pylint: disable=broad-except
+            msg = re.sub(r'\s+', ' ', str(ex))
+            self.report_error(f'{self.botmeta_filename}:0:0: YAML load failed: {msg}')
+            return
+
+        # Validate schema
+
+        MacroSchema = Schema({
+            (str): Any(str, None),
+        }, extra=PREVENT_EXTRA)
+
+        FilesSchema = Schema({
+            (str): {
+                ('supershipit'): str,
+                ('support'): Any('community'),
+                ('maintainers'): str,
+                ('labels'): str,
+                ('keywords'): str,
+                ('notify'): str,
+                ('ignore'): str,
+            },
+        }, extra=PREVENT_EXTRA)
+
+        schema = Schema({
+            ('notifications'): bool,
+            ('automerge'): bool,
+            ('macros'):
MacroSchema, + ('files'): FilesSchema, + }, extra=PREVENT_EXTRA) + + try: + schema(botmeta) + except MultipleInvalid as ex: + for error in ex.errors: + # No way to get line/column numbers + self.report_error(f'{self.botmeta_filename}:0:0: {humanize_error(botmeta, error)}') + return + + # Preprocess (substitute macros, convert to lists) + macros = botmeta.get('macros') or {} + macro_re = re.compile(r'\$([a-zA-Z_]+)') + + def convert_macros(text, macros): + def f(m): + macro = m.group(1) + replacement = (macros[macro] or '') + if macro == 'team_ansible_core': + return f'$team_ansible_core {replacement}' + return replacement + + return macro_re.sub(f, text) + + files = {} + try: + for file, filedata in (botmeta.get('files') or {}).items(): + file = convert_macros(file, macros) + filedata = {k: convert_macros(v, macros) for k, v in filedata.items()} + files[file] = filedata + for k, v in filedata.items(): + if k in self.list_entries: + filedata[k] = v.split() + except KeyError as e: + self.report_error(f'{self.botmeta_filename}:0:0: Found unknown macro {e}') + return + + # Scan all files + unmatched = set(files) + for dirs in ('docs/docsite/rst', 'plugins', 'tests', 'changelogs'): + for dirpath, _dirnames, filenames in os.walk(dirs): + for file in sorted(filenames): + if file.endswith('.pyc'): + continue + filename = os.path.join(dirpath, file) + if os.path.islink(filename): + continue + if os.path.isfile(filename): + matching_files = [] + for file, filedata in files.items(): + if filename.startswith(file): + matching_files.append((file, filedata)) + if file in unmatched: + unmatched.remove(file) + if not matching_files: + self.report_error(f'{self.botmeta_filename}:0:0: Did not find any entry for {filename}') + + matching_files.sort(key=lambda kv: kv[0]) + filedata = {} + for k in self.list_entries: + filedata[k] = [] + for dummy, data in matching_files: + for k, v in data.items(): + if k in self.list_entries: + v = filedata[k] + v + filedata[k] = v + self.validate(filename, filedata) + + for file in unmatched: + self.report_error(f'{self.botmeta_filename}:0:0: Entry {file} was not used') -def extract_author_name(author): - m = AUTHOR_REGEX.match(author) - if m: - return m.group(1) - if author == 'Ansible Core Team': - return '$team_ansible_core' - return None - - -def validate(filename, filedata): - if not filename.startswith('plugins/'): - return - if filename.startswith(('plugins/doc_fragments/', 'plugins/module_utils/')): - return - # Compile list of all active and inactive maintainers - all_maintainers = filedata['maintainers'] + filedata['ignore'] - if not filename.startswith(('plugins/action/', 'plugins/doc_fragments/', 'plugins/filter/', 'plugins/module_utils/', 'plugins/plugin_utils/')): - maintainers = read_authors(filename) - for maintainer in maintainers: - maintainer = extract_author_name(maintainer) - if maintainer is not None and maintainer not in all_maintainers: - others = ', '.join(all_maintainers) - msg = f'Author {maintainer} not mentioned as active or inactive maintainer for {filename} (mentioned are: {others})' - print(f'{FILENAME}:0:0: {msg}') - should_have_no_maintainer = filename in IGNORE_NO_MAINTAINERS - if not all_maintainers and not should_have_no_maintainer: - print(f'{FILENAME}:0:0: No (active or inactive) maintainer mentioned for {filename}') - if all_maintainers and should_have_no_maintainer: - print(f'{FILENAME}:0:0: Please remove {filename} from the ignore list of {sys.argv[0]}') - - -def main(): +def main() -> int: """Main entry point.""" - try: - with 
open(FILENAME, 'rb') as f: - botmeta = yaml.safe_load(f) - except yaml.error.MarkedYAMLError as ex: - msg = re.sub(r'\s+', ' ', str(ex)) - print('f{FILENAME}:{ex.context_mark.line + 1}:{ex.context_mark.column + 1}: YAML load failed: {msg}') - return - except Exception as ex: # pylint: disable=broad-except - msg = re.sub(r'\s+', ' ', str(ex)) - print(f'{FILENAME}:0:0: YAML load failed: {msg}') - return - - # Validate schema - - MacroSchema = Schema({ - (str): Any(str, None), - }, extra=PREVENT_EXTRA) - - FilesSchema = Schema({ - (str): { - ('supershipit'): str, - ('support'): Any('community'), - ('maintainers'): str, - ('labels'): str, - ('keywords'): str, - ('notify'): str, - ('ignore'): str, - }, - }, extra=PREVENT_EXTRA) - - schema = Schema({ - ('notifications'): bool, - ('automerge'): bool, - ('macros'): MacroSchema, - ('files'): FilesSchema, - }, extra=PREVENT_EXTRA) - - try: - schema(botmeta) - except MultipleInvalid as ex: - for error in ex.errors: - # No way to get line/column numbers - print(f'{FILENAME}:0:0: {humanize_error(botmeta, error)}') - return - - # Preprocess (substitute macros, convert to lists) - macros = botmeta.get('macros') or {} - macro_re = re.compile(r'\$([a-zA-Z_]+)') - - def convert_macros(text, macros): - def f(m): - macro = m.group(1) - replacement = (macros[macro] or '') - if macro == 'team_ansible_core': - return f'$team_ansible_core {replacement}' - return replacement - - return macro_re.sub(f, text) - - files = {} - try: - for file, filedata in (botmeta.get('files') or {}).items(): - file = convert_macros(file, macros) - filedata = {k: convert_macros(v, macros) for k, v in filedata.items()} - files[file] = filedata - for k, v in filedata.items(): - if k in LIST_ENTRIES: - filedata[k] = v.split() - except KeyError as e: - print(f'{FILENAME}:0:0: Found unknown macro {e}') - return - - # Scan all files - unmatched = set(files) - for dirs in ('docs/docsite/rst', 'plugins', 'tests', 'changelogs'): - for dirpath, _dirnames, filenames in os.walk(dirs): - for file in sorted(filenames): - if file.endswith('.pyc'): - continue - filename = os.path.join(dirpath, file) - if os.path.islink(filename): - continue - if os.path.isfile(filename): - matching_files = [] - for file, filedata in files.items(): - if filename.startswith(file): - matching_files.append((file, filedata)) - if file in unmatched: - unmatched.remove(file) - if not matching_files: - print(f'{FILENAME}:0:0: Did not find any entry for {filename}') - - matching_files.sort(key=lambda kv: kv[0]) - filedata = {} - for k in LIST_ENTRIES: - filedata[k] = [] - for dummy, data in matching_files: - for k, v in data.items(): - if k in LIST_ENTRIES: - v = filedata[k] + v - filedata[k] = v - validate(filename, filedata) - - for file in unmatched: - print(f'{FILENAME}:0:0: Entry {file} was not used') + check = BotmetaCheck() + check.run() + for error in sorted(check.errors): + print(error) + return 1 if check.errors else 0 if __name__ == '__main__': - main() + sys.exit(main()) diff --git a/tests/sanity/extra/extra-docs.json b/tests/sanity/extra/extra-docs.json deleted file mode 100644 index 9a28d174fd..0000000000 --- a/tests/sanity/extra/extra-docs.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "include_symlinks": false, - "prefixes": [ - "docs/docsite/", - "plugins/", - "roles/" - ], - "output": "path-line-column-message", - "requirements": [ - "ansible-core", - "antsibull-docs" - ] -} diff --git a/tests/sanity/extra/extra-docs.py b/tests/sanity/extra/extra-docs.py deleted file mode 100755 index 251e6d70f5..0000000000 --- 
a/tests/sanity/extra/extra-docs.py +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later -"""Check extra collection docs with antsibull-docs.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import os -import sys -import subprocess - - -def main(): - """Main entry point.""" - env = os.environ.copy() - suffix = ':{env}'.format(env=env["ANSIBLE_COLLECTIONS_PATH"]) if 'ANSIBLE_COLLECTIONS_PATH' in env else '' - env['ANSIBLE_COLLECTIONS_PATH'] = '{root}{suffix}'.format(root=os.path.dirname(os.path.dirname(os.path.dirname(os.getcwd()))), suffix=suffix) - p = subprocess.run( - ['antsibull-docs', 'lint-collection-docs', '--plugin-docs', '--skip-rstcheck', '.'], - env=env, - check=False, - ) - if p.returncode not in (0, 3): - print('{0}:0:0: unexpected return code {1}'.format(sys.argv[0], p.returncode)) - - -if __name__ == '__main__': - main() diff --git a/tests/sanity/extra/licenses.json b/tests/sanity/extra/licenses.json deleted file mode 100644 index 50e47ca88b..0000000000 --- a/tests/sanity/extra/licenses.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "include_symlinks": false, - "output": "path-message" -} diff --git a/tests/sanity/extra/licenses.py b/tests/sanity/extra/licenses.py deleted file mode 100755 index 6227ee22f2..0000000000 --- a/tests/sanity/extra/licenses.py +++ /dev/null @@ -1,110 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2022, Felix Fontein -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later -"""Prevent files without a correct license identifier from being added to the source tree.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import os -import glob -import sys - - -def format_license_list(licenses): - if not licenses: - return '(empty)' - return ', '.join(['"%s"' % license for license in licenses]) - - -def find_licenses(filename, relax=False): - spdx_license_identifiers = [] - other_license_identifiers = [] - has_copyright = False - try: - with open(filename, 'r', encoding='utf-8') as f: - for line in f: - line = line.rstrip() - if 'Copyright ' in line: - has_copyright = True - if 'Copyright: ' in line: - print('%s: found copyright line with "Copyright:". Please remove the colon.' 
% (filename, )) - if 'SPDX-FileCopyrightText: ' in line: - has_copyright = True - idx = line.find('SPDX-License-Identifier: ') - if idx >= 0: - lic_id = line[idx + len('SPDX-License-Identifier: '):] - spdx_license_identifiers.extend(lic_id.split(' OR ')) - if 'GNU General Public License' in line: - if 'v3.0+' in line: - other_license_identifiers.append('GPL-3.0-or-later') - if 'version 3 or later' in line: - other_license_identifiers.append('GPL-3.0-or-later') - if 'Simplified BSD License' in line: - other_license_identifiers.append('BSD-2-Clause') - if 'Apache License 2.0' in line: - other_license_identifiers.append('Apache-2.0') - if 'PSF License' in line or 'Python-2.0' in line: - other_license_identifiers.append('PSF-2.0') - if 'MIT License' in line: - other_license_identifiers.append('MIT') - except Exception as exc: - print('%s: error while processing file: %s' % (filename, exc)) - if len(set(spdx_license_identifiers)) < len(spdx_license_identifiers): - print('%s: found identical SPDX-License-Identifier values' % (filename, )) - if other_license_identifiers and set(other_license_identifiers) != set(spdx_license_identifiers): - print('%s: SPDX-License-Identifier yielded the license list %s, while manual guessing yielded the license list %s' % ( - filename, format_license_list(spdx_license_identifiers), format_license_list(other_license_identifiers))) - if not has_copyright and not relax: - print('%s: found no copyright notice' % (filename, )) - return sorted(spdx_license_identifiers) - - -def main(): - """Main entry point.""" - paths = sys.argv[1:] or sys.stdin.read().splitlines() - - # The following paths are allowed to have no license identifier - no_comments_allowed = [ - 'changelogs/fragments/*.yml', - 'changelogs/fragments/*.yaml', - ] - - # These files are completely ignored - ignore_paths = [ - '.ansible-test-timeout.json', - '.reuse/dep5', - 'LICENSES/*.txt', - 'COPYING', - ] - - no_comments_allowed = [fn for pattern in no_comments_allowed for fn in glob.glob(pattern)] - ignore_paths = [fn for pattern in ignore_paths for fn in glob.glob(pattern)] - - valid_licenses = [license_file[len('LICENSES/'):-len('.txt')] for license_file in glob.glob('LICENSES/*.txt')] - - for path in paths: - if path.startswith('./'): - path = path[2:] - if path in ignore_paths or path.startswith('tests/output/'): - continue - if os.stat(path).st_size == 0: - continue - if not path.endswith('.license') and os.path.exists(path + '.license'): - path = path + '.license' - valid_licenses_for_path = valid_licenses - if path.startswith('plugins/') and not path.startswith(('plugins/modules/', 'plugins/module_utils/', 'plugins/doc_fragments/')): - valid_licenses_for_path = [license for license in valid_licenses if license == 'GPL-3.0-or-later'] - licenses = find_licenses(path, relax=path in no_comments_allowed) - if not licenses: - if path not in no_comments_allowed: - print('%s: must have at least one license' % (path, )) - else: - for license in licenses: - if license not in valid_licenses_for_path: - print('%s: found not allowed license "%s", must be one of %s' % ( - path, license, format_license_list(valid_licenses_for_path))) - - -if __name__ == '__main__': - main() diff --git a/tests/sanity/extra/licenses.py.license b/tests/sanity/extra/licenses.py.license deleted file mode 100644 index 6c4958feba..0000000000 --- a/tests/sanity/extra/licenses.py.license +++ /dev/null @@ -1,3 +0,0 @@ -GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) 
-SPDX-License-Identifier: GPL-3.0-or-later -SPDX-FileCopyrightText: 2022, Felix Fontein diff --git a/tests/sanity/extra/no-unwanted-files.json b/tests/sanity/extra/no-unwanted-files.json deleted file mode 100644 index c789a7fd39..0000000000 --- a/tests/sanity/extra/no-unwanted-files.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "include_symlinks": true, - "prefixes": [ - "plugins/" - ], - "output": "path-message" -} diff --git a/tests/sanity/extra/no-unwanted-files.py b/tests/sanity/extra/no-unwanted-files.py deleted file mode 100755 index b39df83a18..0000000000 --- a/tests/sanity/extra/no-unwanted-files.py +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later -"""Prevent unwanted files from being added to the source tree.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import os -import os.path -import sys - - -def main(): - """Main entry point.""" - paths = sys.argv[1:] or sys.stdin.read().splitlines() - - allowed_extensions = ( - '.cs', - '.ps1', - '.psm1', - '.py', - ) - - skip_paths = set([ - ]) - - skip_directories = ( - ) - - yaml_directories = ( - 'plugins/test/', - 'plugins/filter/', - ) - - for path in paths: - if path in skip_paths: - continue - - if any(path.startswith(skip_directory) for skip_directory in skip_directories): - continue - - if os.path.islink(path): - print('%s: is a symbolic link' % (path, )) - elif not os.path.isfile(path): - print('%s: is not a regular file' % (path, )) - - ext = os.path.splitext(path)[1] - - if ext in ('.yml', ) and any(path.startswith(yaml_directory) for yaml_directory in yaml_directories): - continue - - if ext not in allowed_extensions: - print('%s: extension must be one of: %s' % (path, ', '.join(allowed_extensions))) - - -if __name__ == '__main__': - main() diff --git a/tests/sanity/ignore-2.13.txt b/tests/sanity/ignore-2.13.txt deleted file mode 100644 index 6f6495dd17..0000000000 --- a/tests/sanity/ignore-2.13.txt +++ /dev/null @@ -1,14 +0,0 @@ -.azure-pipelines/scripts/publish-codecov.py replace-urlopen -plugins/callback/timestamp.py validate-modules:invalid-documentation -plugins/callback/yaml.py validate-modules:invalid-documentation -plugins/lookup/etcd.py validate-modules:invalid-documentation -plugins/lookup/etcd3.py validate-modules:invalid-documentation -plugins/modules/consul_session.py validate-modules:parameter-state-invalid-choice -plugins/modules/iptables_state.py validate-modules:undocumented-parameter # params _back and _timeout used by action plugin -plugins/modules/lxc_container.py validate-modules:use-run-command-not-popen -plugins/modules/osx_defaults.py validate-modules:parameter-state-invalid-choice -plugins/modules/parted.py validate-modules:parameter-state-invalid-choice -plugins/modules/read_csv.py validate-modules:invalid-documentation -plugins/modules/rhevm.py validate-modules:parameter-state-invalid-choice -plugins/modules/xfconf.py validate-modules:return-syntax-error -tests/unit/plugins/modules/test_gio_mime.yaml no-smart-quotes diff --git a/tests/sanity/ignore-2.15.txt b/tests/sanity/ignore-2.15.txt deleted file mode 100644 index f624f28e01..0000000000 --- a/tests/sanity/ignore-2.15.txt +++ /dev/null @@ -1,12 +0,0 @@ -.azure-pipelines/scripts/publish-codecov.py replace-urlopen -plugins/lookup/dependent.py validate-modules:unidiomatic-typecheck 
-plugins/modules/consul_session.py validate-modules:parameter-state-invalid-choice -plugins/modules/homectl.py import-3.11 # Uses deprecated stdlib library 'crypt' -plugins/modules/iptables_state.py validate-modules:undocumented-parameter # params _back and _timeout used by action plugin -plugins/modules/lxc_container.py validate-modules:use-run-command-not-popen -plugins/modules/osx_defaults.py validate-modules:parameter-state-invalid-choice -plugins/modules/parted.py validate-modules:parameter-state-invalid-choice -plugins/modules/rhevm.py validate-modules:parameter-state-invalid-choice -plugins/modules/udm_user.py import-3.11 # Uses deprecated stdlib library 'crypt' -plugins/modules/xfconf.py validate-modules:return-syntax-error -tests/unit/plugins/modules/test_gio_mime.yaml no-smart-quotes diff --git a/tests/sanity/ignore-2.15.txt.license b/tests/sanity/ignore-2.15.txt.license deleted file mode 100644 index edff8c7685..0000000000 --- a/tests/sanity/ignore-2.15.txt.license +++ /dev/null @@ -1,3 +0,0 @@ -GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -SPDX-License-Identifier: GPL-3.0-or-later -SPDX-FileCopyrightText: Ansible Project diff --git a/tests/sanity/ignore-2.16.txt b/tests/sanity/ignore-2.16.txt index 665101becf..1a4c8f89b1 100644 --- a/tests/sanity/ignore-2.16.txt +++ b/tests/sanity/ignore-2.16.txt @@ -1,3 +1,12 @@ +plugins/callback/tasks_only.py yamllint:unparsable-with-libyaml +plugins/connection/wsl.py yamllint:unparsable-with-libyaml +plugins/inventory/gitlab_runners.py yamllint:unparsable-with-libyaml +plugins/inventory/iocage.py yamllint:unparsable-with-libyaml +plugins/inventory/linode.py yamllint:unparsable-with-libyaml +plugins/inventory/lxd.py yamllint:unparsable-with-libyaml +plugins/inventory/nmap.py yamllint:unparsable-with-libyaml +plugins/inventory/scaleway.py yamllint:unparsable-with-libyaml +plugins/inventory/virtualbox.py yamllint:unparsable-with-libyaml plugins/lookup/dependent.py validate-modules:unidiomatic-typecheck plugins/modules/consul_session.py validate-modules:parameter-state-invalid-choice plugins/modules/homectl.py import-3.11 # Uses deprecated stdlib library 'crypt' @@ -10,4 +19,5 @@ plugins/modules/rhevm.py validate-modules:parameter-state-invalid-choice plugins/modules/udm_user.py import-3.11 # Uses deprecated stdlib library 'crypt' plugins/modules/udm_user.py import-3.12 # Uses deprecated stdlib library 'crypt' plugins/modules/xfconf.py validate-modules:return-syntax-error +plugins/test/ansible_type.py yamllint:unparsable-with-libyaml tests/unit/plugins/modules/test_gio_mime.yaml no-smart-quotes diff --git a/tests/sanity/ignore-2.17.txt b/tests/sanity/ignore-2.17.txt index 39048d2718..97751e5a92 100644 --- a/tests/sanity/ignore-2.17.txt +++ b/tests/sanity/ignore-2.17.txt @@ -10,5 +10,5 @@ plugins/modules/udm_user.py import-3.11 # Uses deprecated stdlib library 'crypt plugins/modules/udm_user.py import-3.12 # Uses deprecated stdlib library 'crypt' plugins/modules/xfconf.py validate-modules:return-syntax-error plugins/module_utils/univention_umc.py pylint:use-yield-from # suggested construct does not work with Python 2 -tests/unit/plugins/modules/helper.py pylint:use-yield-from # suggested construct does not work with Python 2 +tests/unit/plugins/modules/uthelper.py pylint:use-yield-from # suggested construct does not work with Python 2 tests/unit/plugins/modules/test_gio_mime.yaml no-smart-quotes diff --git a/tests/sanity/ignore-2.18.txt b/tests/sanity/ignore-2.18.txt index 
39048d2718..97751e5a92 100644 --- a/tests/sanity/ignore-2.18.txt +++ b/tests/sanity/ignore-2.18.txt @@ -10,5 +10,5 @@ plugins/modules/udm_user.py import-3.11 # Uses deprecated stdlib library 'crypt plugins/modules/udm_user.py import-3.12 # Uses deprecated stdlib library 'crypt' plugins/modules/xfconf.py validate-modules:return-syntax-error plugins/module_utils/univention_umc.py pylint:use-yield-from # suggested construct does not work with Python 2 -tests/unit/plugins/modules/helper.py pylint:use-yield-from # suggested construct does not work with Python 2 +tests/unit/plugins/modules/uthelper.py pylint:use-yield-from # suggested construct does not work with Python 2 tests/unit/plugins/modules/test_gio_mime.yaml no-smart-quotes diff --git a/tests/sanity/ignore-2.19.txt b/tests/sanity/ignore-2.19.txt index 39048d2718..97751e5a92 100644 --- a/tests/sanity/ignore-2.19.txt +++ b/tests/sanity/ignore-2.19.txt @@ -10,5 +10,5 @@ plugins/modules/udm_user.py import-3.11 # Uses deprecated stdlib library 'crypt plugins/modules/udm_user.py import-3.12 # Uses deprecated stdlib library 'crypt' plugins/modules/xfconf.py validate-modules:return-syntax-error plugins/module_utils/univention_umc.py pylint:use-yield-from # suggested construct does not work with Python 2 -tests/unit/plugins/modules/helper.py pylint:use-yield-from # suggested construct does not work with Python 2 +tests/unit/plugins/modules/uthelper.py pylint:use-yield-from # suggested construct does not work with Python 2 tests/unit/plugins/modules/test_gio_mime.yaml no-smart-quotes diff --git a/tests/sanity/ignore-2.14.txt b/tests/sanity/ignore-2.20.txt similarity index 67% rename from tests/sanity/ignore-2.14.txt rename to tests/sanity/ignore-2.20.txt index 24d7521036..97751e5a92 100644 --- a/tests/sanity/ignore-2.14.txt +++ b/tests/sanity/ignore-2.20.txt @@ -1,16 +1,14 @@ -.azure-pipelines/scripts/publish-codecov.py replace-urlopen -plugins/callback/timestamp.py validate-modules:invalid-documentation -plugins/callback/yaml.py validate-modules:invalid-documentation -plugins/lookup/etcd.py validate-modules:invalid-documentation -plugins/lookup/etcd3.py validate-modules:invalid-documentation plugins/modules/consul_session.py validate-modules:parameter-state-invalid-choice plugins/modules/homectl.py import-3.11 # Uses deprecated stdlib library 'crypt' +plugins/modules/homectl.py import-3.12 # Uses deprecated stdlib library 'crypt' plugins/modules/iptables_state.py validate-modules:undocumented-parameter # params _back and _timeout used by action plugin plugins/modules/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/osx_defaults.py validate-modules:parameter-state-invalid-choice plugins/modules/parted.py validate-modules:parameter-state-invalid-choice -plugins/modules/read_csv.py validate-modules:invalid-documentation plugins/modules/rhevm.py validate-modules:parameter-state-invalid-choice plugins/modules/udm_user.py import-3.11 # Uses deprecated stdlib library 'crypt' +plugins/modules/udm_user.py import-3.12 # Uses deprecated stdlib library 'crypt' plugins/modules/xfconf.py validate-modules:return-syntax-error +plugins/module_utils/univention_umc.py pylint:use-yield-from # suggested construct does not work with Python 2 +tests/unit/plugins/modules/uthelper.py pylint:use-yield-from # suggested construct does not work with Python 2 tests/unit/plugins/modules/test_gio_mime.yaml no-smart-quotes diff --git a/tests/sanity/extra/botmeta.json.license b/tests/sanity/ignore-2.20.txt.license similarity index 100% rename from 
tests/sanity/extra/botmeta.json.license rename to tests/sanity/ignore-2.20.txt.license diff --git a/tests/unit/compat/__init__.py b/tests/unit/compat/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/compat/builtins.py b/tests/unit/compat/builtins.py deleted file mode 100644 index d548601d46..0000000000 --- a/tests/unit/compat/builtins.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (c) 2014, Toshio Kuratomi -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -# -# Compat for python2.7 -# - -# One unittest needs to import builtins via __import__() so we need to have -# the string that represents it -try: - import __builtin__ # noqa: F401, pylint: disable=unused-import -except ImportError: - BUILTINS = 'builtins' -else: - BUILTINS = '__builtin__' diff --git a/tests/unit/compat/mock.py b/tests/unit/compat/mock.py deleted file mode 100644 index 97534e0cae..0000000000 --- a/tests/unit/compat/mock.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (c) 2014, Toshio Kuratomi -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -''' -Compat module for Python3.x's unittest.mock module -''' -# Python 2.7 - -# Note: Could use the pypi mock library on python3.x as well as python2.x. It -# is the same as the python3 stdlib mock library - -try: - # Allow wildcard import because we really do want to import all of mock's - # symbols into this compat shim - # pylint: disable=wildcard-import,unused-wildcard-import - from unittest.mock import * # noqa: F401, pylint: disable=unused-import -except ImportError: - # Python 2 - # pylint: disable=wildcard-import,unused-wildcard-import - try: - from mock import * # noqa: F401, pylint: disable=unused-import - except ImportError: - print('You need the mock library installed on python2.x to run tests') diff --git a/tests/unit/compat/unittest.py b/tests/unit/compat/unittest.py deleted file mode 100644 index 732929a165..0000000000 --- a/tests/unit/compat/unittest.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (c) 2014, Toshio Kuratomi -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -''' -Compat module for Python2.7's unittest module -''' - -# Allow wildcard import because we really do want to import all of -# unittests's symbols into this compat shim -# pylint: disable=wildcard-import,unused-wildcard-import -from unittest import * # noqa: F401, pylint: disable=unused-import diff --git a/tests/unit/mock/loader.py b/tests/unit/mock/loader.py deleted file mode 100644 index f7aff17c32..0000000000 --- a/tests/unit/mock/loader.py +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright (c) 2012-2014, Michael DeHaan -# -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import (absolute_import, division, print_function) -__metaclass__ 
= type - -import os - -from ansible.errors import AnsibleParserError -from ansible.parsing.dataloader import DataLoader -from ansible.module_utils.common.text.converters import to_bytes, to_text - - -class DictDataLoader(DataLoader): - - def __init__(self, file_mapping=None): - file_mapping = {} if file_mapping is None else file_mapping - assert isinstance(file_mapping, dict) - - super(DictDataLoader, self).__init__() - - self._file_mapping = file_mapping - self._build_known_directories() - self._vault_secrets = None - - def load_from_file(self, path, cache=True, unsafe=False): - path = to_text(path) - if path in self._file_mapping: - return self.load(self._file_mapping[path], path) - return None - - # TODO: the real _get_file_contents returns a bytestring, so we actually convert the - # unicode/text it's created with to utf-8 - def _get_file_contents(self, file_name): - path = to_text(file_name) - if path in self._file_mapping: - return (to_bytes(self._file_mapping[path]), False) - else: - raise AnsibleParserError("file not found: %s" % path) - - def path_exists(self, path): - path = to_text(path) - return path in self._file_mapping or path in self._known_directories - - def is_file(self, path): - path = to_text(path) - return path in self._file_mapping - - def is_directory(self, path): - path = to_text(path) - return path in self._known_directories - - def list_directory(self, path): - ret = [] - path = to_text(path) - for x in (list(self._file_mapping.keys()) + self._known_directories): - if x.startswith(path): - if os.path.dirname(x) == path: - ret.append(os.path.basename(x)) - return ret - - def is_executable(self, path): - # FIXME: figure out a way to make paths return true for this - return False - - def _add_known_directory(self, directory): - if directory not in self._known_directories: - self._known_directories.append(directory) - - def _build_known_directories(self): - self._known_directories = [] - for path in self._file_mapping: - dirname = os.path.dirname(path) - while dirname not in ('/', ''): - self._add_known_directory(dirname) - dirname = os.path.dirname(dirname) - - def push(self, path, content): - rebuild_dirs = False - if path not in self._file_mapping: - rebuild_dirs = True - - self._file_mapping[path] = content - - if rebuild_dirs: - self._build_known_directories() - - def pop(self, path): - if path in self._file_mapping: - del self._file_mapping[path] - self._build_known_directories() - - def clear(self): - self._file_mapping = dict() - self._known_directories = [] - - def get_basedir(self): - return os.getcwd() - - def set_vault_secrets(self, vault_secrets): - self._vault_secrets = vault_secrets diff --git a/tests/unit/mock/path.py b/tests/unit/mock/path.py deleted file mode 100644 index 62ae023431..0000000000 --- a/tests/unit/mock/path.py +++ /dev/null @@ -1,12 +0,0 @@ -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from ansible_collections.community.general.tests.unit.compat.mock import MagicMock -from ansible.utils.path import unfrackpath - - -mock_unfrackpath_noop = MagicMock(spec_set=unfrackpath, side_effect=lambda x, *args, **kwargs: x) diff --git a/tests/unit/mock/procenv.py b/tests/unit/mock/procenv.py deleted file mode 100644 index 4646d7f355..0000000000 --- a/tests/unit/mock/procenv.py +++ /dev/null @@ -1,77 +0,0 @@ -# 
Copyright (c) 2016, Matt Davis -# Copyright (c) 2016, Toshio Kuratomi -# -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import sys -import json - -from contextlib import contextmanager -from io import BytesIO, StringIO -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible.module_utils.six import PY3 -from ansible.module_utils.common.text.converters import to_bytes - - -@contextmanager -def swap_stdin_and_argv(stdin_data='', argv_data=tuple()): - """ - context manager that temporarily masks the test runner's values for stdin and argv - """ - real_stdin = sys.stdin - real_argv = sys.argv - - if PY3: - fake_stream = StringIO(stdin_data) - fake_stream.buffer = BytesIO(to_bytes(stdin_data)) - else: - fake_stream = BytesIO(to_bytes(stdin_data)) - - try: - sys.stdin = fake_stream - sys.argv = argv_data - - yield - finally: - sys.stdin = real_stdin - sys.argv = real_argv - - -@contextmanager -def swap_stdout(): - """ - context manager that temporarily replaces stdout for tests that need to verify output - """ - old_stdout = sys.stdout - - if PY3: - fake_stream = StringIO() - else: - fake_stream = BytesIO() - - try: - sys.stdout = fake_stream - - yield fake_stream - finally: - sys.stdout = old_stdout - - -class ModuleTestCase(unittest.TestCase): - def setUp(self, module_args=None): - if module_args is None: - module_args = {'_ansible_remote_tmp': '/tmp', '_ansible_keep_remote_files': False} - - args = json.dumps(dict(ANSIBLE_MODULE_ARGS=module_args)) - - # unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually - self.stdin_swap = swap_stdin_and_argv(stdin_data=args) - self.stdin_swap.__enter__() - - def tearDown(self): - # unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually - self.stdin_swap.__exit__(None, None, None) diff --git a/tests/unit/mock/vault_helper.py b/tests/unit/mock/vault_helper.py deleted file mode 100644 index 2b116129f5..0000000000 --- a/tests/unit/mock/vault_helper.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) Ansible project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from ansible.module_utils.common.text.converters import to_bytes - -from ansible.parsing.vault import VaultSecret - - -class TextVaultSecret(VaultSecret): - '''A secret piece of text. ie, a password. Tracks text encoding. 
- - The text encoding of the text may not be the default text encoding so - we keep track of the encoding so we encode it to the same bytes.''' - - def __init__(self, text, encoding=None, errors=None, _bytes=None): - super(TextVaultSecret, self).__init__() - self.text = text - self.encoding = encoding or 'utf-8' - self._bytes = _bytes - self.errors = errors or 'strict' - - @property - def bytes(self): - '''The text encoded with encoding, unless we specifically set _bytes.''' - return self._bytes or to_bytes(self.text, encoding=self.encoding, errors=self.errors) diff --git a/tests/unit/mock/yaml_helper.py b/tests/unit/mock/yaml_helper.py deleted file mode 100644 index ce1bd719b8..0000000000 --- a/tests/unit/mock/yaml_helper.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright (c) Ansible project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import io -import yaml - -from ansible.module_utils.six import PY3 -from ansible.parsing.yaml.loader import AnsibleLoader -from ansible.parsing.yaml.dumper import AnsibleDumper - - -class YamlTestUtils(object): - """Mixin class to combine with a unittest.TestCase subclass.""" - def _loader(self, stream): - """Vault related tests will want to override this. - - Vault cases should setup a AnsibleLoader that has the vault password.""" - return AnsibleLoader(stream) - - def _dump_stream(self, obj, stream, dumper=None): - """Dump to a py2-unicode or py3-string stream.""" - if PY3: - return yaml.dump(obj, stream, Dumper=dumper) - else: - return yaml.dump(obj, stream, Dumper=dumper, encoding=None) - - def _dump_string(self, obj, dumper=None): - """Dump to a py2-unicode or py3-string""" - if PY3: - return yaml.dump(obj, Dumper=dumper) - else: - return yaml.dump(obj, Dumper=dumper, encoding=None) - - def _dump_load_cycle(self, obj): - # Each pass though a dump or load revs the 'generation' - # obj to yaml string - string_from_object_dump = self._dump_string(obj, dumper=AnsibleDumper) - - # wrap a stream/file like StringIO around that yaml - stream_from_object_dump = io.StringIO(string_from_object_dump) - loader = self._loader(stream_from_object_dump) - # load the yaml stream to create a new instance of the object (gen 2) - obj_2 = loader.get_data() - - # dump the gen 2 objects directory to strings - string_from_object_dump_2 = self._dump_string(obj_2, - dumper=AnsibleDumper) - - # The gen 1 and gen 2 yaml strings - self.assertEqual(string_from_object_dump, string_from_object_dump_2) - # the gen 1 (orig) and gen 2 py object - self.assertEqual(obj, obj_2) - - # again! gen 3... load strings into py objects - stream_3 = io.StringIO(string_from_object_dump_2) - loader_3 = self._loader(stream_3) - obj_3 = loader_3.get_data() - - string_from_object_dump_3 = self._dump_string(obj_3, dumper=AnsibleDumper) - - self.assertEqual(obj, obj_3) - # should be transitive, but... 
- self.assertEqual(obj_2, obj_3) - self.assertEqual(string_from_object_dump, string_from_object_dump_3) - - def _old_dump_load_cycle(self, obj): - '''Dump the passed in object to yaml, load it back up, dump again, compare.''' - stream = io.StringIO() - - yaml_string = self._dump_string(obj, dumper=AnsibleDumper) - self._dump_stream(obj, stream, dumper=AnsibleDumper) - - yaml_string_from_stream = stream.getvalue() - - # reset stream - stream.seek(0) - - loader = self._loader(stream) - # loader = AnsibleLoader(stream, vault_password=self.vault_password) - obj_from_stream = loader.get_data() - - stream_from_string = io.StringIO(yaml_string) - loader2 = self._loader(stream_from_string) - # loader2 = AnsibleLoader(stream_from_string, vault_password=self.vault_password) - obj_from_string = loader2.get_data() - - stream_obj_from_stream = io.StringIO() - stream_obj_from_string = io.StringIO() - - if PY3: - yaml.dump(obj_from_stream, stream_obj_from_stream, Dumper=AnsibleDumper) - yaml.dump(obj_from_stream, stream_obj_from_string, Dumper=AnsibleDumper) - else: - yaml.dump(obj_from_stream, stream_obj_from_stream, Dumper=AnsibleDumper, encoding=None) - yaml.dump(obj_from_stream, stream_obj_from_string, Dumper=AnsibleDumper, encoding=None) - - yaml_string_stream_obj_from_stream = stream_obj_from_stream.getvalue() - yaml_string_stream_obj_from_string = stream_obj_from_string.getvalue() - - stream_obj_from_stream.seek(0) - stream_obj_from_string.seek(0) - - if PY3: - yaml_string_obj_from_stream = yaml.dump(obj_from_stream, Dumper=AnsibleDumper) - yaml_string_obj_from_string = yaml.dump(obj_from_string, Dumper=AnsibleDumper) - else: - yaml_string_obj_from_stream = yaml.dump(obj_from_stream, Dumper=AnsibleDumper, encoding=None) - yaml_string_obj_from_string = yaml.dump(obj_from_string, Dumper=AnsibleDumper, encoding=None) - - assert yaml_string == yaml_string_obj_from_stream - assert yaml_string == yaml_string_obj_from_stream == yaml_string_obj_from_string - assert (yaml_string == yaml_string_obj_from_stream == yaml_string_obj_from_string == yaml_string_stream_obj_from_stream == - yaml_string_stream_obj_from_string) - assert obj == obj_from_stream - assert obj == obj_from_string - assert obj == yaml_string_obj_from_stream - assert obj == yaml_string_obj_from_string - assert obj == obj_from_stream == obj_from_string == yaml_string_obj_from_stream == yaml_string_obj_from_string - return {'obj': obj, - 'yaml_string': yaml_string, - 'yaml_string_from_stream': yaml_string_from_stream, - 'obj_from_stream': obj_from_stream, - 'obj_from_string': obj_from_string, - 'yaml_string_obj_from_string': yaml_string_obj_from_string} diff --git a/tests/unit/plugins/callback/test_elastic.py b/tests/unit/plugins/callback/test_elastic.py index 73f4a6c27c..e90186789b 100644 --- a/tests/unit/plugins/callback/test_elastic.py +++ b/tests/unit/plugins/callback/test_elastic.py @@ -7,8 +7,8 @@ __metaclass__ = type from ansible.playbook.task import Task from ansible.executor.task_result import TaskResult -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock, Mock +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch, MagicMock, Mock from ansible_collections.community.general.plugins.callback.elastic import ElasticSource, TaskData from collections import OrderedDict import sys diff --git 
a/tests/unit/plugins/callback/test_loganalytics.py b/tests/unit/plugins/callback/test_loganalytics.py index 061701ab6d..8cc5c941d7 100644 --- a/tests/unit/plugins/callback/test_loganalytics.py +++ b/tests/unit/plugins/callback/test_loganalytics.py @@ -6,8 +6,8 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.executor.task_result import TaskResult -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.tests.unit.compat.mock import patch, Mock +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch, Mock from ansible_collections.community.general.plugins.callback.loganalytics import AzureLogAnalyticsSource from datetime import datetime diff --git a/tests/unit/plugins/callback/test_opentelemetry.py b/tests/unit/plugins/callback/test_opentelemetry.py index dea2e29d41..1da506c262 100644 --- a/tests/unit/plugins/callback/test_opentelemetry.py +++ b/tests/unit/plugins/callback/test_opentelemetry.py @@ -8,8 +8,8 @@ __metaclass__ = type from ansible.playbook.task import Task from ansible.executor.task_result import TaskResult -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock, Mock +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch, MagicMock, Mock from ansible_collections.community.general.plugins.callback.opentelemetry import OpenTelemetrySource, TaskData from collections import OrderedDict import sys @@ -95,22 +95,6 @@ class TestOpentelemetry(unittest.TestCase): self.assertEqual(host_data.uuid, 'include') self.assertEqual(host_data.name, 'include') self.assertEqual(host_data.status, 'ok') - self.assertEqual(self.opentelemetry.ansible_version, None) - - def test_finish_task_include_with_ansible_version(self): - task_fields = {'args': {'_ansible_version': '1.2.3'}} - result = TaskResult(host=None, task=self.mock_task, return_data={}, task_fields=task_fields) - tasks_data = OrderedDict() - tasks_data['myuuid'] = self.my_task - - self.opentelemetry.finish_task( - tasks_data, - 'ok', - result, - "" - ) - - self.assertEqual(self.opentelemetry.ansible_version, '1.2.3') def test_get_error_message(self): test_cases = ( diff --git a/tests/unit/plugins/callback/test_splunk.py b/tests/unit/plugins/callback/test_splunk.py index c09540fc00..09c56930f7 100644 --- a/tests/unit/plugins/callback/test_splunk.py +++ b/tests/unit/plugins/callback/test_splunk.py @@ -6,8 +6,8 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.executor.task_result import TaskResult -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.tests.unit.compat.mock import patch, Mock +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch, Mock from ansible_collections.community.general.plugins.callback.splunk import SplunkHTTPCollectorSource from datetime import datetime diff --git a/tests/unit/plugins/connection/test_lxc.py b/tests/unit/plugins/connection/test_lxc.py index bebd42772a..e65df3315c 100644 --- 
a/tests/unit/plugins/connection/test_lxc.py +++ b/tests/unit/plugins/connection/test_lxc.py @@ -14,7 +14,7 @@ from io import StringIO from ansible.errors import AnsibleError from ansible.playbook.play_context import PlayContext from ansible.plugins.loader import connection_loader -from ansible_collections.community.general.tests.unit.compat import mock +from ansible_collections.community.internal_test_tools.tests.unit.compat import mock @pytest.fixture(autouse=True) @@ -117,14 +117,14 @@ class TestLXCConnectionClass(): # first call initializes the connection conn._connect() - assert conn.container_name is container1_name + assert conn.container_name == container1_name assert conn.container is not None assert conn.container.name == container1_name container1 = conn.container # second call is basically a no-op conn._connect() - assert conn.container_name is container1_name + assert conn.container_name == container1_name assert conn.container is container1 assert conn.container.name == container1_name diff --git a/tests/unit/plugins/connection/test_wsl.py b/tests/unit/plugins/connection/test_wsl.py new file mode 100644 index 0000000000..c28d1fbec9 --- /dev/null +++ b/tests/unit/plugins/connection/test_wsl.py @@ -0,0 +1,588 @@ +# -*- coding: utf-8 -*- +# Derived from test_proxmox_pct_remote.py (c) 2024 Nils Stein (@mietzen) +# Copyright (c) 2025 Rui Lopes (@rgl) +# Copyright (c) 2025 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +import os +import pytest + +from ansible_collections.community.general.plugins.connection.wsl import authenticity_msg, MyAddPolicy +from ansible_collections.community.general.plugins.module_utils._filelock import FileLock, LockTimeout +from ansible.errors import AnsibleError, AnsibleAuthenticationFailure, AnsibleConnectionFailure +from ansible.module_utils.common.text.converters import to_bytes +from ansible.playbook.play_context import PlayContext +from ansible.plugins.loader import connection_loader +from io import StringIO +from pathlib import Path +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch, MagicMock, mock_open + + +paramiko = pytest.importorskip('paramiko') + + +@pytest.fixture +def connection(): + play_context = PlayContext() + in_stream = StringIO() + conn = connection_loader.get('community.general.wsl', play_context, in_stream) + conn.set_option('remote_addr', '192.168.1.100') + conn.set_option('remote_user', 'root') + conn.set_option('password', 'password') + conn.set_option('wsl_distribution', 'test') + return conn + + +def test_connection_options(connection): + """ Test that connection options are properly set """ + assert connection.get_option('remote_addr') == '192.168.1.100' + assert connection.get_option('remote_user') == 'root' + assert connection.get_option('password') == 'password' + assert connection.get_option('wsl_distribution') == 'test' + + +def test_authenticity_msg(): + """ Test authenticity message formatting """ + msg = authenticity_msg('test.host', 'ssh-rsa', 'AA:BB:CC:DD') + assert 'test.host' in msg + assert 'ssh-rsa' in msg + assert 'AA:BB:CC:DD' in msg + + +def test_missing_host_key(connection): + """ Test MyAddPolicy missing_host_key method """ + + client = MagicMock() + key = MagicMock() + key.get_fingerprint.return_value = b'fingerprint' + key.get_name.return_value = 'ssh-rsa' + + policy = MyAddPolicy(connection) + + 
connection.set_option('host_key_auto_add', True) + policy.missing_host_key(client, 'test.host', key) + assert hasattr(key, '_added_by_ansible_this_time') + + connection.set_option('host_key_auto_add', False) + connection.set_option('host_key_checking', False) + policy.missing_host_key(client, 'test.host', key) + + connection.set_option('host_key_checking', True) + connection.set_option('host_key_auto_add', False) + connection.set_option('use_persistent_connections', False) + + with patch('ansible.utils.display.Display.prompt_until', return_value='yes'): + policy.missing_host_key(client, 'test.host', key) + + with patch('ansible.utils.display.Display.prompt_until', return_value='no'): + with pytest.raises(AnsibleError, match='host connection rejected by user'): + policy.missing_host_key(client, 'test.host', key) + + +def test_set_log_channel(connection): + """ Test setting log channel """ + connection._set_log_channel('test_channel') + assert connection._log_channel == 'test_channel' + + +def test_parse_proxy_command(connection): + """ Test proxy command parsing """ + connection.set_option('proxy_command', 'ssh -W %h:%p proxy.example.com') + connection.set_option('remote_addr', 'target.example.com') + connection.set_option('remote_user', 'testuser') + + result = connection._parse_proxy_command(port=2222) + assert 'sock' in result + assert isinstance(result['sock'], paramiko.ProxyCommand) + + +@patch('paramiko.SSHClient') +def test_connect_with_rsa_sha2_disabled(mock_ssh, connection): + """ Test connection with RSA SHA2 algorithms disabled """ + connection.set_option('use_rsa_sha2_algorithms', False) + mock_client = MagicMock() + mock_ssh.return_value = mock_client + + connection._connect() + + call_kwargs = mock_client.connect.call_args[1] + assert 'disabled_algorithms' in call_kwargs + assert 'pubkeys' in call_kwargs['disabled_algorithms'] + + +@patch('paramiko.SSHClient') +def test_connect_with_bad_host_key(mock_ssh, connection): + """ Test connection with bad host key """ + mock_client = MagicMock() + mock_ssh.return_value = mock_client + mock_client.connect.side_effect = paramiko.ssh_exception.BadHostKeyException( + 'hostname', MagicMock(), MagicMock()) + + with pytest.raises(AnsibleConnectionFailure, match='host key mismatch'): + connection._connect() + + +@patch('paramiko.SSHClient') +def test_connect_with_invalid_host_key(mock_ssh, connection): + """ Test connection with an invalid host key entry """ + connection.set_option('host_key_checking', True) + mock_client = MagicMock() + mock_ssh.return_value = mock_client + mock_client.load_system_host_keys.side_effect = paramiko.hostkeys.InvalidHostKey( + "Bad Line!", Exception('Something crashed!')) + + with pytest.raises(AnsibleConnectionFailure, match="Invalid host key: Bad Line!"): + connection._connect() + + +@patch('paramiko.SSHClient') +def test_connect_success(mock_ssh, connection): + """ Test successful SSH connection establishment """ + mock_client = MagicMock() + mock_ssh.return_value = mock_client + + connection._connect() + + assert mock_client.connect.called + assert connection._connected + + +@patch('paramiko.SSHClient') +def test_connect_authentication_failure(mock_ssh, connection): + """ Test SSH connection with authentication failure """ + mock_client = MagicMock() + mock_ssh.return_value = mock_client + mock_client.connect.side_effect = paramiko.ssh_exception.AuthenticationException('Auth failed') + + with pytest.raises(AnsibleAuthenticationFailure): + connection._connect() + + +def test_any_keys_added(connection): + """ Test 
checking for added host keys """ + connection.ssh = MagicMock() + connection.ssh._host_keys = { + 'host1': { + 'ssh-rsa': MagicMock(_added_by_ansible_this_time=True), + 'ssh-ed25519': MagicMock(_added_by_ansible_this_time=False) + } + } + + assert connection._any_keys_added() is True + + connection.ssh._host_keys = { + 'host1': { + 'ssh-rsa': MagicMock(_added_by_ansible_this_time=False) + } + } + assert connection._any_keys_added() is False + + +@patch('os.path.exists') +@patch('os.stat') +@patch('tempfile.NamedTemporaryFile') +def test_save_ssh_host_keys(mock_tempfile, mock_stat, mock_exists, connection): + """ Test saving SSH host keys """ + mock_exists.return_value = True + mock_stat.return_value = MagicMock(st_mode=0o644, st_uid=1000, st_gid=1000) + mock_tempfile.return_value.__enter__.return_value.name = '/tmp/test_keys' + + connection.ssh = MagicMock() + connection.ssh._host_keys = { + 'host1': { + 'ssh-rsa': MagicMock( + get_base64=lambda: 'KEY1', + _added_by_ansible_this_time=True + ) + } + } + + mock_open_obj = mock_open() + with patch('builtins.open', mock_open_obj): + connection._save_ssh_host_keys('/tmp/test_keys') + + mock_open_obj().write.assert_called_with('host1 ssh-rsa KEY1\n') + + +def test_build_wsl_command(connection): + """ Test wsl command building with different users """ + cmd = connection._build_wsl_command('/bin/sh -c "ls -la"') + assert cmd == 'wsl.exe --distribution test -- /bin/sh -c "ls -la"' + + connection.set_option('wsl_user', 'test-user') + cmd = connection._build_wsl_command('/bin/sh -c "ls -la"') + assert cmd == 'wsl.exe --distribution test --user test-user -- /bin/sh -c "ls -la"' + + connection.set_option('become', True) + connection.set_option('become_user', 'test-become-user') + cmd = connection._build_wsl_command('/bin/sh -c "ls -la"') + assert cmd == 'wsl.exe --distribution test --user test-become-user -- /bin/sh -c "ls -la"' + + +@patch('paramiko.SSHClient') +def test_exec_command_success(mock_ssh, connection): + """ Test successful command execution """ + mock_client = MagicMock() + mock_ssh.return_value = mock_client + mock_channel = MagicMock() + mock_transport = MagicMock() + + mock_client.get_transport.return_value = mock_transport + mock_transport.open_session.return_value = mock_channel + mock_channel.recv_exit_status.return_value = 0 + mock_channel.makefile.return_value = [to_bytes('stdout')] + mock_channel.makefile_stderr.return_value = [to_bytes("")] + + connection._connected = True + connection.ssh = mock_client + + returncode, stdout, stderr = connection.exec_command('ls -la') + + mock_transport.open_session.assert_called_once() + mock_transport.set_keepalive.assert_called_once_with(5) + + +@patch('paramiko.SSHClient') +def test_exec_command_wsl_not_found(mock_ssh, connection): + """ Test command execution when wsl.exe is not found """ + mock_client = MagicMock() + mock_ssh.return_value = mock_client + mock_channel = MagicMock() + mock_transport = MagicMock() + + mock_client.get_transport.return_value = mock_transport + mock_transport.open_session.return_value = mock_channel + mock_channel.recv_exit_status.return_value = 1 + mock_channel.makefile.return_value = [to_bytes("")] + mock_channel.makefile_stderr.return_value = [to_bytes("'wsl.exe' is not recognized")] + + connection._connected = True + connection.ssh = mock_client + + with pytest.raises(AnsibleError, match='wsl.exe not found in path of host'): + connection.exec_command('ls -la') + + +@patch('paramiko.SSHClient') +def test_exec_command_session_open_failure(mock_ssh, 
connection): + """ Test exec_command when session opening fails """ + mock_client = MagicMock() + mock_transport = MagicMock() + mock_transport.open_session.side_effect = Exception('Failed to open session') + mock_client.get_transport.return_value = mock_transport + + connection._connected = True + connection.ssh = mock_client + + with pytest.raises(AnsibleConnectionFailure, match='Failed to open session'): + connection.exec_command('test command') + + +@patch('paramiko.SSHClient') +def test_exec_command_with_privilege_escalation(mock_ssh, connection): + """ Test exec_command with privilege escalation """ + mock_client = MagicMock() + mock_channel = MagicMock() + mock_transport = MagicMock() + + mock_client.get_transport.return_value = mock_transport + mock_transport.open_session.return_value = mock_channel + connection._connected = True + connection.ssh = mock_client + + connection.become = MagicMock() + connection.become.expect_prompt.return_value = True + connection.become.check_success.return_value = False + connection.become.check_password_prompt.return_value = True + connection.become.get_option.return_value = 'sudo_password' + + mock_channel.recv.return_value = b'[sudo] password:' + mock_channel.recv_exit_status.return_value = 0 + mock_channel.makefile.return_value = [b""] + mock_channel.makefile_stderr.return_value = [b""] + + returncode, stdout, stderr = connection.exec_command('sudo test command') + + mock_channel.sendall.assert_called_once_with(b'sudo_password\n') + + +def test_put_file(connection): + """ Test putting a file to the remote system """ + connection.exec_command = MagicMock() + connection.exec_command.return_value = (0, b"", b"") + + with patch('builtins.open', create=True) as mock_open: + mock_open.return_value.__enter__.return_value.read.return_value = b'test content' + connection.put_file('/local/path', '/remote/path') + + connection.exec_command.assert_called_once_with("/bin/sh -c 'cat > /remote/path'", in_data=b'test content', sudoable=False) + + +@patch('paramiko.SSHClient') +def test_put_file_general_error(mock_ssh, connection): + """ Test put_file with general error """ + mock_client = MagicMock() + mock_ssh.return_value = mock_client + mock_channel = MagicMock() + mock_transport = MagicMock() + + mock_client.get_transport.return_value = mock_transport + mock_transport.open_session.return_value = mock_channel + mock_channel.recv_exit_status.return_value = 1 + mock_channel.makefile.return_value = [to_bytes("")] + mock_channel.makefile_stderr.return_value = [to_bytes('Some error')] + + connection._connected = True + connection.ssh = mock_client + + with pytest.raises(AnsibleError, match='error occurred while putting file from /remote/path to /local/path'): + connection.put_file('/remote/path', '/local/path') + + +@patch('paramiko.SSHClient') +def test_put_file_cat_not_found(mock_ssh, connection): + """ Test command execution when cat is not found """ + mock_client = MagicMock() + mock_ssh.return_value = mock_client + mock_channel = MagicMock() + mock_transport = MagicMock() + + mock_client.get_transport.return_value = mock_transport + mock_transport.open_session.return_value = mock_channel + mock_channel.recv_exit_status.return_value = 1 + mock_channel.makefile.return_value = [to_bytes("")] + mock_channel.makefile_stderr.return_value = [to_bytes('cat: not found')] + + connection._connected = True + connection.ssh = mock_client + + with pytest.raises(AnsibleError, match='cat not found in path of WSL distribution'): + connection.fetch_file('/remote/path', 
'/local/path') + + +def test_fetch_file(connection): + """ Test fetching a file from the remote system """ + connection.exec_command = MagicMock() + connection.exec_command.return_value = (0, b'test content', b"") + + with patch('builtins.open', create=True) as mock_open: + connection.fetch_file('/remote/path', '/local/path') + + connection.exec_command.assert_called_once_with("/bin/sh -c 'cat /remote/path'", sudoable=False) + mock_open.assert_called_with('/local/path', 'wb') + + +@patch('paramiko.SSHClient') +def test_fetch_file_general_error(mock_ssh, connection): + """ Test fetch_file with general error """ + mock_client = MagicMock() + mock_ssh.return_value = mock_client + mock_channel = MagicMock() + mock_transport = MagicMock() + + mock_client.get_transport.return_value = mock_transport + mock_transport.open_session.return_value = mock_channel + mock_channel.recv_exit_status.return_value = 1 + mock_channel.makefile.return_value = [to_bytes("")] + mock_channel.makefile_stderr.return_value = [to_bytes('Some error')] + + connection._connected = True + connection.ssh = mock_client + + with pytest.raises(AnsibleError, match='error occurred while fetching file from /remote/path to /local/path'): + connection.fetch_file('/remote/path', '/local/path') + + +@patch('paramiko.SSHClient') +def test_fetch_file_cat_not_found(mock_ssh, connection): + """ Test command execution when cat is not found """ + mock_client = MagicMock() + mock_ssh.return_value = mock_client + mock_channel = MagicMock() + mock_transport = MagicMock() + + mock_client.get_transport.return_value = mock_transport + mock_transport.open_session.return_value = mock_channel + mock_channel.recv_exit_status.return_value = 1 + mock_channel.makefile.return_value = [to_bytes("")] + mock_channel.makefile_stderr.return_value = [to_bytes('cat: not found')] + + connection._connected = True + connection.ssh = mock_client + + with pytest.raises(AnsibleError, match='cat not found in path of WSL distribution'): + connection.fetch_file('/remote/path', '/local/path') + + +def test_close(connection): + """ Test connection close """ + mock_ssh = MagicMock() + connection.ssh = mock_ssh + connection._connected = True + + connection.close() + + assert mock_ssh.close.called, 'ssh.close was not called' + assert not connection._connected, 'self._connected is still True' + + +def test_close_with_lock_file(connection): + """ Test close method with lock file creation """ + connection._any_keys_added = MagicMock(return_value=True) + connection._connected = True + connection.keyfile = '/tmp/wsl-known_hosts-test' + connection.set_option('host_key_checking', True) + connection.set_option('lock_file_timeout', 5) + connection.set_option('record_host_keys', True) + connection.ssh = MagicMock() + + lock_file_path = os.path.join(os.path.dirname(connection.keyfile), + f'ansible-{os.path.basename(connection.keyfile)}.lock') + + try: + connection.close() + assert os.path.exists(lock_file_path), 'Lock file was not created' + + lock_stat = os.stat(lock_file_path) + assert lock_stat.st_mode & 0o777 == 0o600, 'Incorrect lock file permissions' + finally: + Path(lock_file_path).unlink(missing_ok=True) + + +@patch('pathlib.Path.unlink') +@patch('os.path.exists') +def test_close_lock_file_time_out_error_handling(mock_exists, mock_unlink, connection): + """ Test close method with lock file timeout error """ + connection._any_keys_added = MagicMock(return_value=True) + connection._connected = True + connection._save_ssh_host_keys = MagicMock() + connection.keyfile = 
'/tmp/wsl-known_hosts-test' + connection.set_option('host_key_checking', True) + connection.set_option('lock_file_timeout', 5) + connection.set_option('record_host_keys', True) + connection.ssh = MagicMock() + + mock_exists.return_value = False + matcher = f'writing lock file for {connection.keyfile} ran in to the timeout of {connection.get_option("lock_file_timeout")}s' + with pytest.raises(AnsibleError, match=matcher): + with patch('os.getuid', return_value=1000), \ + patch('os.getgid', return_value=1000), \ + patch('os.chmod'), patch('os.chown'), \ + patch('os.rename'), \ + patch.object(FileLock, 'lock_file', side_effect=LockTimeout()): + connection.close() + + +@patch('ansible_collections.community.general.plugins.module_utils._filelock.FileLock.lock_file') +@patch('tempfile.NamedTemporaryFile') +@patch('os.chmod') +@patch('os.chown') +@patch('os.rename') +@patch('os.path.exists') +def test_tempfile_creation_and_move(mock_exists, mock_rename, mock_chown, mock_chmod, mock_tempfile, mock_lock_file, connection): + """ Test tempfile creation and move during close """ + connection._any_keys_added = MagicMock(return_value=True) + connection._connected = True + connection._save_ssh_host_keys = MagicMock() + connection.keyfile = '/tmp/wsl-known_hosts-test' + connection.set_option('host_key_checking', True) + connection.set_option('lock_file_timeout', 5) + connection.set_option('record_host_keys', True) + connection.ssh = MagicMock() + + mock_exists.return_value = False + + mock_lock_file_instance = MagicMock() + mock_lock_file.return_value = mock_lock_file_instance + mock_lock_file_instance.__enter__.return_value = None + + mock_tempfile_instance = MagicMock() + mock_tempfile_instance.name = '/tmp/mock_tempfile' + mock_tempfile.return_value.__enter__.return_value = mock_tempfile_instance + + mode = 0o644 + uid = 1000 + gid = 1000 + key_dir = os.path.dirname(connection.keyfile) + + with patch('os.getuid', return_value=uid), patch('os.getgid', return_value=gid): + connection.close() + + connection._save_ssh_host_keys.assert_called_once_with('/tmp/mock_tempfile') + mock_chmod.assert_called_once_with('/tmp/mock_tempfile', mode) + mock_chown.assert_called_once_with('/tmp/mock_tempfile', uid, gid) + mock_rename.assert_called_once_with('/tmp/mock_tempfile', connection.keyfile) + mock_tempfile.assert_called_once_with(dir=key_dir, delete=False) + + +@patch('pathlib.Path.unlink') +@patch('tempfile.NamedTemporaryFile') +@patch('ansible_collections.community.general.plugins.module_utils._filelock.FileLock.lock_file') +@patch('os.path.exists') +def test_close_tempfile_error_handling(mock_exists, mock_lock_file, mock_tempfile, mock_unlink, connection): + """ Test tempfile creation error """ + connection._any_keys_added = MagicMock(return_value=True) + connection._connected = True + connection._save_ssh_host_keys = MagicMock() + connection.keyfile = '/tmp/wsl-known_hosts-test' + connection.set_option('host_key_checking', True) + connection.set_option('lock_file_timeout', 5) + connection.set_option('record_host_keys', True) + connection.ssh = MagicMock() + + mock_exists.return_value = False + + mock_lock_file_instance = MagicMock() + mock_lock_file.return_value = mock_lock_file_instance + mock_lock_file_instance.__enter__.return_value = None + + mock_tempfile_instance = MagicMock() + mock_tempfile_instance.name = '/tmp/mock_tempfile' + mock_tempfile.return_value.__enter__.return_value = mock_tempfile_instance + + with pytest.raises(AnsibleError, match='error occurred while writing SSH host keys!'): + with 
patch.object(os, 'chmod', side_effect=Exception()): + connection.close() + mock_unlink.assert_called_with(missing_ok=True) + + +@patch('ansible_collections.community.general.plugins.module_utils._filelock.FileLock.lock_file') +@patch('os.path.exists') +def test_close_with_invalid_host_key(mock_exists, mock_lock_file, connection): + """ Test load_system_host_keys on close with InvalidHostKey error """ + connection._any_keys_added = MagicMock(return_value=True) + connection._connected = True + connection._save_ssh_host_keys = MagicMock() + connection.keyfile = '/tmp/wsl-known_hosts-test' + connection.set_option('host_key_checking', True) + connection.set_option('lock_file_timeout', 5) + connection.set_option('record_host_keys', True) + connection.ssh = MagicMock() + connection.ssh.load_system_host_keys.side_effect = paramiko.hostkeys.InvalidHostKey( + "Bad Line!", Exception('Something crashed!')) + + mock_exists.return_value = False + + mock_lock_file_instance = MagicMock() + mock_lock_file.return_value = mock_lock_file_instance + mock_lock_file_instance.__enter__.return_value = None + + with pytest.raises(AnsibleConnectionFailure, match="Invalid host key: Bad Line!"): + connection.close() + + +def test_reset(connection): + """ Test connection reset """ + connection._connected = True + connection.close = MagicMock() + connection._connect = MagicMock() + + connection.reset() + + connection.close.assert_called_once() + connection._connect.assert_called_once() + + connection._connected = False + connection.reset() + assert connection.close.call_count == 1 diff --git a/tests/unit/plugins/filter/test_crc32.py b/tests/unit/plugins/filter/test_crc32.py index 8201045136..7ec581f91f 100644 --- a/tests/unit/plugins/filter/test_crc32.py +++ b/tests/unit/plugins/filter/test_crc32.py @@ -6,7 +6,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest from ansible_collections.community.general.plugins.filter.crc32 import crc32s diff --git a/tests/unit/plugins/filter/test_json_patch.py b/tests/unit/plugins/filter/test_json_patch.py new file mode 100644 index 0000000000..7bd4a08664 --- /dev/null +++ b/tests/unit/plugins/filter/test_json_patch.py @@ -0,0 +1,313 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Stanislav Meduna (@numo68) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type # pylint: disable=C0103 + +import unittest +from ansible_collections.community.general.plugins.filter.json_patch import FilterModule +from ansible.errors import AnsibleFilterError + + +class TestJsonPatch(unittest.TestCase): + def setUp(self): + self.filter = FilterModule() + self.json_patch = self.filter.filters()["json_patch"] + self.json_diff = self.filter.filters()["json_diff"] + self.json_patch_recipe = self.filter.filters()["json_patch_recipe"] + + # json_patch + + def test_patch_add_to_empty(self): + result = self.json_patch({}, "add", "/a", 1) + self.assertEqual(result, {"a": 1}) + + def test_patch_add_to_dict(self): + result = self.json_patch({"b": 2}, "add", "/a", 1) + self.assertEqual(result, {"a": 1, "b": 2}) + + def test_patch_add_to_array_index(self): + result = self.json_patch([1, 2, 3], "add", "/1", 99) + 
self.assertEqual(result, [1, 99, 2, 3]) + + def test_patch_add_to_array_last(self): + result = self.json_patch({"a": [1, 2, 3]}, "add", "/a/-", 99) + self.assertEqual(result, {"a": [1, 2, 3, 99]}) + + def test_patch_add_from_string(self): + result = self.json_patch("[1, 2, 3]", "add", "/-", 99) + self.assertEqual(result, [1, 2, 3, 99]) + + def test_patch_path_escape(self): + result = self.json_patch({}, "add", "/x~0~1y", 99) + self.assertEqual(result, {"x~/y": 99}) + + def test_patch_remove(self): + result = self.json_patch({"a": 1, "b": {"c": 2}, "d": 3}, "remove", "/b") + self.assertEqual(result, {"a": 1, "d": 3}) + + def test_patch_replace(self): + result = self.json_patch( + {"a": 1, "b": {"c": 2}, "d": 3}, "replace", "/b", {"x": 99} + ) + self.assertEqual(result, {"a": 1, "b": {"x": 99}, "d": 3}) + + def test_patch_copy(self): + result = self.json_patch( + {"a": 1, "b": {"c": 2}, "d": 3}, "copy", "/d", **{"from": "/b"} + ) + self.assertEqual(result, {"a": 1, "b": {"c": 2}, "d": {"c": 2}}) + + def test_patch_move(self): + result = self.json_patch( + {"a": 1, "b": {"c": 2}, "d": 3}, "move", "/d", **{"from": "/b"} + ) + self.assertEqual(result, {"a": 1, "d": {"c": 2}}) + + def test_patch_test_pass(self): + result = self.json_patch({"a": 1, "b": {"c": 2}, "d": 3}, "test", "/b/c", 2) + self.assertEqual(result, {"a": 1, "b": {"c": 2}, "d": 3}) + + def test_patch_test_fail_none(self): + result = self.json_patch({"a": 1, "b": {"c": 2}, "d": 3}, "test", "/b/c", 99) + self.assertIsNone(result) + + def test_patch_test_fail_fail(self): + with self.assertRaises(AnsibleFilterError) as context: + self.json_patch( + {"a": 1, "b": {"c": 2}, "d": 3}, "test", "/b/c", 99, fail_test=True + ) + self.assertTrue("json_patch: test operation failed" in str(context.exception)) + + def test_patch_remove_nonexisting(self): + with self.assertRaises(AnsibleFilterError) as context: + self.json_patch({"a": 1, "b": {"c": 2}, "d": 3}, "remove", "/e") + self.assertEqual( + str(context.exception), + "json_patch: patch failed: can't remove a non-existent object 'e'", + ) + + def test_patch_missing_lib(self): + with unittest.mock.patch( + "ansible_collections.community.general.plugins.filter.json_patch.HAS_LIB", + False, + ): + with self.assertRaises(AnsibleFilterError) as context: + self.json_patch({}, "add", "/a", 1) + self.assertEqual( + str(context.exception), + "You need to install 'jsonpatch' package prior to running 'json_patch' filter", + ) + + def test_patch_invalid_operation(self): + with self.assertRaises(AnsibleFilterError) as context: + self.json_patch({}, "invalid", "/a", 1) + self.assertEqual( + str(context.exception), + "json_patch: unsupported 'op' argument: invalid", + ) + + def test_patch_arg_checking(self): + with self.assertRaises(AnsibleFilterError) as context: + self.json_patch(1, "add", "/a", 1) + self.assertEqual( + str(context.exception), + "json_patch: input is not dictionary, list or string", + ) + with self.assertRaises(AnsibleFilterError) as context: + self.json_patch({}, 1, "/a", 1) + self.assertEqual( + str(context.exception), + "json_patch: 'op' argument is not a string", + ) + with self.assertRaises(AnsibleFilterError) as context: + self.json_patch({}, None, "/a", 1) + self.assertEqual( + str(context.exception), + "json_patch: 'op' argument is not a string", + ) + with self.assertRaises(AnsibleFilterError) as context: + self.json_patch({}, "add", 1, 1) + self.assertEqual( + str(context.exception), + "json_patch: 'path' argument is not a string", + ) + with 
self.assertRaises(AnsibleFilterError) as context: + self.json_patch({}, "copy", "/a", **{"from": 1}) + self.assertEqual( + str(context.exception), + "json_patch: 'from' argument is not a string", + ) + + def test_patch_extra_kwarg(self): + with self.assertRaises(AnsibleFilterError) as context: + self.json_patch({}, "add", "/a", 1, invalid=True) + self.assertEqual( + str(context.exception), + "json_patch: unexpected keywords arguments: invalid", + ) + + def test_patch_missing_from(self): + with self.assertRaises(AnsibleFilterError) as context: + self.json_patch({}, "copy", "/a", 1) + self.assertEqual( + str(context.exception), + "json_patch: 'from' argument missing for 'copy' operation", + ) + with self.assertRaises(AnsibleFilterError) as context: + self.json_patch({}, "move", "/a", 1) + self.assertEqual( + str(context.exception), + "json_patch: 'from' argument missing for 'move' operation", + ) + + def test_patch_add_to_dict_binary(self): + result = self.json_patch(b'{"b": 2}', "add", "/a", 1) + self.assertEqual(result, {"a": 1, "b": 2}) + result = self.json_patch(bytearray(b'{"b": 2}'), "add", "/a", 1) + self.assertEqual(result, {"a": 1, "b": 2}) + + # json_patch_recipe + + def test_patch_recipe_process(self): + result = self.json_patch_recipe( + {}, + [ + {"op": "add", "path": "/foo", "value": 1}, + {"op": "add", "path": "/bar", "value": []}, + {"op": "add", "path": "/bar/-", "value": 2}, + {"op": "add", "path": "/bar/0", "value": 1}, + {"op": "remove", "path": "/bar/0"}, + {"op": "move", "from": "/foo", "path": "/baz"}, + {"op": "copy", "from": "/baz", "path": "/bax"}, + {"op": "copy", "from": "/baz", "path": "/bay"}, + {"op": "replace", "path": "/baz", "value": [10, 20, 30]}, + {"op": "add", "path": "/foo", "value": 1}, + {"op": "add", "path": "/foo", "value": 1}, + {"op": "test", "path": "/baz/1", "value": 20}, + ], + ) + self.assertEqual( + result, {"bar": [2], "bax": 1, "bay": 1, "baz": [10, 20, 30], "foo": 1} + ) + + def test_patch_recipe_test_fail(self): + result = self.json_patch_recipe( + {}, + [ + {"op": "add", "path": "/bar", "value": []}, + {"op": "add", "path": "/bar/-", "value": 2}, + {"op": "test", "path": "/bar/0", "value": 20}, + {"op": "add", "path": "/bar/0", "value": 1}, + ], + ) + self.assertIsNone(result) + + def test_patch_recipe_missing_lib(self): + with unittest.mock.patch( + "ansible_collections.community.general.plugins.filter.json_patch.HAS_LIB", + False, + ): + with self.assertRaises(AnsibleFilterError) as context: + self.json_patch_recipe({}, []) + self.assertEqual( + str(context.exception), + "You need to install 'jsonpatch' package prior to running 'json_patch_recipe' filter", + ) + + def test_patch_recipe_missing_from(self): + with self.assertRaises(AnsibleFilterError) as context: + self.json_patch_recipe({}, [{"op": "copy", "path": "/a"}]) + self.assertEqual( + str(context.exception), + "json_patch_recipe: 'from' argument missing for 'copy' operation", + ) + + def test_patch_recipe_incorrect_type(self): + with self.assertRaises(AnsibleFilterError) as context: + self.json_patch_recipe({}, "copy") + self.assertEqual( + str(context.exception), + "json_patch_recipe: 'operations' needs to be a list", + ) + + def test_patch_recipe_test_fail_none(self): + result = self.json_patch_recipe( + {"a": 1, "b": {"c": 2}, "d": 3}, + [{"op": "test", "path": "/b/c", "value": 99}], + ) + self.assertIsNone(result) + + def test_patch_recipe_test_fail_fail_pos(self): + with self.assertRaises(AnsibleFilterError) as context: + self.json_patch_recipe( + {"a": 1, "b": {"c": 2}, 
"d": 3}, + [{"op": "test", "path": "/b/c", "value": 99}], + True, + ) + self.assertTrue( + "json_patch_recipe: test operation failed" in str(context.exception) + ) + + def test_patch_recipe_test_fail_fail_kw(self): + with self.assertRaises(AnsibleFilterError) as context: + self.json_patch_recipe( + {"a": 1, "b": {"c": 2}, "d": 3}, + [{"op": "test", "path": "/b/c", "value": 99}], + fail_test=True, + ) + self.assertTrue( + "json_patch_recipe: test operation failed" in str(context.exception) + ) + + # json_diff + + def test_diff_process(self): + result = self.json_diff( + {"foo": 1, "bar": {"baz": 2}, "baw": [1, 2, 3], "hello": "day"}, + { + "foo": 1, + "bar": {"baz": 2}, + "baw": [1, 3], + "baq": {"baz": 2}, + "hello": "night", + }, + ) + + # Sort as the order is unstable + self.assertEqual( + sorted(result, key=lambda k: k["path"]), + [ + {"op": "add", "path": "/baq", "value": {"baz": 2}}, + {"op": "remove", "path": "/baw/1"}, + {"op": "replace", "path": "/hello", "value": "night"}, + ], + ) + + def test_diff_missing_lib(self): + with unittest.mock.patch( + "ansible_collections.community.general.plugins.filter.json_patch.HAS_LIB", + False, + ): + with self.assertRaises(AnsibleFilterError) as context: + self.json_diff({}, {}) + self.assertEqual( + str(context.exception), + "You need to install 'jsonpatch' package prior to running 'json_diff' filter", + ) + + def test_diff_arg_checking(self): + with self.assertRaises(AnsibleFilterError) as context: + self.json_diff(1, {}) + self.assertEqual( + str(context.exception), "json_diff: input is not dictionary, list or string" + ) + with self.assertRaises(AnsibleFilterError) as context: + self.json_diff({}, 1) + self.assertEqual( + str(context.exception), + "json_diff: target is not dictionary, list or string", + ) diff --git a/tests/unit/plugins/inventory/fixtures/iocage_inventory.yml b/tests/unit/plugins/inventory/fixtures/iocage/iocage_inventory.yml similarity index 96% rename from tests/unit/plugins/inventory/fixtures/iocage_inventory.yml rename to tests/unit/plugins/inventory/fixtures/iocage/iocage_inventory.yml index 850a54f549..75cafc4e3b 100644 --- a/tests/unit/plugins/inventory/fixtures/iocage_inventory.yml +++ b/tests/unit/plugins/inventory/fixtures/iocage/iocage_inventory.yml @@ -5,6 +5,12 @@ all: test_101: iocage_basejail: 'yes' iocage_boot: 'off' + iocage_ip4_dict: + ip4: + - ifc: vnet0 + ip: 10.1.0.101 + mask: '24' + msg: '' iocage_ip4: 10.1.0.101 iocage_ip6: '-' iocage_jid: '-' @@ -157,6 +163,12 @@ all: test_102: iocage_basejail: 'yes' iocage_boot: 'off' + iocage_ip4_dict: + ip4: + - ifc: vnet0 + ip: 10.1.0.102 + mask: '24' + msg: '' iocage_ip4: 10.1.0.102 iocage_ip6: '-' iocage_jid: '-' @@ -309,6 +321,12 @@ all: test_103: iocage_basejail: 'yes' iocage_boot: 'off' + iocage_ip4_dict: + ip4: + - ifc: vnet0 + ip: 10.1.0.103 + mask: '24' + msg: '' iocage_ip4: 10.1.0.103 iocage_ip6: '-' iocage_jid: '-' diff --git a/tests/unit/plugins/inventory/fixtures/iocage_inventory.yml.license b/tests/unit/plugins/inventory/fixtures/iocage/iocage_inventory.yml.license similarity index 100% rename from tests/unit/plugins/inventory/fixtures/iocage_inventory.yml.license rename to tests/unit/plugins/inventory/fixtures/iocage/iocage_inventory.yml.license diff --git a/tests/unit/plugins/inventory/fixtures/iocage/iocage_jails.txt b/tests/unit/plugins/inventory/fixtures/iocage/iocage_jails.txt new file mode 100644 index 0000000000..4cd9d0999d --- /dev/null +++ b/tests/unit/plugins/inventory/fixtures/iocage/iocage_jails.txt @@ -0,0 +1,9 @@ 
++------+----------+------+-------+------+-----------------+---------------------+-----+----------------+----------+ +| JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL | ++======+==========+======+=======+======+=================+=====================+=====+================+==========+ +| - | test_101 | off | down | jail | 13.4-RELEASE-p2 | vnet0|10.1.0.101/24 | - | ansible_client | yes | ++------+----------+------+-------+------+-----------------+---------------------+-----+----------------+----------+ +| - | test_102 | off | down | jail | 13.4-RELEASE-p2 | vnet0|10.1.0.102/24 | - | ansible_client | yes | ++------+----------+------+-------+------+-----------------+---------------------+-----+----------------+----------+ +| - | test_103 | off | down | jail | 13.4-RELEASE-p2 | vnet0|10.1.0.103/24 | - | ansible_client | yes | ++------+----------+------+-------+------+-----------------+---------------------+-----+----------------+----------+ diff --git a/tests/unit/plugins/inventory/fixtures/iocage_jails.txt.license b/tests/unit/plugins/inventory/fixtures/iocage/iocage_jails.txt.license similarity index 100% rename from tests/unit/plugins/inventory/fixtures/iocage_jails.txt.license rename to tests/unit/plugins/inventory/fixtures/iocage/iocage_jails.txt.license diff --git a/tests/unit/plugins/inventory/fixtures/iocage_jails.yml b/tests/unit/plugins/inventory/fixtures/iocage/iocage_jails.yml similarity index 68% rename from tests/unit/plugins/inventory/fixtures/iocage_jails.yml rename to tests/unit/plugins/inventory/fixtures/iocage/iocage_jails.yml index 08eaa2dce4..ebca159824 100644 --- a/tests/unit/plugins/inventory/fixtures/iocage_jails.yml +++ b/tests/unit/plugins/inventory/fixtures/iocage/iocage_jails.yml @@ -3,6 +3,12 @@ _meta: test_101: iocage_basejail: 'yes' iocage_boot: 'off' + iocage_ip4_dict: + ip4: + - ifc: vnet0 + ip: 10.1.0.101 + mask: '24' + msg: '' iocage_ip4: 10.1.0.101 iocage_ip6: '-' iocage_jid: '-' @@ -13,6 +19,12 @@ _meta: test_102: iocage_basejail: 'yes' iocage_boot: 'off' + iocage_ip4_dict: + ip4: + - ifc: vnet0 + ip: 10.1.0.102 + mask: '24' + msg: '' iocage_ip4: 10.1.0.102 iocage_ip6: '-' iocage_jid: '-' @@ -23,6 +35,12 @@ _meta: test_103: iocage_basejail: 'yes' iocage_boot: 'off' + iocage_ip4_dict: + ip4: + - ifc: vnet0 + ip: 10.1.0.103 + mask: '24' + msg: '' iocage_ip4: 10.1.0.103 iocage_ip6: '-' iocage_jid: '-' diff --git a/tests/unit/plugins/inventory/fixtures/iocage_jails.yml.license b/tests/unit/plugins/inventory/fixtures/iocage/iocage_jails.yml.license similarity index 100% rename from tests/unit/plugins/inventory/fixtures/iocage_jails.yml.license rename to tests/unit/plugins/inventory/fixtures/iocage/iocage_jails.yml.license diff --git a/tests/unit/plugins/inventory/fixtures/iocage/iocage_jails_dhcp.txt b/tests/unit/plugins/inventory/fixtures/iocage/iocage_jails_dhcp.txt new file mode 100644 index 0000000000..b49428fc7a --- /dev/null +++ b/tests/unit/plugins/inventory/fixtures/iocage/iocage_jails_dhcp.txt @@ -0,0 +1,9 @@ ++------+----------------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ +| JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL | ++======+================+======+=======+======+=================+====================+=====+================+==========+ +| 268 | test_111 | off | up | jail | 14.1-RELEASE-p6 | epair0b|10.1.0.174 | - | ansible_client | yes | 
++------+----------------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ +| 269 | test_112 | off | up | jail | 14.1-RELEASE-p6 | epair0b|10.1.0.147 | - | ansible_client | yes | ++------+----------------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ +| 270 | test_113 | off | up | jail | 14.1-RELEASE-p6 | epair0b|10.1.0.231 | - | ansible_client | yes | ++------+----------------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ diff --git a/tests/sanity/extra/extra-docs.json.license b/tests/unit/plugins/inventory/fixtures/iocage/iocage_jails_dhcp.txt.license similarity index 100% rename from tests/sanity/extra/extra-docs.json.license rename to tests/unit/plugins/inventory/fixtures/iocage/iocage_jails_dhcp.txt.license diff --git a/tests/unit/plugins/inventory/fixtures/iocage/iocage_jails_dhcp.yml b/tests/unit/plugins/inventory/fixtures/iocage/iocage_jails_dhcp.yml new file mode 100644 index 0000000000..fc01103753 --- /dev/null +++ b/tests/unit/plugins/inventory/fixtures/iocage/iocage_jails_dhcp.yml @@ -0,0 +1,50 @@ +_meta: + hostvars: + test_111: + iocage_basejail: 'yes' + iocage_boot: 'off' + iocage_ip4_dict: + ip4: + - ifc: epair0b + ip: 10.1.0.174 + mask: '-' + msg: '' + iocage_ip4: 10.1.0.174 + iocage_ip6: '-' + iocage_jid: '268' + iocage_release: 14.1-RELEASE-p6 + iocage_state: up + iocage_template: ansible_client + iocage_type: jail + test_112: + iocage_basejail: 'yes' + iocage_boot: 'off' + iocage_ip4_dict: + ip4: + - ifc: epair0b + ip: 10.1.0.147 + mask: '-' + msg: '' + iocage_ip4: 10.1.0.147 + iocage_ip6: '-' + iocage_jid: '269' + iocage_release: 14.1-RELEASE-p6 + iocage_state: up + iocage_template: ansible_client + iocage_type: jail + test_113: + iocage_basejail: 'yes' + iocage_boot: 'off' + iocage_ip4_dict: + ip4: + - ifc: epair0b + ip: 10.1.0.231 + mask: '-' + msg: '' + iocage_ip4: 10.1.0.231 + iocage_ip6: '-' + iocage_jid: '270' + iocage_release: 14.1-RELEASE-p6 + iocage_state: up + iocage_template: ansible_client + iocage_type: jail diff --git a/tests/sanity/extra/licenses.json.license b/tests/unit/plugins/inventory/fixtures/iocage/iocage_jails_dhcp.yml.license similarity index 100% rename from tests/sanity/extra/licenses.json.license rename to tests/unit/plugins/inventory/fixtures/iocage/iocage_jails_dhcp.yml.license diff --git a/tests/unit/plugins/inventory/fixtures/iocage/iocage_jails_dhcp_not_running.txt b/tests/unit/plugins/inventory/fixtures/iocage/iocage_jails_dhcp_not_running.txt new file mode 100644 index 0000000000..71235ca4e5 --- /dev/null +++ b/tests/unit/plugins/inventory/fixtures/iocage/iocage_jails_dhcp_not_running.txt @@ -0,0 +1,9 @@ ++------+----------------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ +| JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL | ++======+================+======+=======+======+=================+====================+=====+================+==========+ +| None | test_111 | off | down | jail | 14.1-RELEASE-p6 | DHCP (not running) | - | ansible_client | yes | ++------+----------------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ +| None | test_112 | off | down | jail | 14.1-RELEASE-p6 | DHCP (not running) | - | ansible_client | yes | ++------+----------------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ +| None | test_113 
| off | down | jail | 14.1-RELEASE-p6 | DHCP (not running) | - | ansible_client | yes | ++------+----------------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ diff --git a/tests/sanity/extra/no-unwanted-files.json.license b/tests/unit/plugins/inventory/fixtures/iocage/iocage_jails_dhcp_not_running.txt.license similarity index 100% rename from tests/sanity/extra/no-unwanted-files.json.license rename to tests/unit/plugins/inventory/fixtures/iocage/iocage_jails_dhcp_not_running.txt.license diff --git a/tests/unit/plugins/inventory/fixtures/iocage/iocage_jails_dhcp_not_running.yml b/tests/unit/plugins/inventory/fixtures/iocage/iocage_jails_dhcp_not_running.yml new file mode 100644 index 0000000000..a8ef2ae881 --- /dev/null +++ b/tests/unit/plugins/inventory/fixtures/iocage/iocage_jails_dhcp_not_running.yml @@ -0,0 +1,41 @@ +_meta: + hostvars: + test_111: + iocage_basejail: 'yes' + iocage_boot: 'off' + iocage_ip4_dict: + ip4: [] + msg: 'DHCP (not running)' + iocage_ip4: '-' + iocage_ip6: '-' + iocage_jid: 'None' + iocage_release: 14.1-RELEASE-p6 + iocage_state: down + iocage_template: ansible_client + iocage_type: jail + test_112: + iocage_basejail: 'yes' + iocage_boot: 'off' + iocage_ip4_dict: + ip4: [] + msg: 'DHCP (not running)' + iocage_ip4: '-' + iocage_ip6: '-' + iocage_jid: 'None' + iocage_release: 14.1-RELEASE-p6 + iocage_state: down + iocage_template: ansible_client + iocage_type: jail + test_113: + iocage_basejail: 'yes' + iocage_boot: 'off' + iocage_ip4_dict: + ip4: [] + msg: 'DHCP (not running)' + iocage_ip4: '-' + iocage_ip6: '-' + iocage_jid: 'None' + iocage_release: 14.1-RELEASE-p6 + iocage_state: down + iocage_template: ansible_client + iocage_type: jail diff --git a/tests/sanity/ignore-2.13.txt.license b/tests/unit/plugins/inventory/fixtures/iocage/iocage_jails_dhcp_not_running.yml.license similarity index 100% rename from tests/sanity/ignore-2.13.txt.license rename to tests/unit/plugins/inventory/fixtures/iocage/iocage_jails_dhcp_not_running.yml.license diff --git a/tests/unit/plugins/inventory/fixtures/iocage_properties.txt b/tests/unit/plugins/inventory/fixtures/iocage/iocage_properties.txt similarity index 100% rename from tests/unit/plugins/inventory/fixtures/iocage_properties.txt rename to tests/unit/plugins/inventory/fixtures/iocage/iocage_properties.txt diff --git a/tests/unit/plugins/inventory/fixtures/iocage_properties.txt.license b/tests/unit/plugins/inventory/fixtures/iocage/iocage_properties.txt.license similarity index 100% rename from tests/unit/plugins/inventory/fixtures/iocage_properties.txt.license rename to tests/unit/plugins/inventory/fixtures/iocage/iocage_properties.txt.license diff --git a/tests/unit/plugins/inventory/fixtures/iocage_properties.yml b/tests/unit/plugins/inventory/fixtures/iocage/iocage_properties.yml similarity index 97% rename from tests/unit/plugins/inventory/fixtures/iocage_properties.yml rename to tests/unit/plugins/inventory/fixtures/iocage/iocage_properties.yml index ffae1bf9d1..bb20808ae8 100644 --- a/tests/unit/plugins/inventory/fixtures/iocage_properties.yml +++ b/tests/unit/plugins/inventory/fixtures/iocage/iocage_properties.yml @@ -3,6 +3,12 @@ _meta: test_101: iocage_basejail: 'yes' iocage_boot: 'off' + iocage_ip4_dict: + ip4: + - ifc: vnet0 + ip: 10.1.0.101 + mask: '24' + msg: '' iocage_ip4: 10.1.0.101 iocage_ip6: '-' iocage_jid: '-' @@ -155,6 +161,12 @@ _meta: test_102: iocage_basejail: 'yes' iocage_boot: 'off' + iocage_ip4_dict: + ip4: + - ifc: vnet0 + ip: 10.1.0.102 + mask: 
'24' + msg: '' iocage_ip4: 10.1.0.102 iocage_ip6: '-' iocage_jid: '-' @@ -307,6 +319,12 @@ _meta: test_103: iocage_basejail: 'yes' iocage_boot: 'off' + iocage_ip4_dict: + ip4: + - ifc: vnet0 + ip: 10.1.0.103 + mask: '24' + msg: '' iocage_ip4: 10.1.0.103 iocage_ip6: '-' iocage_jid: '-' diff --git a/tests/unit/plugins/inventory/fixtures/iocage_properties.yml.license b/tests/unit/plugins/inventory/fixtures/iocage/iocage_properties.yml.license similarity index 100% rename from tests/unit/plugins/inventory/fixtures/iocage_properties.yml.license rename to tests/unit/plugins/inventory/fixtures/iocage/iocage_properties.yml.license diff --git a/tests/unit/plugins/inventory/fixtures/iocage_properties_test_101.txt b/tests/unit/plugins/inventory/fixtures/iocage/iocage_properties_test_101.txt similarity index 100% rename from tests/unit/plugins/inventory/fixtures/iocage_properties_test_101.txt rename to tests/unit/plugins/inventory/fixtures/iocage/iocage_properties_test_101.txt diff --git a/tests/unit/plugins/inventory/fixtures/iocage_properties_test_101.txt.license b/tests/unit/plugins/inventory/fixtures/iocage/iocage_properties_test_101.txt.license similarity index 100% rename from tests/unit/plugins/inventory/fixtures/iocage_properties_test_101.txt.license rename to tests/unit/plugins/inventory/fixtures/iocage/iocage_properties_test_101.txt.license diff --git a/tests/unit/plugins/inventory/fixtures/iocage_properties_test_102.txt b/tests/unit/plugins/inventory/fixtures/iocage/iocage_properties_test_102.txt similarity index 100% rename from tests/unit/plugins/inventory/fixtures/iocage_properties_test_102.txt rename to tests/unit/plugins/inventory/fixtures/iocage/iocage_properties_test_102.txt diff --git a/tests/unit/plugins/inventory/fixtures/iocage_properties_test_102.txt.license b/tests/unit/plugins/inventory/fixtures/iocage/iocage_properties_test_102.txt.license similarity index 100% rename from tests/unit/plugins/inventory/fixtures/iocage_properties_test_102.txt.license rename to tests/unit/plugins/inventory/fixtures/iocage/iocage_properties_test_102.txt.license diff --git a/tests/unit/plugins/inventory/fixtures/iocage_properties_test_103.txt b/tests/unit/plugins/inventory/fixtures/iocage/iocage_properties_test_103.txt similarity index 100% rename from tests/unit/plugins/inventory/fixtures/iocage_properties_test_103.txt rename to tests/unit/plugins/inventory/fixtures/iocage/iocage_properties_test_103.txt diff --git a/tests/unit/plugins/inventory/fixtures/iocage_properties_test_103.txt.license b/tests/unit/plugins/inventory/fixtures/iocage/iocage_properties_test_103.txt.license similarity index 100% rename from tests/unit/plugins/inventory/fixtures/iocage_properties_test_103.txt.license rename to tests/unit/plugins/inventory/fixtures/iocage/iocage_properties_test_103.txt.license diff --git a/tests/unit/plugins/inventory/fixtures/iocage_jails.txt b/tests/unit/plugins/inventory/fixtures/iocage_jails.txt deleted file mode 100644 index 5152110550..0000000000 --- a/tests/unit/plugins/inventory/fixtures/iocage_jails.txt +++ /dev/null @@ -1,3 +0,0 @@ -- test_101 off down jail 13.4-RELEASE-p2 vnet0|10.1.0.101/24 - ansible_client yes -- test_102 off down jail 13.4-RELEASE-p2 vnet0|10.1.0.102/24 - ansible_client yes -- test_103 off down jail 13.4-RELEASE-p2 vnet0|10.1.0.103/24 - ansible_client yes diff --git a/tests/unit/plugins/inventory/test_cobbler.py b/tests/unit/plugins/inventory/test_cobbler.py index a09001ad62..7bc2b847c6 100644 --- a/tests/unit/plugins/inventory/test_cobbler.py +++ 
@@ -16,11 +16,6 @@ def inventory():
     return InventoryModule()
 
 
-def test_init_cache(inventory):
-    inventory._init_cache()
-    assert inventory._cache[inventory.cache_key] == {}
-
-
 def test_verify_file(tmp_path, inventory):
     file = tmp_path / "foobar.cobbler.yml"
     file.touch()
diff --git a/tests/unit/plugins/inventory/test_iocage.py b/tests/unit/plugins/inventory/test_iocage.py
index 1a0aa22d16..4bd56aeb92 100644
--- a/tests/unit/plugins/inventory/test_iocage.py
+++ b/tests/unit/plugins/inventory/test_iocage.py
@@ -13,6 +13,7 @@ import yaml
 from ansible.inventory.data import InventoryData
 from ansible.template import Templar
 from ansible_collections.community.general.plugins.inventory.iocage import InventoryModule
+from ansible_collections.community.internal_test_tools.tests.unit.utils.trust import make_trusted
 
 
 @pytest.fixture
@@ -20,34 +21,36 @@ def inventory():
     inv = InventoryModule()
     inv.inventory = InventoryData()
     inv.templar = Templar(None)
-    inv.jails = load_txt_data('tests/unit/plugins/inventory/fixtures/iocage_jails.txt')
-    inv.js_ok = load_yml_data('tests/unit/plugins/inventory/fixtures/iocage_jails.yml')
-    prpts_101 = load_txt_data('tests/unit/plugins/inventory/fixtures/iocage_properties_test_101.txt')
-    prpts_102 = load_txt_data('tests/unit/plugins/inventory/fixtures/iocage_properties_test_102.txt')
-    prpts_103 = load_txt_data('tests/unit/plugins/inventory/fixtures/iocage_properties_test_103.txt')
+    inv.jails = load_txt_data('tests/unit/plugins/inventory/fixtures/iocage/iocage_jails.txt')
+    inv.js_ok = load_yml_data('tests/unit/plugins/inventory/fixtures/iocage/iocage_jails.yml')
+    inv.jails_dhcp = load_txt_data('tests/unit/plugins/inventory/fixtures/iocage/iocage_jails_dhcp.txt')
+    inv.js_dhcp_ok = load_yml_data('tests/unit/plugins/inventory/fixtures/iocage/iocage_jails_dhcp.yml')
+    inv.jails_dhcp_nr = load_txt_data('tests/unit/plugins/inventory/fixtures/iocage/iocage_jails_dhcp_not_running.txt')
+    inv.js_dhcp_nr_ok = load_yml_data('tests/unit/plugins/inventory/fixtures/iocage/iocage_jails_dhcp_not_running.yml')
+    prpts_101 = load_txt_data('tests/unit/plugins/inventory/fixtures/iocage/iocage_properties_test_101.txt')
+    prpts_102 = load_txt_data('tests/unit/plugins/inventory/fixtures/iocage/iocage_properties_test_102.txt')
+    prpts_103 = load_txt_data('tests/unit/plugins/inventory/fixtures/iocage/iocage_properties_test_103.txt')
     inv.prpts = {'test_101': prpts_101, 'test_102': prpts_102, 'test_103': prpts_103}
-    inv.ps_ok = load_yml_data('tests/unit/plugins/inventory/fixtures/iocage_properties.yml')
-    inv.ok = load_yml_data('tests/unit/plugins/inventory/fixtures/iocage_inventory.yml')
+    inv.ps_ok = load_yml_data('tests/unit/plugins/inventory/fixtures/iocage/iocage_properties.yml')
+    inv.ok = load_yml_data('tests/unit/plugins/inventory/fixtures/iocage/iocage_inventory.yml')
     return inv
 
 
 def load_txt_data(path):
-    f = open(path, 'r')
-    s = f.read()
-    f.close()
+    with open(path, 'r') as f:
+        s = f.read()
     return s
 
 
 def load_yml_data(path):
-    f = open(path, 'r')
-    d = yaml.safe_load(f)
-    f.close()
+    with open(path, 'r') as f:
+        d = yaml.safe_load(f)
     return d
 
 
 def get_option(option):
     groups = {}
-    groups['test'] = "inventory_hostname.startswith('test')"
+    groups['test'] = make_trusted("inventory_hostname.startswith('test')")
     if option == 'groups':
         return groups
 
@@ -72,10 +75,22 @@ def test_verify_file(tmp_path, inventory):
 
 
 def test_get_jails(inventory):
+
+    # jails
     results = {'_meta': {'hostvars': {}}}
     inventory.get_jails(inventory.jails, results)
     assert results == inventory.js_ok
+    # jails_dhcp
+    results = {'_meta': {'hostvars': {}}}
+    inventory.get_jails(inventory.jails_dhcp, results)
+    assert results == inventory.js_dhcp_ok
+
+    # jails_dhcp_not_running
+    results = {'_meta': {'hostvars': {}}}
+    inventory.get_jails(inventory.jails_dhcp_nr, results)
+    assert results == inventory.js_dhcp_nr_ok
+
 
 def test_get_properties(inventory):
     results = {'_meta': {'hostvars': {}}}
diff --git a/tests/unit/plugins/inventory/test_linode.py b/tests/unit/plugins/inventory/test_linode.py
index 0f239f2dd9..ead41591a7 100644
--- a/tests/unit/plugins/inventory/test_linode.py
+++ b/tests/unit/plugins/inventory/test_linode.py
@@ -7,14 +7,8 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
 import pytest
-import sys
 
 linode_apiv4 = pytest.importorskip('linode_api4')
-mandatory_py_version = pytest.mark.skipif(
-    sys.version_info < (2, 7),
-    reason='The linode_api4 dependency requires python2.7 or higher'
-)
-
 
 from ansible.errors import AnsibleError
 from ansible.parsing.dataloader import DataLoader
diff --git a/tests/unit/plugins/inventory/test_opennebula.py b/tests/unit/plugins/inventory/test_opennebula.py
index 52ea934043..87ec4314de 100644
--- a/tests/unit/plugins/inventory/test_opennebula.py
+++ b/tests/unit/plugins/inventory/test_opennebula.py
@@ -11,14 +11,40 @@ __metaclass__ = type
 
 from collections import OrderedDict
 import json
+import os
 
 import pytest
 
+from ansible import constants as C
 from ansible.inventory.data import InventoryData
-from ansible.parsing.dataloader import DataLoader
-from ansible.template import Templar
+from ansible.inventory.manager import InventoryManager
+from ansible.module_utils.common.text.converters import to_native
+from ansible_collections.community.internal_test_tools.tests.unit.mock.loader import DictDataLoader
+from ansible_collections.community.internal_test_tools.tests.unit.mock.path import mock_unfrackpath_noop
+
 from ansible_collections.community.general.plugins.inventory.opennebula import InventoryModule
-from ansible_collections.community.general.tests.unit.compat.mock import create_autospec
+
+
+original_exists = os.path.exists
+original_access = os.access
+
+
+def exists_mock(path, exists=True):
+    def exists_fn(f):
+        if to_native(f) == path:
+            return exists
+        return original_exists(f)
+
+    return exists_fn
+
+
+def access_mock(path, can_access=True):
+    def access(f, m, *args, **kwargs):
+        if to_native(f) == path:
+            return can_access
+        return original_access(f, m, *args, **kwargs)  # pragma: no cover
+
+    return access
 
 
 class HistoryEntry(object):
@@ -239,18 +265,6 @@ options_base_test = {
     'filter_by_label': None,
 }
 
-options_constructable_test = options_base_test.copy()
-options_constructable_test.update({
-    'compose': {'is_linux': "GUEST_OS == 'linux'"},
-    'filter_by_label': 'bench',
-    'groups': {
-        'benchmark_clients': "TGROUP.endswith('clients')",
-        'lin': 'is_linux == True'
-    },
-    'keyed_groups': [{'key': 'TGROUP', 'prefix': 'tgroup'}],
-
-})
-
 
 # given a dictionary `opts_dict`, return a function that behaves like ansible's inventory get_options
 def mk_get_options(opts_dict):
@@ -266,24 +280,41 @@ def test_get_connection_info(inventory, mocker):
     assert (auth.username and auth.password)
 
 
-def test_populate_constructable_templating(inventory, mocker):
-    # bypass API fetch call
-    inventory._get_vm_pool = mocker.MagicMock(side_effect=get_vm_pool_json)
-    inventory.get_option = mocker.MagicMock(side_effect=mk_get_options(options_constructable_test))
+def test_populate_constructable_templating(mocker):
+    inventory_filename = '/fake/opennebula.yml'
+
+    mocker.patch.object(InventoryModule, '_get_vm_pool', side_effect=get_vm_pool_json)
+    mocker.patch('ansible_collections.community.general.plugins.inventory.opennebula.HAS_PYONE', True)
+    mocker.patch('ansible.inventory.manager.unfrackpath', mock_unfrackpath_noop)
+    mocker.patch('os.path.exists', exists_mock(inventory_filename))
+    mocker.patch('os.access', access_mock(inventory_filename))
 
     # the templating engine is needed for the constructable groups/vars
     # so give that some fake data and instantiate it.
-    fake_config_filepath = '/fake/opennebula.yml'
-    fake_cache = {fake_config_filepath: options_constructable_test.copy()}
-    fake_cache[fake_config_filepath]['plugin'] = 'community.general.opennebula'
-    dataloader = create_autospec(DataLoader, instance=True)
-    dataloader._FILE_CACHE = fake_cache
-    inventory.templar = Templar(loader=dataloader)
-
-    inventory._populate()
+    C.INVENTORY_ENABLED = ['community.general.opennebula']
+    inventory_file = {inventory_filename: r'''
+---
+plugin: community.general.opennebula
+api_url: https://opennebula:2633/RPC2
+api_username: username
+api_password: password
+api_authfile: '~/.one/one_auth'
+hostname: v4_first_ip
+group_by_labels: true
+compose:
+  is_linux: GUEST_OS == 'linux'
+filter_by_label: bench
+groups:
+  benchmark_clients: TGROUP.endswith('clients')
+  lin: is_linux == true
+keyed_groups:
+  - key: TGROUP
+    prefix: tgroup
+'''}
+    im = InventoryManager(loader=DictDataLoader(inventory_file), sources=inventory_filename)
 
     # note the vm_pool (and json data file) has four hosts,
-    # but options_constructable_test asks ansible to filter it out
+    # but the options above ask ansible to filter one out
     assert len(get_vm_pool_json().VM) == 4
     assert set([vm.NAME for vm in get_vm_pool_json().VM]) == set([
         'terraform_demo_00',
@@ -291,31 +322,31 @@ def test_populate_constructable_templating(mocker):
         'terraform_demo_srv_00',
         'bs-windows',
     ])
-    assert set(inventory.inventory.hosts) == set(['terraform_demo_00', 'terraform_demo_01', 'terraform_demo_srv_00'])
+    assert set(im._inventory.hosts) == set(['terraform_demo_00', 'terraform_demo_01', 'terraform_demo_srv_00'])
 
-    host_demo00 = inventory.inventory.get_host('terraform_demo_00')
-    host_demo01 = inventory.inventory.get_host('terraform_demo_01')
-    host_demosrv = inventory.inventory.get_host('terraform_demo_srv_00')
+    host_demo00 = im._inventory.get_host('terraform_demo_00')
+    host_demo01 = im._inventory.get_host('terraform_demo_01')
+    host_demosrv = im._inventory.get_host('terraform_demo_srv_00')
 
-    assert 'benchmark_clients' in inventory.inventory.groups
-    assert 'lin' in inventory.inventory.groups
-    assert inventory.inventory.groups['benchmark_clients'].hosts == [host_demo00, host_demo01]
-    assert inventory.inventory.groups['lin'].hosts == [host_demo00, host_demo01, host_demosrv]
+    assert 'benchmark_clients' in im._inventory.groups
+    assert 'lin' in im._inventory.groups
+    assert im._inventory.groups['benchmark_clients'].hosts == [host_demo00, host_demo01]
+    assert im._inventory.groups['lin'].hosts == [host_demo00, host_demo01, host_demosrv]
 
     # test group by label:
-    assert 'bench' in inventory.inventory.groups
-    assert 'foo' in inventory.inventory.groups
-    assert inventory.inventory.groups['bench'].hosts == [host_demo00, host_demo01, host_demosrv]
-    assert inventory.inventory.groups['serv'].hosts == [host_demosrv]
-    assert inventory.inventory.groups['foo'].hosts == [host_demo00, host_demo01]
+    assert 'bench' in im._inventory.groups
+    assert 'foo' in im._inventory.groups
+    assert im._inventory.groups['bench'].hosts == [host_demo00, host_demo01, host_demosrv]
+    assert im._inventory.groups['serv'].hosts == [host_demosrv]
+    assert im._inventory.groups['foo'].hosts == [host_demo00, host_demo01]
 
     # test `compose` transforms GUEST_OS=Linux to is_linux == True
     assert host_demo00.get_vars()['GUEST_OS'] == 'linux'
    assert host_demo00.get_vars()['is_linux'] is True
 
     # test `keyed_groups`
-    assert inventory.inventory.groups['tgroup_bench_clients'].hosts == [host_demo00, host_demo01]
-    assert inventory.inventory.groups['tgroup_bench_server'].hosts == [host_demosrv]
+    assert im._inventory.groups['tgroup_bench_clients'].hosts == [host_demo00, host_demo01]
+    assert im._inventory.groups['tgroup_bench_server'].hosts == [host_demosrv]
 
 
 def test_populate(inventory, mocker):
diff --git a/tests/unit/plugins/inventory/test_proxmox.py b/tests/unit/plugins/inventory/test_proxmox.py
deleted file mode 100644
index b8358df226..0000000000
--- a/tests/unit/plugins/inventory/test_proxmox.py
+++ /dev/null
@@ -1,786 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2020, Jeffrey van Pelt
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-# The API responses used in these tests were recorded from PVE version 6.2.
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import pytest
-
-from ansible.inventory.data import InventoryData
-from ansible_collections.community.general.plugins.inventory.proxmox import InventoryModule
-
-
-@pytest.fixture(scope="module")
-def inventory():
-    r = InventoryModule()
-    r.inventory = InventoryData()
-    return r
-
-
-def test_verify_file(tmp_path, inventory):
-    file = tmp_path / "foobar.proxmox.yml"
-    file.touch()
-    assert inventory.verify_file(str(file)) is True
-
-
-def test_verify_file_bad_config(inventory):
-    assert inventory.verify_file('foobar.proxmox.yml') is False
-
-
-def get_auth():
-    return True
-
-
-# NOTE: when updating/adding replies to this function,
-# be sure to only add only the _contents_ of the 'data' dict in the API reply
-def get_json(url, ignore_errors=None):
-    if url == "https://localhost:8006/api2/json/nodes":
-        # _get_nodes
-        return [{"type": "node",
-                 "cpu": 0.01,
-                 "maxdisk": 500,
-                 "mem": 500,
-                 "node": "testnode",
-                 "id": "node/testnode",
-                 "maxcpu": 1,
-                 "status": "online",
-                 "ssl_fingerprint": "xx",
-                 "disk": 1000,
-                 "maxmem": 1000,
-                 "uptime": 10000,
-                 "level": ""},
-                {"type": "node",
-                 "node": "testnode2",
-                 "id": "node/testnode2",
-                 "status": "offline",
-                 "ssl_fingerprint": "yy"}]
-    elif url == "https://localhost:8006/api2/json/pools":
-        # _get_pools
-        return [{"poolid": "test"}]
-    elif url == "https://localhost:8006/api2/json/nodes/testnode/lxc":
-        # _get_lxc_per_node
-        return [{"cpus": 1,
-                 "name": "test-lxc",
-                 "cpu": 0.01,
-                 "diskwrite": 0,
-                 "lock": "",
-                 "maxmem": 1000,
-                 "template": "",
-                 "diskread": 0,
-                 "mem": 1000,
-                 "swap": 0,
-                 "type": "lxc",
-                 "maxswap": 0,
-                 "maxdisk": "1000",
-                 "netout": 1000,
-                 "pid": "1000",
-                 "netin": 1000,
-                 "status": "running",
-                 "vmid": "100",
-                 "disk": "1000",
-                 "uptime": 1000}]
-    elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu":
-        # _get_qemu_per_node
-        return [{"name": "test-qemu",
-                 "cpus": 1,
-                 "mem": 1000,
-                 "template": "",
-                 "diskread": 0,
-                 "cpu": 0.01,
-                 "maxmem": 1000,
-                 "diskwrite": 0,
-                 "netout": 1000,
-                 "pid": "1001",
-                 "netin": 1000,
-                 "maxdisk":
1000, - "vmid": "101", - "uptime": 1000, - "disk": 0, - "status": "running"}, - {"name": "test-qemu-windows", - "cpus": 1, - "mem": 1000, - "template": "", - "diskread": 0, - "cpu": 0.01, - "maxmem": 1000, - "diskwrite": 0, - "netout": 1000, - "pid": "1001", - "netin": 1000, - "maxdisk": 1000, - "vmid": "102", - "uptime": 1000, - "disk": 0, - "status": "running"}, - {"name": "test-qemu-multi-nic", - "cpus": 1, - "mem": 1000, - "template": "", - "diskread": 0, - "cpu": 0.01, - "maxmem": 1000, - "diskwrite": 0, - "netout": 1000, - "pid": "1001", - "netin": 1000, - "maxdisk": 1000, - "vmid": "103", - "uptime": 1000, - "disk": 0, - "status": "running"}, - {"name": "test-qemu-template", - "cpus": 1, - "mem": 0, - "template": 1, - "diskread": 0, - "cpu": 0, - "maxmem": 1000, - "diskwrite": 0, - "netout": 0, - "pid": "1001", - "netin": 0, - "maxdisk": 1000, - "vmid": "9001", - "uptime": 0, - "disk": 0, - "status": "stopped"}] - elif url == "https://localhost:8006/api2/json/pools/test": - # _get_members_per_pool - return {"members": [{"uptime": 1000, - "template": 0, - "id": "qemu/101", - "mem": 1000, - "status": "running", - "cpu": 0.01, - "maxmem": 1000, - "diskwrite": 1000, - "name": "test-qemu", - "netout": 1000, - "netin": 1000, - "vmid": 101, - "node": "testnode", - "maxcpu": 1, - "type": "qemu", - "maxdisk": 1000, - "disk": 0, - "diskread": 1000}]} - elif url == "https://localhost:8006/api2/json/nodes/testnode/network": - # _get_node_ip - return [{"families": ["inet"], - "priority": 3, - "active": 1, - "cidr": "10.1.1.2/24", - "iface": "eth0", - "method": "static", - "exists": 1, - "type": "eth", - "netmask": "24", - "gateway": "10.1.1.1", - "address": "10.1.1.2", - "method6": "manual", - "autostart": 1}, - {"method6": "manual", - "autostart": 1, - "type": "OVSPort", - "exists": 1, - "method": "manual", - "iface": "eth1", - "ovs_bridge": "vmbr0", - "active": 1, - "families": ["inet"], - "priority": 5, - "ovs_type": "OVSPort"}, - {"type": "OVSBridge", - "method": "manual", - "iface": "vmbr0", - "families": ["inet"], - "priority": 4, - "ovs_ports": "eth1", - "ovs_type": "OVSBridge", - "method6": "manual", - "autostart": 1, - "active": 1}] - elif url == "https://localhost:8006/api2/json/nodes/testnode/lxc/100/config": - # _get_vm_config (lxc) - return { - "console": 1, - "rootfs": "local-lvm:vm-100-disk-0,size=4G", - "cmode": "tty", - "description": "A testnode", - "cores": 1, - "hostname": "test-lxc", - "arch": "amd64", - "tty": 2, - "swap": 0, - "cpulimit": "0", - "net0": "name=eth0,bridge=vmbr0,gw=10.1.1.1,hwaddr=FF:FF:FF:FF:FF:FF,ip=10.1.1.3/24,type=veth", - "ostype": "ubuntu", - "digest": "123456789abcdef0123456789abcdef01234567890", - "protection": 0, - "memory": 1000, - "onboot": 0, - "cpuunits": 1024, - "tags": "one, two, three", - } - elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu/101/config": - # _get_vm_config (qemu) - return { - "tags": "one, two, three", - "cores": 1, - "ide2": "none,media=cdrom", - "memory": 1000, - "kvm": 1, - "digest": "0123456789abcdef0123456789abcdef0123456789", - "description": "A test qemu", - "sockets": 1, - "onboot": 1, - "vmgenid": "ffffffff-ffff-ffff-ffff-ffffffffffff", - "numa": 0, - "bootdisk": "scsi0", - "cpu": "host", - "name": "test-qemu", - "ostype": "l26", - "hotplug": "network,disk,usb", - "scsi0": "local-lvm:vm-101-disk-0,size=8G", - "net0": "virtio=ff:ff:ff:ff:ff:ff,bridge=vmbr0,firewall=1", - "agent": "1,fstrim_cloned_disks=1", - "bios": "seabios", - "ide0": "local-lvm:vm-101-cloudinit,media=cdrom,size=4M", - "boot": "cdn", 
- "scsihw": "virtio-scsi-pci", - "smbios1": "uuid=ffffffff-ffff-ffff-ffff-ffffffffffff" - } - elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu/102/config": - # _get_vm_config (qemu) - return { - "numa": 0, - "digest": "460add1531a7068d2ae62d54f67e8fb9493dece9", - "ide2": "none,media=cdrom", - "bootdisk": "sata0", - "name": "test-qemu-windows", - "balloon": 0, - "cpulimit": "4", - "agent": "1", - "cores": 6, - "sata0": "storage:vm-102-disk-0,size=100G", - "memory": 10240, - "smbios1": "uuid=127301fc-0122-48d5-8fc5-c04fa78d8146", - "scsihw": "virtio-scsi-pci", - "sockets": 1, - "ostype": "win8", - "net0": "virtio=ff:ff:ff:ff:ff:ff,bridge=vmbr0", - "onboot": 1 - } - elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu/103/config": - # _get_vm_config (qemu) - return { - 'scsi1': 'storage:vm-103-disk-3,size=30G', - 'sockets': 1, - 'memory': 8192, - 'ostype': 'l26', - 'scsihw': 'virtio-scsi-pci', - "net0": "virtio=ff:ff:ff:ff:ff:ff,bridge=vmbr0", - "net1": "virtio=ff:ff:ff:ff:ff:ff,bridge=vmbr1", - 'bootdisk': 'scsi0', - 'scsi0': 'storage:vm-103-disk-0,size=10G', - 'name': 'test-qemu-multi-nic', - 'cores': 4, - 'digest': '51b7599f869b9a3f564804a0aed290f3de803292', - 'smbios1': 'uuid=863b31c3-42ca-4a92-aed7-4111f342f70a', - 'agent': '1,type=virtio', - 'ide2': 'none,media=cdrom', - 'balloon': 0, - 'numa': 0, - 'scsi2': 'storage:vm-103-disk-2,size=10G', - 'serial0': 'socket', - 'vmgenid': 'ddfb79b2-b484-4d66-88e7-6e76f2d1be77', - 'onboot': 1, - 'tablet': 0 - } - - elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu/101/agent/network-get-interfaces": - # _get_agent_network_interfaces - return {"result": [ - { - "hardware-address": "00:00:00:00:00:00", - "ip-addresses": [ - { - "prefix": 8, - "ip-address-type": "ipv4", - "ip-address": "127.0.0.1" - }, - { - "ip-address-type": "ipv6", - "ip-address": "::1", - "prefix": 128 - }], - "statistics": { - "rx-errs": 0, - "rx-bytes": 163244, - "rx-packets": 1623, - "rx-dropped": 0, - "tx-dropped": 0, - "tx-packets": 1623, - "tx-bytes": 163244, - "tx-errs": 0}, - "name": "lo"}, - { - "statistics": { - "rx-packets": 4025, - "rx-dropped": 12, - "rx-bytes": 324105, - "rx-errs": 0, - "tx-errs": 0, - "tx-bytes": 368860, - "tx-packets": 3479, - "tx-dropped": 0}, - "name": "eth0", - "ip-addresses": [ - { - "prefix": 24, - "ip-address-type": "ipv4", - "ip-address": "10.1.2.3" - }, - { - "prefix": 64, - "ip-address": "fd8c:4687:e88d:1be3:5b70:7b88:c79c:293", - "ip-address-type": "ipv6" - }], - "hardware-address": "ff:ff:ff:ff:ff:ff" - }, - { - "hardware-address": "ff:ff:ff:ff:ff:ff", - "ip-addresses": [ - { - "prefix": 16, - "ip-address": "10.10.2.3", - "ip-address-type": "ipv4" - }], - "name": "docker0", - "statistics": { - "rx-bytes": 0, - "rx-errs": 0, - "rx-dropped": 0, - "rx-packets": 0, - "tx-packets": 0, - "tx-dropped": 0, - "tx-errs": 0, - "tx-bytes": 0 - }}]} - elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu/102/agent/network-get-interfaces": - # _get_agent_network_interfaces - return {"result": {'error': {'desc': 'this feature or command is not currently supported', 'class': 'Unsupported'}}} - elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu/103/agent/network-get-interfaces": - # _get_agent_network_interfaces - return { - "result": [ - { - "statistics": { - "tx-errs": 0, - "rx-errs": 0, - "rx-dropped": 0, - "tx-bytes": 48132932372, - "tx-dropped": 0, - "rx-bytes": 48132932372, - "tx-packets": 178578980, - "rx-packets": 178578980 - }, - "hardware-address": "ff:ff:ff:ff:ff:ff", - 
"ip-addresses": [ - { - "ip-address-type": "ipv4", - "prefix": 8, - "ip-address": "127.0.0.1" - } - ], - "name": "lo" - }, - { - "name": "eth0", - "ip-addresses": [ - { - "ip-address-type": "ipv4", - "prefix": 24, - "ip-address": "172.16.0.143" - } - ], - "statistics": { - "rx-errs": 0, - "tx-errs": 0, - "rx-packets": 660028, - "tx-packets": 304599, - "tx-dropped": 0, - "rx-bytes": 1846743499, - "tx-bytes": 1287844926, - "rx-dropped": 0 - }, - "hardware-address": "ff:ff:ff:ff:ff:ff" - }, - { - "name": "eth1", - "hardware-address": "ff:ff:ff:ff:ff:ff", - "statistics": { - "rx-bytes": 235717091946, - "tx-dropped": 0, - "rx-dropped": 0, - "tx-bytes": 123411636251, - "rx-packets": 540431277, - "tx-packets": 468411864, - "rx-errs": 0, - "tx-errs": 0 - }, - "ip-addresses": [ - { - "ip-address": "10.0.0.133", - "prefix": 24, - "ip-address-type": "ipv4" - } - ] - }, - { - "name": "docker0", - "ip-addresses": [ - { - "ip-address": "172.17.0.1", - "prefix": 16, - "ip-address-type": "ipv4" - } - ], - "hardware-address": "ff:ff:ff:ff:ff:ff", - "statistics": { - "rx-errs": 0, - "tx-errs": 0, - "rx-packets": 0, - "tx-packets": 0, - "tx-dropped": 0, - "rx-bytes": 0, - "rx-dropped": 0, - "tx-bytes": 0 - } - }, - { - "hardware-address": "ff:ff:ff:ff:ff:ff", - "name": "datapath" - }, - { - "name": "weave", - "ip-addresses": [ - { - "ip-address": "10.42.0.1", - "ip-address-type": "ipv4", - "prefix": 16 - } - ], - "hardware-address": "ff:ff:ff:ff:ff:ff", - "statistics": { - "rx-bytes": 127289123306, - "tx-dropped": 0, - "rx-dropped": 0, - "tx-bytes": 43827573343, - "rx-packets": 132750542, - "tx-packets": 74218762, - "rx-errs": 0, - "tx-errs": 0 - } - }, - { - "name": "vethwe-datapath", - "hardware-address": "ff:ff:ff:ff:ff:ff" - }, - { - "name": "vethwe-bridge", - "hardware-address": "ff:ff:ff:ff:ff:ff" - }, - { - "hardware-address": "ff:ff:ff:ff:ff:ff", - "name": "vxlan-6784" - }, - { - "name": "vethwepl0dfe1fe", - "hardware-address": "ff:ff:ff:ff:ff:ff" - }, - { - "name": "vethweplf1e7715", - "hardware-address": "ff:ff:ff:ff:ff:ff" - }, - { - "hardware-address": "ff:ff:ff:ff:ff:ff", - "name": "vethwepl9d244a1" - }, - { - "hardware-address": "ff:ff:ff:ff:ff:ff", - "name": "vethwepl2ca477b" - }, - { - "name": "nomacorip", - } - ] - } - elif url == "https://localhost:8006/api2/json/nodes/testnode/lxc/100/status/current": - # _get_vm_status (lxc) - return { - "swap": 0, - "name": "test-lxc", - "diskread": 0, - "vmid": 100, - "diskwrite": 0, - "pid": 9000, - "mem": 89980928, - "netin": 1950776396424, - "disk": 4998168576, - "cpu": 0.00163430613110039, - "type": "lxc", - "uptime": 6793736, - "maxmem": 1073741824, - "status": "running", - "cpus": "1", - "ha": { - "group": 'null', - "state": "started", - "managed": 1 - }, - "maxdisk": 3348329267200, - "netout": 1947793356037, - "maxswap": 1073741824 - } - elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu/101/status/current": - # _get_vm_status (qemu) - return { - "status": "stopped", - "uptime": 0, - "maxmem": 5364514816, - "maxdisk": 34359738368, - "netout": 0, - "cpus": 2, - "ha": { - "managed": 0 - }, - "diskread": 0, - "vmid": 101, - "diskwrite": 0, - "name": "test-qemu", - "cpu": 0, - "disk": 0, - "netin": 0, - "mem": 0, - "qmpstatus": "stopped" - } - elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu/102/status/current": - # _get_vm_status (qemu) - return { - "status": "stopped", - "uptime": 0, - "maxmem": 5364514816, - "maxdisk": 34359738368, - "netout": 0, - "cpus": 2, - "ha": { - "managed": 0 - }, - "diskread": 0, - "vmid": 
102, - "diskwrite": 0, - "name": "test-qemu-windows", - "cpu": 0, - "disk": 0, - "netin": 0, - "mem": 0, - "qmpstatus": "prelaunch" - } - elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu/103/status/current": - # _get_vm_status (qemu) - return { - "status": "stopped", - "uptime": 0, - "maxmem": 5364514816, - "maxdisk": 34359738368, - "netout": 0, - "cpus": 2, - "ha": { - "managed": 0 - }, - "diskread": 0, - "vmid": 103, - "diskwrite": 0, - "name": "test-qemu-multi-nic", - "cpu": 0, - "disk": 0, - "netin": 0, - "mem": 0, - "qmpstatus": "paused" - } - - -def get_vm_snapshots(node, properties, vmtype, vmid, name): - return [ - {"description": "", - "name": "clean", - "snaptime": 1000, - "vmstate": 0 - }, - {"name": "current", - "digest": "1234689abcdf", - "running": 0, - "description": "You are here!", - "parent": "clean" - }] - - -def get_option(opts): - def fn(option): - default = opts.get('default', False) - return opts.get(option, default) - return fn - - -def test_populate(inventory, mocker): - # module settings - inventory.proxmox_user = 'root@pam' - inventory.proxmox_password = 'password' - inventory.proxmox_url = 'https://localhost:8006' - inventory.group_prefix = 'proxmox_' - inventory.facts_prefix = 'proxmox_' - inventory.strict = False - inventory.exclude_nodes = False - - opts = { - 'group_prefix': 'proxmox_', - 'facts_prefix': 'proxmox_', - 'want_facts': True, - 'want_proxmox_nodes_ansible_host': True, - 'qemu_extended_statuses': True, - 'exclude_nodes': False - } - - # bypass authentication and API fetch calls - inventory._get_auth = mocker.MagicMock(side_effect=get_auth) - inventory._get_json = mocker.MagicMock(side_effect=get_json) - inventory._get_vm_snapshots = mocker.MagicMock(side_effect=get_vm_snapshots) - inventory.get_option = mocker.MagicMock(side_effect=get_option(opts)) - inventory._can_add_host = mocker.MagicMock(return_value=True) - inventory._populate() - - # get different hosts - host_qemu = inventory.inventory.get_host('test-qemu') - host_qemu_windows = inventory.inventory.get_host('test-qemu-windows') - host_qemu_multi_nic = inventory.inventory.get_host('test-qemu-multi-nic') - host_qemu_template = inventory.inventory.get_host('test-qemu-template') - host_lxc = inventory.inventory.get_host('test-lxc') - - # check if qemu-test is in the proxmox_pool_test group - assert 'proxmox_pool_test' in inventory.inventory.groups - group_qemu = inventory.inventory.groups['proxmox_pool_test'] - assert group_qemu.hosts == [host_qemu] - - # check if qemu-test has eth0 interface in agent_interfaces fact - assert 'eth0' in [d['name'] for d in host_qemu.get_vars()['proxmox_agent_interfaces']] - - # check if qemu-multi-nic has multiple network interfaces - for iface_name in ['eth0', 'eth1', 'weave']: - assert iface_name in [d['name'] for d in host_qemu_multi_nic.get_vars()['proxmox_agent_interfaces']] - - # check if interface with no mac-address or ip-address defaults correctly - assert [iface for iface in host_qemu_multi_nic.get_vars()['proxmox_agent_interfaces'] - if iface['name'] == 'nomacorip' - and iface['mac-address'] == '' - and iface['ip-addresses'] == [] - ] - - # check to make sure qemu-windows doesn't have proxmox_agent_interfaces - assert "proxmox_agent_interfaces" not in host_qemu_windows.get_vars() - - # check if lxc-test has been discovered correctly - group_lxc = inventory.inventory.groups['proxmox_all_lxc'] - assert group_lxc.hosts == [host_lxc] - - # check if qemu template is not present - assert host_qemu_template is None - - # check that offline 
node is in inventory - assert inventory.inventory.get_host('testnode2') - - # make sure that ['prelaunch', 'paused'] are in the group list - for group in ['paused', 'prelaunch']: - assert ('%sall_%s' % (inventory.group_prefix, group)) in inventory.inventory.groups - - # check if qemu-windows is in the prelaunch group - group_prelaunch = inventory.inventory.groups['proxmox_all_prelaunch'] - assert group_prelaunch.hosts == [host_qemu_windows] - - # check if qemu-multi-nic is in the paused group - group_paused = inventory.inventory.groups['proxmox_all_paused'] - assert group_paused.hosts == [host_qemu_multi_nic] - - -def test_populate_missing_qemu_extended_groups(inventory, mocker): - # module settings - inventory.proxmox_user = 'root@pam' - inventory.proxmox_password = 'password' - inventory.proxmox_url = 'https://localhost:8006' - inventory.group_prefix = 'proxmox_' - inventory.facts_prefix = 'proxmox_' - inventory.strict = False - inventory.exclude_nodes = False - - opts = { - 'group_prefix': 'proxmox_', - 'facts_prefix': 'proxmox_', - 'want_facts': True, - 'want_proxmox_nodes_ansible_host': True, - 'qemu_extended_statuses': False, - 'exclude_nodes': False - } - - # bypass authentication and API fetch calls - inventory._get_auth = mocker.MagicMock(side_effect=get_auth) - inventory._get_json = mocker.MagicMock(side_effect=get_json) - inventory._get_vm_snapshots = mocker.MagicMock(side_effect=get_vm_snapshots) - inventory.get_option = mocker.MagicMock(side_effect=get_option(opts)) - inventory._can_add_host = mocker.MagicMock(return_value=True) - inventory._populate() - - # make sure that ['prelaunch', 'paused'] are not in the group list - for group in ['paused', 'prelaunch']: - assert ('%sall_%s' % (inventory.group_prefix, group)) not in inventory.inventory.groups - - -def test_populate_exclude_nodes(inventory, mocker): - # module settings - inventory.proxmox_user = 'root@pam' - inventory.proxmox_password = 'password' - inventory.proxmox_url = 'https://localhost:8006' - inventory.group_prefix = 'proxmox_' - inventory.facts_prefix = 'proxmox_' - inventory.strict = False - inventory.exclude_nodes = True - - opts = { - 'group_prefix': 'proxmox_', - 'facts_prefix': 'proxmox_', - 'want_facts': True, - 'want_proxmox_nodes_ansible_host': True, - 'qemu_extended_statuses': False, - 'exclude_nodes': True - } - - # bypass authentication and API fetch calls - inventory._get_auth = mocker.MagicMock(side_effect=get_auth) - inventory._get_json = mocker.MagicMock(side_effect=get_json) - inventory._get_vm_snapshots = mocker.MagicMock(side_effect=get_vm_snapshots) - inventory.get_option = mocker.MagicMock(side_effect=get_option(opts)) - inventory._can_add_host = mocker.MagicMock(return_value=True) - inventory._populate() - - # make sure that nodes are not in the inventory - for node in ['testnode', 'testnode2']: - assert node not in inventory.inventory.hosts - # make sure that nodes group is absent - assert ('%s_nodes' % (inventory.group_prefix)) not in inventory.inventory.groups - # make sure that nodes are not in the "ungrouped" group - for node in ['testnode', 'testnode2']: - assert node not in inventory.inventory.get_groups_dict()["ungrouped"] diff --git a/tests/unit/plugins/inventory/test_stackpath_compute.py b/tests/unit/plugins/inventory/test_stackpath_compute.py deleted file mode 100644 index 781db50b73..0000000000 --- a/tests/unit/plugins/inventory/test_stackpath_compute.py +++ /dev/null @@ -1,206 +0,0 @@ -# Copyright (c) 2020 Shay Rybak -# Copyright (c) 2020 Ansible Project -# GNU General Public 
License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import pytest - -from ansible.errors import AnsibleError -from ansible.inventory.data import InventoryData -from ansible_collections.community.general.plugins.inventory.stackpath_compute import InventoryModule - - -@pytest.fixture(scope="module") -def inventory(): - r = InventoryModule() - r.inventory = InventoryData() - return r - - -def test_get_stack_slugs(inventory): - stacks = [ - { - 'status': 'ACTIVE', - 'name': 'test1', - 'id': 'XXXX', - 'updatedAt': '2020-07-08T01:00:00.000000Z', - 'slug': 'test1', - 'createdAt': '2020-07-08T00:00:00.000000Z', - 'accountId': 'XXXX', - }, { - 'status': 'ACTIVE', - 'name': 'test2', - 'id': 'XXXX', - 'updatedAt': '2019-10-22T18:00:00.000000Z', - 'slug': 'test2', - 'createdAt': '2019-10-22T18:00:00.000000Z', - 'accountId': 'XXXX', - }, { - 'status': 'DISABLED', - 'name': 'test3', - 'id': 'XXXX', - 'updatedAt': '2020-01-16T20:00:00.000000Z', - 'slug': 'test3', - 'createdAt': '2019-10-15T13:00:00.000000Z', - 'accountId': 'XXXX', - }, { - 'status': 'ACTIVE', - 'name': 'test4', - 'id': 'XXXX', - 'updatedAt': '2019-11-20T22:00:00.000000Z', - 'slug': 'test4', - 'createdAt': '2019-11-20T22:00:00.000000Z', - 'accountId': 'XXXX', - } - ] - inventory._get_stack_slugs(stacks) - assert len(inventory.stack_slugs) == 4 - assert inventory.stack_slugs == [ - "test1", - "test2", - "test3", - "test4" - ] - - -def test_verify_file(tmp_path, inventory): - file = tmp_path / "foobar.stackpath_compute.yml" - file.touch() - assert inventory.verify_file(str(file)) is True - - -def test_verify_file_bad_config(inventory): - assert inventory.verify_file('foobar.stackpath_compute.yml') is False - - -def test_validate_config(inventory): - config = { - "client_secret": "short_client_secret", - "use_internal_ip": False, - "stack_slugs": ["test1"], - "client_id": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", - "plugin": "community.general.stackpath_compute", - } - with pytest.raises(AnsibleError) as error_message: - inventory._validate_config(config) - assert "client_secret must be 64 characters long" in error_message - - config = { - "client_secret": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", - "use_internal_ip": True, - "stack_slugs": ["test1"], - "client_id": "short_client_id", - "plugin": "community.general.stackpath_compute", - } - with pytest.raises(AnsibleError) as error_message: - inventory._validate_config(config) - assert "client_id must be 32 characters long" in error_message - - config = { - "use_internal_ip": True, - "stack_slugs": ["test1"], - "client_id": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", - "plugin": "community.general.stackpath_compute", - } - with pytest.raises(AnsibleError) as error_message: - inventory._validate_config(config) - assert "config missing client_secret, a required parameter" in error_message - - config = { - "client_secret": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", - "use_internal_ip": False, - "plugin": "community.general.stackpath_compute", - } - with pytest.raises(AnsibleError) as error_message: - inventory._validate_config(config) - assert "config missing client_id, a required parameter" in error_message - - -def test_populate(inventory): - instances = [ - { - "name": "instance1", - "countryCode": "SE", - "workloadSlug": "wokrload1", - "continent": "Europe", - "workloadId": 
"id1", - "cityCode": "ARN", - "externalIpAddress": "20.0.0.1", - "target": "target1", - "stackSlug": "stack1", - "ipAddress": "10.0.0.1", - }, - { - "name": "instance2", - "countryCode": "US", - "workloadSlug": "wokrload2", - "continent": "America", - "workloadId": "id2", - "cityCode": "JFK", - "externalIpAddress": "20.0.0.2", - "target": "target2", - "stackSlug": "stack1", - "ipAddress": "10.0.0.2", - }, - { - "name": "instance3", - "countryCode": "SE", - "workloadSlug": "workload3", - "continent": "Europe", - "workloadId": "id3", - "cityCode": "ARN", - "externalIpAddress": "20.0.0.3", - "target": "target1", - "stackSlug": "stack2", - "ipAddress": "10.0.0.3", - }, - { - "name": "instance4", - "countryCode": "US", - "workloadSlug": "workload3", - "continent": "America", - "workloadId": "id4", - "cityCode": "JFK", - "externalIpAddress": "20.0.0.4", - "target": "target2", - "stackSlug": "stack2", - "ipAddress": "10.0.0.4", - }, - ] - inventory.hostname_key = "externalIpAddress" - inventory._populate(instances) - # get different hosts - host1 = inventory.inventory.get_host('20.0.0.1') - host2 = inventory.inventory.get_host('20.0.0.2') - host3 = inventory.inventory.get_host('20.0.0.3') - host4 = inventory.inventory.get_host('20.0.0.4') - - # get different groups - assert 'citycode_arn' in inventory.inventory.groups - group_citycode_arn = inventory.inventory.groups['citycode_arn'] - assert 'countrycode_se' in inventory.inventory.groups - group_countrycode_se = inventory.inventory.groups['countrycode_se'] - assert 'continent_america' in inventory.inventory.groups - group_continent_america = inventory.inventory.groups['continent_america'] - assert 'name_instance1' in inventory.inventory.groups - group_name_instance1 = inventory.inventory.groups['name_instance1'] - assert 'stackslug_stack1' in inventory.inventory.groups - group_stackslug_stack1 = inventory.inventory.groups['stackslug_stack1'] - assert 'target_target1' in inventory.inventory.groups - group_target_target1 = inventory.inventory.groups['target_target1'] - assert 'workloadslug_workload3' in inventory.inventory.groups - group_workloadslug_workload3 = inventory.inventory.groups['workloadslug_workload3'] - assert 'workloadid_id1' in inventory.inventory.groups - group_workloadid_id1 = inventory.inventory.groups['workloadid_id1'] - - assert group_citycode_arn.hosts == [host1, host3] - assert group_countrycode_se.hosts == [host1, host3] - assert group_continent_america.hosts == [host2, host4] - assert group_name_instance1.hosts == [host1] - assert group_stackslug_stack1.hosts == [host1, host2] - assert group_target_target1.hosts == [host1, host3] - assert group_workloadslug_workload3.hosts == [host3, host4] - assert group_workloadid_id1.hosts == [host1] diff --git a/tests/unit/plugins/inventory/test_xen_orchestra.py b/tests/unit/plugins/inventory/test_xen_orchestra.py index d626fb988b..f24c072705 100644 --- a/tests/unit/plugins/inventory/test_xen_orchestra.py +++ b/tests/unit/plugins/inventory/test_xen_orchestra.py @@ -158,6 +158,8 @@ def test_verify_file_bad_config(inventory): def test_populate(inventory, mocker): + inventory.host_entry_name_type = 'uuid' + inventory.vm_entry_name_type = 'uuid' inventory.get_option = mocker.MagicMock(side_effect=get_option) inventory._populate(objects) actual = sorted(inventory.inventory.hosts.keys()) diff --git a/tests/unit/plugins/lookup/onepassword_common.py b/tests/unit/plugins/lookup/onepassword_common.py index bf0cc35c12..0759e0abff 100644 --- a/tests/unit/plugins/lookup/onepassword_common.py +++ 
b/tests/unit/plugins/lookup/onepassword_common.py
@@ -293,3 +293,39 @@
         },
     ],
 }
+
+SSH_KEY_MOCK_ENTRIES = [
+    # loads private key in PKCS#8 format by default
+    {
+        "vault_name": "Personal",
+        "queries": ["ssh key"],
+        "expected": [
+            "-----BEGIN PRIVATE KEY-----\n..........=\n-----END PRIVATE KEY-----\n"
+        ],
+        "output": load_file("ssh_key_output.json"),
+    },
+    # loads private key in PKCS#8 format because ssh_format=false
+    {
+        "vault_name": "Personal",
+        "queries": ["ssh key"],
+        "kwargs": {
+            "ssh_format": False,
+        },
+        "expected": [
+            "-----BEGIN PRIVATE KEY-----\n..........=\n-----END PRIVATE KEY-----\n"
+        ],
+        "output": load_file("ssh_key_output.json"),
+    },
+    # loads private key in SSH format
+    {
+        "vault_name": "Personal",
+        "queries": ["ssh key"],
+        "kwargs": {
+            "ssh_format": True,
+        },
+        "expected": [
+            "-----BEGIN OPENSSH PRIVATE KEY-----\r\n.....\r\n-----END OPENSSH PRIVATE KEY-----\r\n"
+        ],
+        "output": load_file("ssh_key_output.json"),
+    },
+]
diff --git a/tests/unit/plugins/lookup/onepassword_fixtures/ssh_key_output.json b/tests/unit/plugins/lookup/onepassword_fixtures/ssh_key_output.json
new file mode 100644
index 0000000000..f14066b941
--- /dev/null
+++ b/tests/unit/plugins/lookup/onepassword_fixtures/ssh_key_output.json
@@ -0,0 +1,57 @@
+{
+  "id": "wdtryfeh3jlx2dlanqgg4dqxmy",
+  "title": "ssh key",
+  "version": 1,
+  "vault": {
+    "id": "5auhrjy66hc7ndhe2wvym6gadv",
+    "name": "Personal"
+  },
+  "category": "SSH_KEY",
+  "last_edited_by": "LSGPJERUYBH7BFPHMZ2KKGL6AU",
+  "created_at": "2025-01-10T16:57:16Z",
+  "updated_at": "2025-01-10T16:57:16Z",
+  "additional_information": "SHA256:frHmQAgblahD5HHgNj2O714",
+  "fields": [
+    {
+      "id": "public_key",
+      "type": "STRING",
+      "label": "public key",
+      "value": "ssh-ed255.....",
+      "reference": "op://Personal/ssh key/public key"
+    },
+    {
+      "id": "fingerprint",
+      "type": "STRING",
+      "label": "fingerprint",
+      "value": "SHA256:frHmQAgy7zBKeFDxHMW0QltZ/5O4N8gD5HHgNj2O614",
+      "reference": "op://Personal/ssh key/fingerprint"
+    },
+    {
+      "id": "private_key",
+      "type": "SSHKEY",
+      "label": "private key",
+      "value": "-----BEGIN PRIVATE KEY-----\n..........=\n-----END PRIVATE KEY-----\n",
+      "reference": "op://Personal/ssh key/private key",
+      "ssh_formats": {
+        "openssh": {
+          "reference": "op://Personal/ssh key/private key?ssh-format=openssh",
+          "value": "-----BEGIN OPENSSH PRIVATE KEY-----\r\n.....\r\n-----END OPENSSH PRIVATE KEY-----\r\n"
+        }
+      }
+    },
+    {
+      "id": "key_type",
+      "type": "STRING",
+      "label": "key type",
+      "value": "ed25519",
+      "reference": "op://Personal/ssh key/key type"
+    },
+    {
+      "id": "notesPlain",
+      "type": "STRING",
+      "purpose": "NOTES",
+      "label": "notesPlain",
+      "reference": "op://Personal/ssh key/notesPlain"
+    }
+  ]
+}
\ No newline at end of file
diff --git a/tests/sanity/ignore-2.14.txt.license b/tests/unit/plugins/lookup/onepassword_fixtures/ssh_key_output.json.license
similarity index 77%
rename from tests/sanity/ignore-2.14.txt.license
rename to tests/unit/plugins/lookup/onepassword_fixtures/ssh_key_output.json.license
index edff8c7685..59021c33ed 100644
--- a/tests/sanity/ignore-2.14.txt.license
+++ b/tests/unit/plugins/lookup/onepassword_fixtures/ssh_key_output.json.license
@@ -1,3 +1,3 @@
 GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
 SPDX-License-Identifier: GPL-3.0-or-later
-SPDX-FileCopyrightText: Ansible Project
+SPDX-FileCopyrightText: 2025, Ansible Project
diff --git a/tests/unit/plugins/lookup/test_bitwarden.py b/tests/unit/plugins/lookup/test_bitwarden.py
index 04cad8d6c8..04a70837f6 100644
--- a/tests/unit/plugins/lookup/test_bitwarden.py
+++ b/tests/unit/plugins/lookup/test_bitwarden.py
@@ -7,13 +7,13 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
 import re
-from ansible_collections.community.general.tests.unit.compat import unittest
-from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest
+from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch
 
 from ansible.errors import AnsibleError
 from ansible.module_utils import six
 from ansible.plugins.loader import lookup_loader
-from ansible_collections.community.general.plugins.lookup.bitwarden import Bitwarden
+from ansible_collections.community.general.plugins.lookup.bitwarden import Bitwarden, BitwardenException
 from ansible.parsing.ajson import AnsibleJSONEncoder
 
 MOCK_COLLECTION_ID = "3b12a9da-7c49-40b8-ad33-aede017a7ead"
@@ -131,7 +131,21 @@ MOCK_RECORDS = [
         "reprompt": 0,
         "revisionDate": "2024-14-15T11:30:00.000Z",
         "type": 1
-    }
+    },
+    {
+        "object": "collection",
+        "id": MOCK_COLLECTION_ID,
+        "organizationId": MOCK_ORGANIZATION_ID,
+        "name": "MOCK_COLLECTION",
+        "externalId": None
+    },
+    {
+        "object": "collection",
+        "id": "3b12a9da-7c49-40b8-ad33-aede017a8ead",
+        "organizationId": "3b12a9da-7c49-40b8-ad33-aede017a9ead",
+        "name": "some/other/collection",
+        "externalId": None
+    },
 ]
@@ -164,6 +178,9 @@ class MockBitwarden(Bitwarden):
 
             items = []
             for item in MOCK_RECORDS:
+                if item.get('object') != 'item':
+                    continue
+
                 if search_value and not re.search(search_value, item.get('name')):
                     continue
                 if collection_to_filter and collection_to_filter not in item.get('collectionIds', []):
@@ -172,6 +189,35 @@ class MockBitwarden(Bitwarden):
                     continue
                 items.append(item)
             return AnsibleJSONEncoder().encode(items), ''
+        elif args[1] == 'collections':
+            try:
+                search_value = args[args.index('--search') + 1]
+            except ValueError:
+                search_value = None
+
+            try:
+                collection_to_filter = args[args.index('--collectionid') + 1]
+            except ValueError:
+                collection_to_filter = None
+
+            try:
+                organization_to_filter = args[args.index('--organizationid') + 1]
+            except ValueError:
+                organization_to_filter = None
+
+            collections = []
+            for item in MOCK_RECORDS:
+                if item.get('object') != 'collection':
+                    continue
+
+                if search_value and not re.search(search_value, item.get('name')):
+                    continue
+                if collection_to_filter and collection_to_filter not in item.get('collectionIds', []):
+                    continue
+                if organization_to_filter and item.get('organizationId') != organization_to_filter:
+                    continue
+                collections.append(item)
+            return AnsibleJSONEncoder().encode(collections), ''
 
         return '[]', ''
@@ -261,3 +307,26 @@ class TestLookupModule(unittest.TestCase):
     def test_bitwarden_plugin_full_collection_organization(self):
         self.assertEqual([MOCK_RECORDS[0], MOCK_RECORDS[2]], self.lookup.run(None, collection_id=MOCK_COLLECTION_ID,
                                                                              organization_id=MOCK_ORGANIZATION_ID)[0])
+
+    @patch('ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden', new=MockBitwarden())
+    def test_bitwarden_plugin_collection_name_filter(self):
+        # all passwords from MOCK_COLLECTION
+        self.assertEqual([MOCK_RECORDS[0], MOCK_RECORDS[2]], self.lookup.run(None,
+                                                                             collection_name="MOCK_COLLECTION")[0])
+        # Existing collection, no results
+        self.assertEqual([], self.lookup.run(None, collection_name="some/other/collection")[0])
+
+        # Non-existent collection
+        with self.assertRaises(BitwardenException):
+            self.lookup.run(None, collection_name="nonexistent")
+
+    @patch('ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden', new=MockBitwarden())
+    def test_bitwarden_plugin_result_count_check(self):
+        self.lookup.run(None, collection_id=MOCK_COLLECTION_ID, organization_id=MOCK_ORGANIZATION_ID, result_count=2)
+        with self.assertRaises(BitwardenException):
+            self.lookup.run(None, collection_id=MOCK_COLLECTION_ID, organization_id=MOCK_ORGANIZATION_ID,
+                            result_count=1)
+
+        self.lookup.run(None, organization_id=MOCK_ORGANIZATION_ID, result_count=3)
+        with self.assertRaises(BitwardenException):
+            self.lookup.run(None, organization_id=MOCK_ORGANIZATION_ID, result_count=0)
diff --git a/tests/unit/plugins/lookup/test_bitwarden_secrets_manager.py b/tests/unit/plugins/lookup/test_bitwarden_secrets_manager.py
index 5d2abeffa8..c2f8b0a5e5 100644
--- a/tests/unit/plugins/lookup/test_bitwarden_secrets_manager.py
+++ b/tests/unit/plugins/lookup/test_bitwarden_secrets_manager.py
@@ -8,8 +8,8 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
 import json
-from ansible_collections.community.general.tests.unit.compat import unittest
-from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest
+from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch
 
 from ansible.errors import AnsibleLookupError
 from ansible.plugins.loader import lookup_loader
diff --git a/tests/unit/plugins/lookup/test_dependent.py b/tests/unit/plugins/lookup/test_dependent.py
index 74d7c41239..fef53dec00 100644
--- a/tests/unit/plugins/lookup/test_dependent.py
+++ b/tests/unit/plugins/lookup/test_dependent.py
@@ -9,18 +9,17 @@
 from __future__ import absolute_import, division, print_function
 
 __metaclass__ = type
 
+from ansible.template import Templar
+
 from ansible_collections.community.internal_test_tools.tests.unit.compat.unittest import TestCase
-from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import (
-    MagicMock,
-)
+from ansible_collections.community.internal_test_tools.tests.unit.utils.trust import make_trusted
 
 from ansible.plugins.loader import lookup_loader
 
 
 class TestLookupModule(TestCase):
     def setUp(self):
-        templar = MagicMock()
-        templar._loader = None
+        templar = Templar(loader=None)
         self.lookup = lookup_loader.get("community.general.dependent", templar=templar)
 
     def test_empty(self):
@@ -30,9 +29,9 @@ class TestLookupModule(TestCase):
         self.assertListEqual(
             self.lookup.run(
                 [
-                    {'a': '[1, 2]'},
-                    {'b': '[item.a + 3, item.a + 6]'},
-                    {'c': '[item.a + item.b * 10]'},
+                    {'a': make_trusted('[1, 2]')},
+                    {'b': make_trusted('[item.a + 3, item.a + 6]')},
+                    {'c': make_trusted('[item.a + item.b * 10]')},
                 ],
                 {},
             ),
diff --git a/tests/unit/plugins/lookup/test_dsv.py b/tests/unit/plugins/lookup/test_dsv.py
index a9a2d30ee6..fb23a74548 100644
--- a/tests/unit/plugins/lookup/test_dsv.py
+++ b/tests/unit/plugins/lookup/test_dsv.py
@@ -8,8 +8,8 @@
 from __future__ import absolute_import, division, print_function
 __metaclass__ = type
 
-from ansible_collections.community.general.tests.unit.compat.unittest import TestCase
-from ansible_collections.community.general.tests.unit.compat.mock import (
+from ansible_collections.community.internal_test_tools.tests.unit.compat.unittest import TestCase
+from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import (
     patch,
     MagicMock,
 )
diff --git a/tests/unit/plugins/lookup/test_etcd3.py b/tests/unit/plugins/lookup/test_etcd3.py
index e9ac777ebe..62061620e9 100644
--- a/tests/unit/plugins/lookup/test_etcd3.py
+++ b/tests/unit/plugins/lookup/test_etcd3.py
@@ -8,8 +8,8 @@
 from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
-from ansible_collections.community.general.tests.unit.compat import unittest
-from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock
+from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest
+from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch, MagicMock
 
 from ansible_collections.community.general.plugins.lookup import etcd3
 from ansible.plugins.loader import lookup_loader
diff --git a/tests/unit/plugins/lookup/test_github_app_access_token.py b/tests/unit/plugins/lookup/test_github_app_access_token.py
index 7971335a47..7dd907c9ee 100644
--- a/tests/unit/plugins/lookup/test_github_app_access_token.py
+++ b/tests/unit/plugins/lookup/test_github_app_access_token.py
@@ -7,8 +7,8 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
 import json
-from ansible_collections.community.general.tests.unit.compat import unittest
-from ansible_collections.community.general.tests.unit.compat.mock import (
+from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest
+from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import (
     patch,
     MagicMock,
     mock_open
diff --git a/tests/unit/plugins/lookup/test_lastpass.py b/tests/unit/plugins/lookup/test_lastpass.py
index 5f65c9f633..fd38928c94 100644
--- a/tests/unit/plugins/lookup/test_lastpass.py
+++ b/tests/unit/plugins/lookup/test_lastpass.py
@@ -8,8 +8,8 @@ __metaclass__ = type
 
 from argparse import ArgumentParser
 
-from ansible_collections.community.general.tests.unit.compat import unittest
-from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest
+from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch
 
 from ansible.errors import AnsibleError
 from ansible.module_utils import six
diff --git a/tests/unit/plugins/lookup/test_manifold.py b/tests/unit/plugins/lookup/test_manifold.py
deleted file mode 100644
index 4fa3562763..0000000000
--- a/tests/unit/plugins/lookup/test_manifold.py
+++ /dev/null
@@ -1,537 +0,0 @@
-# Copyright (c) 2018, Arigato Machine Inc.
-# Copyright (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.tests.unit.compat.mock import patch, call -from ansible.errors import AnsibleError -from ansible.module_utils.urls import ConnectionError, SSLValidationError -from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError -from ansible.module_utils import six -from ansible.plugins.loader import lookup_loader -from ansible_collections.community.general.plugins.lookup.manifold import ManifoldApiClient, ApiError -import json -import os - - -API_FIXTURES = { - 'https://api.marketplace.manifold.co/v1/resources': - [ - { - "body": { - "label": "resource-1", - "name": "Resource 1" - }, - "id": "rid-1" - }, - { - "body": { - "label": "resource-2", - "name": "Resource 2" - }, - "id": "rid-2" - } - ], - 'https://api.marketplace.manifold.co/v1/resources?label=resource-1': - [ - { - "body": { - "label": "resource-1", - "name": "Resource 1" - }, - "id": "rid-1" - } - ], - 'https://api.marketplace.manifold.co/v1/resources?label=resource-2': - [ - { - "body": { - "label": "resource-2", - "name": "Resource 2" - }, - "id": "rid-2" - } - ], - 'https://api.marketplace.manifold.co/v1/resources?team_id=tid-1': - [ - { - "body": { - "label": "resource-1", - "name": "Resource 1" - }, - "id": "rid-1" - } - ], - 'https://api.marketplace.manifold.co/v1/resources?project_id=pid-1': - [ - { - "body": { - "label": "resource-2", - "name": "Resource 2" - }, - "id": "rid-2" - } - ], - 'https://api.marketplace.manifold.co/v1/resources?project_id=pid-2': - [ - { - "body": { - "label": "resource-1", - "name": "Resource 1" - }, - "id": "rid-1" - }, - { - "body": { - "label": "resource-3", - "name": "Resource 3" - }, - "id": "rid-3" - } - ], - 'https://api.marketplace.manifold.co/v1/resources?team_id=tid-1&project_id=pid-1': - [ - { - "body": { - "label": "resource-1", - "name": "Resource 1" - }, - "id": "rid-1" - } - ], - 'https://api.marketplace.manifold.co/v1/projects': - [ - { - "body": { - "label": "project-1", - "name": "Project 1", - }, - "id": "pid-1", - }, - { - "body": { - "label": "project-2", - "name": "Project 2", - }, - "id": "pid-2", - } - ], - 'https://api.marketplace.manifold.co/v1/projects?label=project-2': - [ - { - "body": { - "label": "project-2", - "name": "Project 2", - }, - "id": "pid-2", - } - ], - 'https://api.marketplace.manifold.co/v1/credentials?resource_id=rid-1': - [ - { - "body": { - "resource_id": "rid-1", - "values": { - "RESOURCE_TOKEN_1": "token-1", - "RESOURCE_TOKEN_2": "token-2" - } - }, - "id": "cid-1", - } - ], - 'https://api.marketplace.manifold.co/v1/credentials?resource_id=rid-2': - [ - { - "body": { - "resource_id": "rid-2", - "values": { - "RESOURCE_TOKEN_3": "token-3", - "RESOURCE_TOKEN_4": "token-4" - } - }, - "id": "cid-2", - } - ], - 'https://api.marketplace.manifold.co/v1/credentials?resource_id=rid-3': - [ - { - "body": { - "resource_id": "rid-3", - "values": { - "RESOURCE_TOKEN_1": "token-5", - "RESOURCE_TOKEN_2": "token-6" - } - }, - "id": "cid-3", - } - ], - 'https://api.identity.manifold.co/v1/teams': - [ - { - "id": "tid-1", - "body": { - "name": "Team 1", - "label": "team-1" - } - }, - { - "id": "tid-2", - "body": { - "name": "Team 2", - "label": 
"team-2" - } - } - ] -} - - -def mock_fixture(open_url_mock, fixture=None, data=None, headers=None): - if not headers: - headers = {} - if fixture: - data = json.dumps(API_FIXTURES[fixture]) - if 'content-type' not in headers: - headers['content-type'] = 'application/json' - - open_url_mock.return_value.read.return_value = data - open_url_mock.return_value.headers = headers - - -class TestManifoldApiClient(unittest.TestCase): - @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url') - def test_request_sends_default_headers(self, open_url_mock): - mock_fixture(open_url_mock, data='hello') - client = ManifoldApiClient('token-123') - client.request('test', 'endpoint') - open_url_mock.assert_called_with('https://api.test.manifold.co/v1/endpoint', - headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'}, - http_agent='python-manifold-ansible-1.0.0') - - @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url') - def test_request_decodes_json(self, open_url_mock): - mock_fixture(open_url_mock, fixture='https://api.marketplace.manifold.co/v1/resources') - client = ManifoldApiClient('token-123') - self.assertIsInstance(client.request('marketplace', 'resources'), list) - - @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url') - def test_request_streams_text(self, open_url_mock): - mock_fixture(open_url_mock, data='hello', headers={'content-type': "text/plain"}) - client = ManifoldApiClient('token-123') - self.assertEqual('hello', client.request('test', 'endpoint')) - - @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url') - def test_request_processes_parameterized_headers(self, open_url_mock): - mock_fixture(open_url_mock, data='hello') - client = ManifoldApiClient('token-123') - client.request('test', 'endpoint', headers={'X-HEADER': 'MANIFOLD'}) - open_url_mock.assert_called_with('https://api.test.manifold.co/v1/endpoint', - headers={'Accept': '*/*', 'Authorization': 'Bearer token-123', - 'X-HEADER': 'MANIFOLD'}, - http_agent='python-manifold-ansible-1.0.0') - - @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url') - def test_request_passes_arbitrary_parameters(self, open_url_mock): - mock_fixture(open_url_mock, data='hello') - client = ManifoldApiClient('token-123') - client.request('test', 'endpoint', use_proxy=False, timeout=5) - open_url_mock.assert_called_with('https://api.test.manifold.co/v1/endpoint', - headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'}, - http_agent='python-manifold-ansible-1.0.0', - use_proxy=False, timeout=5) - - @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url') - def test_request_raises_on_incorrect_json(self, open_url_mock): - mock_fixture(open_url_mock, data='noJson', headers={'content-type': "application/json"}) - client = ManifoldApiClient('token-123') - with self.assertRaises(ApiError) as context: - client.request('test', 'endpoint') - self.assertEqual('JSON response can\'t be parsed while requesting https://api.test.manifold.co/v1/endpoint:\n' - 'noJson', - str(context.exception)) - - @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url') - def test_request_raises_on_status_500(self, open_url_mock): - open_url_mock.side_effect = HTTPError('https://api.test.manifold.co/v1/endpoint', - 500, 'Server error', {}, six.StringIO('ERROR')) - client = ManifoldApiClient('token-123') - with self.assertRaises(ApiError) as context: - client.request('test', 'endpoint') - 
self.assertEqual('Server returned: HTTP Error 500: Server error while requesting ' - 'https://api.test.manifold.co/v1/endpoint:\nERROR', - str(context.exception)) - - @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url') - def test_request_raises_on_bad_url(self, open_url_mock): - open_url_mock.side_effect = URLError('URL is invalid') - client = ManifoldApiClient('token-123') - with self.assertRaises(ApiError) as context: - client.request('test', 'endpoint') - self.assertEqual('Failed lookup url for https://api.test.manifold.co/v1/endpoint : ', - str(context.exception)) - - @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url') - def test_request_raises_on_ssl_error(self, open_url_mock): - open_url_mock.side_effect = SSLValidationError('SSL Error') - client = ManifoldApiClient('token-123') - with self.assertRaises(ApiError) as context: - client.request('test', 'endpoint') - self.assertEqual('Error validating the server\'s certificate for https://api.test.manifold.co/v1/endpoint: ' - 'SSL Error', - str(context.exception)) - - @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url') - def test_request_raises_on_connection_error(self, open_url_mock): - open_url_mock.side_effect = ConnectionError('Unknown connection error') - client = ManifoldApiClient('token-123') - with self.assertRaises(ApiError) as context: - client.request('test', 'endpoint') - self.assertEqual('Error connecting to https://api.test.manifold.co/v1/endpoint: Unknown connection error', - str(context.exception)) - - @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url') - def test_get_resources_get_all(self, open_url_mock): - url = 'https://api.marketplace.manifold.co/v1/resources' - mock_fixture(open_url_mock, fixture=url) - client = ManifoldApiClient('token-123') - self.assertListEqual(API_FIXTURES[url], client.get_resources()) - open_url_mock.assert_called_with(url, - headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'}, - http_agent='python-manifold-ansible-1.0.0') - - @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url') - def test_get_resources_filter_label(self, open_url_mock): - url = 'https://api.marketplace.manifold.co/v1/resources?label=resource-1' - mock_fixture(open_url_mock, fixture=url) - client = ManifoldApiClient('token-123') - self.assertListEqual(API_FIXTURES[url], client.get_resources(label='resource-1')) - open_url_mock.assert_called_with(url, - headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'}, - http_agent='python-manifold-ansible-1.0.0') - - @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url') - def test_get_resources_filter_team_and_project(self, open_url_mock): - url = 'https://api.marketplace.manifold.co/v1/resources?team_id=tid-1&project_id=pid-1' - mock_fixture(open_url_mock, fixture=url) - client = ManifoldApiClient('token-123') - self.assertListEqual(API_FIXTURES[url], client.get_resources(team_id='tid-1', project_id='pid-1')) - args, kwargs = open_url_mock.call_args - url_called = args[0] - # Dict order is not guaranteed, so an url may have querystring parameters order randomized - self.assertIn('team_id=tid-1', url_called) - self.assertIn('project_id=pid-1', url_called) - - @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url') - def test_get_teams_get_all(self, open_url_mock): - url = 'https://api.identity.manifold.co/v1/teams' - mock_fixture(open_url_mock, fixture=url) - client = 
ManifoldApiClient('token-123') - self.assertListEqual(API_FIXTURES[url], client.get_teams()) - open_url_mock.assert_called_with(url, - headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'}, - http_agent='python-manifold-ansible-1.0.0') - - @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url') - def test_get_teams_filter_label(self, open_url_mock): - url = 'https://api.identity.manifold.co/v1/teams' - mock_fixture(open_url_mock, fixture=url) - client = ManifoldApiClient('token-123') - self.assertListEqual(API_FIXTURES[url][1:2], client.get_teams(label='team-2')) - open_url_mock.assert_called_with(url, - headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'}, - http_agent='python-manifold-ansible-1.0.0') - - @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url') - def test_get_projects_get_all(self, open_url_mock): - url = 'https://api.marketplace.manifold.co/v1/projects' - mock_fixture(open_url_mock, fixture=url) - client = ManifoldApiClient('token-123') - self.assertListEqual(API_FIXTURES[url], client.get_projects()) - open_url_mock.assert_called_with(url, - headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'}, - http_agent='python-manifold-ansible-1.0.0') - - @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url') - def test_get_projects_filter_label(self, open_url_mock): - url = 'https://api.marketplace.manifold.co/v1/projects?label=project-2' - mock_fixture(open_url_mock, fixture=url) - client = ManifoldApiClient('token-123') - self.assertListEqual(API_FIXTURES[url], client.get_projects(label='project-2')) - open_url_mock.assert_called_with(url, - headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'}, - http_agent='python-manifold-ansible-1.0.0') - - @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url') - def test_get_credentials(self, open_url_mock): - url = 'https://api.marketplace.manifold.co/v1/credentials?resource_id=rid-1' - mock_fixture(open_url_mock, fixture=url) - client = ManifoldApiClient('token-123') - self.assertListEqual(API_FIXTURES[url], client.get_credentials(resource_id='rid-1')) - open_url_mock.assert_called_with(url, - headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'}, - http_agent='python-manifold-ansible-1.0.0') - - -class TestLookupModule(unittest.TestCase): - def setUp(self): - self.lookup = lookup_loader.get('community.general.manifold') - - @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient') - def test_get_all(self, client_mock): - expected_result = [{'RESOURCE_TOKEN_1': 'token-1', - 'RESOURCE_TOKEN_2': 'token-2', - 'RESOURCE_TOKEN_3': 'token-3', - 'RESOURCE_TOKEN_4': 'token-4' - }] - client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources'] - client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/' - 'credentials?resource_id={0}'.format(x)] - self.assertListEqual(expected_result, self.lookup.run([], api_token='token-123')) - client_mock.assert_called_with('token-123') - client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id=None) - - @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient') - def test_get_one_resource(self, client_mock): - expected_result = [{'RESOURCE_TOKEN_3': 'token-3', - 'RESOURCE_TOKEN_4': 'token-4' - }] - client_mock.return_value.get_resources.return_value = 
API_FIXTURES['https://api.marketplace.manifold.co/v1/resources?label=resource-2'] - client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/' - 'credentials?resource_id={0}'.format(x)] - self.assertListEqual(expected_result, self.lookup.run(['resource-2'], api_token='token-123')) - client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id=None, label='resource-2') - - @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient') - def test_get_two_resources(self, client_mock): - expected_result = [{'RESOURCE_TOKEN_1': 'token-1', - 'RESOURCE_TOKEN_2': 'token-2', - 'RESOURCE_TOKEN_3': 'token-3', - 'RESOURCE_TOKEN_4': 'token-4' - }] - client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources'] - client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/' - 'credentials?resource_id={0}'.format(x)] - self.assertListEqual(expected_result, self.lookup.run(['resource-1', 'resource-2'], api_token='token-123')) - client_mock.assert_called_with('token-123') - client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id=None) - - @patch('ansible_collections.community.general.plugins.lookup.manifold.display') - @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient') - def test_get_resources_with_same_credential_names(self, client_mock, display_mock): - expected_result = [{'RESOURCE_TOKEN_1': 'token-5', - 'RESOURCE_TOKEN_2': 'token-6' - }] - client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources?project_id=pid-2'] - client_mock.return_value.get_projects.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/projects?label=project-2'] - client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/' - 'credentials?resource_id={0}'.format(x)] - self.assertListEqual(expected_result, self.lookup.run([], api_token='token-123', project='project-2')) - client_mock.assert_called_with('token-123') - display_mock.warning.assert_has_calls([ - call("'RESOURCE_TOKEN_1' with label 'resource-1' was replaced by resource data with label 'resource-3'"), - call("'RESOURCE_TOKEN_2' with label 'resource-1' was replaced by resource data with label 'resource-3'")], - any_order=True - ) - client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id='pid-2') - - @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient') - def test_filter_by_team(self, client_mock): - expected_result = [{'RESOURCE_TOKEN_1': 'token-1', - 'RESOURCE_TOKEN_2': 'token-2' - }] - client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources?team_id=tid-1'] - client_mock.return_value.get_teams.return_value = API_FIXTURES['https://api.identity.manifold.co/v1/teams'][0:1] - client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/' - 'credentials?resource_id={0}'.format(x)] - self.assertListEqual(expected_result, self.lookup.run([], api_token='token-123', team='team-1')) - client_mock.assert_called_with('token-123') - client_mock.return_value.get_resources.assert_called_with(team_id='tid-1', project_id=None) - - 
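All of the manifold tests removed in this file share one fixture-driven pattern: patch open_url at the point where the lookup module imports it, have the mock return canned JSON keyed by URL, and assert against the same API_FIXTURES table. A reduced, self-contained sketch of that pattern follows; the module, URL, and fixture names are illustrative, not the plugin's real API.

import json
from unittest import TestCase, main
from unittest.mock import patch

API_FIXTURES = {'https://api.example.test/v1/things': [{'id': 'thing-1'}]}


def open_url(url, **kwargs):
    # Real network call in production; unit tests must never reach this.
    raise RuntimeError('unit tests must not hit the network')


def fetch_things(url):
    # Production code path: HTTP GET via open_url(), then JSON decode.
    return json.loads(open_url(url).read())


class TestFetchThings(TestCase):
    # Patch the name where the caller looks it up, not where it is defined.
    @patch('{0}.open_url'.format(__name__))
    def test_returns_fixture(self, open_url_mock):
        url = 'https://api.example.test/v1/things'
        open_url_mock.return_value.read.return_value = json.dumps(API_FIXTURES[url])
        self.assertEqual(API_FIXTURES[url], fetch_things(url))
        open_url_mock.assert_called_once_with(url)


if __name__ == '__main__':
    main()

Patching where the caller looks the name up is what makes the interception work; patching urllib itself would leave the plugin's imported reference untouched.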
@patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient') - def test_filter_by_project(self, client_mock): - expected_result = [{'RESOURCE_TOKEN_3': 'token-3', - 'RESOURCE_TOKEN_4': 'token-4' - }] - client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources?project_id=pid-1'] - client_mock.return_value.get_projects.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/projects'][0:1] - client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/' - 'credentials?resource_id={0}'.format(x)] - self.assertListEqual(expected_result, self.lookup.run([], api_token='token-123', project='project-1')) - client_mock.assert_called_with('token-123') - client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id='pid-1') - - @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient') - def test_filter_by_team_and_project(self, client_mock): - expected_result = [{'RESOURCE_TOKEN_1': 'token-1', - 'RESOURCE_TOKEN_2': 'token-2' - }] - client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources?team_id=tid-1&project_id=pid-1'] - client_mock.return_value.get_teams.return_value = API_FIXTURES['https://api.identity.manifold.co/v1/teams'][0:1] - client_mock.return_value.get_projects.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/projects'][0:1] - client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/' - 'credentials?resource_id={0}'.format(x)] - self.assertListEqual(expected_result, self.lookup.run([], api_token='token-123', project='project-1')) - client_mock.assert_called_with('token-123') - client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id='pid-1') - - @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient') - def test_raise_team_doesnt_exist(self, client_mock): - client_mock.return_value.get_teams.return_value = [] - with self.assertRaises(AnsibleError) as context: - self.lookup.run([], api_token='token-123', team='no-team') - self.assertEqual("Team 'no-team' does not exist", - str(context.exception)) - - @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient') - def test_raise_project_doesnt_exist(self, client_mock): - client_mock.return_value.get_projects.return_value = [] - with self.assertRaises(AnsibleError) as context: - self.lookup.run([], api_token='token-123', project='no-project') - self.assertEqual("Project 'no-project' does not exist", - str(context.exception)) - - @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient') - def test_raise_resource_doesnt_exist(self, client_mock): - client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources'] - with self.assertRaises(AnsibleError) as context: - self.lookup.run(['resource-1', 'no-resource-1', 'no-resource-2'], api_token='token-123') - self.assertEqual("Resource(s) no-resource-1, no-resource-2 do not exist", - str(context.exception)) - - @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient') - def test_catch_api_error(self, client_mock): - client_mock.side_effect = ApiError('Generic error') - with self.assertRaises(AnsibleError) as context: - self.lookup.run([], 
api_token='token-123') - self.assertEqual("API Error: Generic error", - str(context.exception)) - - @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient') - def test_catch_unhandled_exception(self, client_mock): - client_mock.side_effect = Exception('Unknown error') - with self.assertRaises(AnsibleError) as context: - self.lookup.run([], api_token='token-123') - self.assertTrue('Exception: Unknown error' in str(context.exception)) - - @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient') - def test_falls_back_to_env_var(self, client_mock): - client_mock.return_value.get_resources.return_value = [] - client_mock.return_value.get_credentials.return_value = [] - try: - os.environ['MANIFOLD_API_TOKEN'] = 'token-321' - self.lookup.run([]) - finally: - os.environ.pop('MANIFOLD_API_TOKEN', None) - client_mock.assert_called_with('token-321') - - @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient') - def test_falls_raises_on_no_token(self, client_mock): - client_mock.return_value.get_resources.return_value = [] - client_mock.return_value.get_credentials.return_value = [] - os.environ.pop('MANIFOLD_API_TOKEN', None) - with self.assertRaises(AnsibleError) as context: - self.lookup.run([]) - assert 'api_token' in str(context.exception) diff --git a/tests/unit/plugins/lookup/test_merge_variables.py b/tests/unit/plugins/lookup/test_merge_variables.py index ba8209439a..c8b0ca6fab 100644 --- a/tests/unit/plugins/lookup/test_merge_variables.py +++ b/tests/unit/plugins/lookup/test_merge_variables.py @@ -6,9 +6,9 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible_collections.community.general.tests.unit.mock.loader import DictDataLoader +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.mock.loader import DictDataLoader from ansible.plugins import AnsiblePlugin from ansible.template import Templar diff --git a/tests/unit/plugins/lookup/test_onepassword_ssh_key.py b/tests/unit/plugins/lookup/test_onepassword_ssh_key.py new file mode 100644 index 0000000000..864f58db6e --- /dev/null +++ b/tests/unit/plugins/lookup/test_onepassword_ssh_key.py @@ -0,0 +1,30 @@ +# Copyright (c) 2025 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import pytest + +from .onepassword_common import SSH_KEY_MOCK_ENTRIES + +from ansible.plugins.loader import lookup_loader + + +@pytest.mark.parametrize( + ("vault", "queries", "kwargs", "output", "expected"), + ( + (item["vault_name"], item["queries"], item.get("kwargs", {}), item["output"], item["expected"]) + for item in SSH_KEY_MOCK_ENTRIES + ) +) +def test_ssh_key(mocker, vault, queries, kwargs, output, expected): + mocker.patch("ansible_collections.community.general.plugins.lookup.onepassword.OnePass.assert_logged_in", return_value=True) + mocker.patch("ansible_collections.community.general.plugins.lookup.onepassword.OnePassCLIBase._run", 
return_value=(0, json.dumps(output), "")) + + op_lookup = lookup_loader.get("community.general.onepassword_ssh_key") + result = op_lookup.run(queries, vault=vault, **kwargs) + + assert result == expected diff --git a/tests/unit/plugins/lookup/test_revbitspss.py b/tests/unit/plugins/lookup/test_revbitspss.py index 5109992068..9f1372e49c 100644 --- a/tests/unit/plugins/lookup/test_revbitspss.py +++ b/tests/unit/plugins/lookup/test_revbitspss.py @@ -6,8 +6,8 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -from ansible_collections.community.general.tests.unit.compat.unittest import TestCase -from ansible_collections.community.general.tests.unit.compat.mock import ( +from ansible_collections.community.internal_test_tools.tests.unit.compat.unittest import TestCase +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import ( patch, MagicMock, ) diff --git a/tests/unit/plugins/lookup/test_tss.py b/tests/unit/plugins/lookup/test_tss.py index 47ca79a697..28027d3177 100644 --- a/tests/unit/plugins/lookup/test_tss.py +++ b/tests/unit/plugins/lookup/test_tss.py @@ -8,8 +8,8 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -from ansible_collections.community.general.tests.unit.compat.unittest import TestCase -from ansible_collections.community.general.tests.unit.compat.mock import ( +from ansible_collections.community.internal_test_tools.tests.unit.compat.unittest import TestCase +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import ( patch, DEFAULT, MagicMock, diff --git a/tests/unit/plugins/module_utils/cloud/test_backoff.py b/tests/unit/plugins/module_utils/cloud/test_backoff.py index 5a5188669a..e06f65af72 100644 --- a/tests/unit/plugins/module_utils/cloud/test_backoff.py +++ b/tests/unit/plugins/module_utils/cloud/test_backoff.py @@ -7,7 +7,7 @@ __metaclass__ = type import random -from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest from ansible_collections.community.general.plugins.module_utils.cloud import _exponential_backoff, \ _full_jitter_backoff diff --git a/tests/unit/plugins/module_utils/cloud/test_scaleway.py b/tests/unit/plugins/module_utils/cloud/test_scaleway.py index dc53bc1261..1052783884 100644 --- a/tests/unit/plugins/module_utils/cloud/test_scaleway.py +++ b/tests/unit/plugins/module_utils/cloud/test_scaleway.py @@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest from ansible_collections.community.general.plugins.module_utils.scaleway import SecretVariables, argon2 diff --git a/tests/unit/plugins/module_utils/conftest.py b/tests/unit/plugins/module_utils/conftest.py deleted file mode 100644 index 2217dd39f9..0000000000 --- a/tests/unit/plugins/module_utils/conftest.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import json -import sys -from io import BytesIO - -import pytest - -import ansible.module_utils.basic -from 
ansible.module_utils.six import PY3, string_types -from ansible.module_utils.common.text.converters import to_bytes -from ansible.module_utils.common._collections_compat import MutableMapping - - -@pytest.fixture -def stdin(mocker, request): - old_args = ansible.module_utils.basic._ANSIBLE_ARGS - ansible.module_utils.basic._ANSIBLE_ARGS = None - old_argv = sys.argv - sys.argv = ['ansible_unittest'] - - if isinstance(request.param, string_types): - args = request.param - elif isinstance(request.param, MutableMapping): - if 'ANSIBLE_MODULE_ARGS' not in request.param: - request.param = {'ANSIBLE_MODULE_ARGS': request.param} - if '_ansible_remote_tmp' not in request.param['ANSIBLE_MODULE_ARGS']: - request.param['ANSIBLE_MODULE_ARGS']['_ansible_remote_tmp'] = '/tmp' - if '_ansible_keep_remote_files' not in request.param['ANSIBLE_MODULE_ARGS']: - request.param['ANSIBLE_MODULE_ARGS']['_ansible_keep_remote_files'] = False - args = json.dumps(request.param) - else: - raise Exception('Malformed data to the stdin pytest fixture') - - fake_stdin = BytesIO(to_bytes(args, errors='surrogate_or_strict')) - if PY3: - mocker.patch('ansible.module_utils.basic.sys.stdin', mocker.MagicMock()) - mocker.patch('ansible.module_utils.basic.sys.stdin.buffer', fake_stdin) - else: - mocker.patch('ansible.module_utils.basic.sys.stdin', fake_stdin) - - yield fake_stdin - - ansible.module_utils.basic._ANSIBLE_ARGS = old_args - sys.argv = old_argv - - -@pytest.fixture -def am(stdin, request): - old_args = ansible.module_utils.basic._ANSIBLE_ARGS - ansible.module_utils.basic._ANSIBLE_ARGS = None - old_argv = sys.argv - sys.argv = ['ansible_unittest'] - - argspec = {} - if hasattr(request, 'param'): - if isinstance(request.param, dict): - argspec = request.param - - am = ansible.module_utils.basic.AnsibleModule( - argument_spec=argspec, - ) - am._name = 'ansible_unittest' - - yield am - - ansible.module_utils.basic._ANSIBLE_ARGS = old_args - sys.argv = old_argv diff --git a/tests/unit/plugins/module_utils/hwc/test_dict_comparison.py b/tests/unit/plugins/module_utils/hwc/test_dict_comparison.py index 037305d3f9..4f6358ace5 100644 --- a/tests/unit/plugins/module_utils/hwc/test_dict_comparison.py +++ b/tests/unit/plugins/module_utils/hwc/test_dict_comparison.py @@ -9,7 +9,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest from ansible_collections.community.general.plugins.module_utils.hwc_utils import are_different_dicts diff --git a/tests/unit/plugins/module_utils/hwc/test_hwc_utils.py b/tests/unit/plugins/module_utils/hwc/test_hwc_utils.py index 9b0be0bb48..6567bf576b 100644 --- a/tests/unit/plugins/module_utils/hwc/test_hwc_utils.py +++ b/tests/unit/plugins/module_utils/hwc/test_hwc_utils.py @@ -8,7 +8,7 @@ __metaclass__ = type import sys -from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest from ansible_collections.community.general.plugins.module_utils.hwc_utils import (HwcModuleException, navigate_value) diff --git a/tests/unit/plugins/module_utils/identity/keycloak/test_keycloak_connect.py b/tests/unit/plugins/module_utils/identity/keycloak/test_keycloak_connect.py index 9a816cfe25..bbf5d6265f 100644 --- a/tests/unit/plugins/module_utils/identity/keycloak/test_keycloak_connect.py +++ 
b/tests/unit/plugins/module_utils/identity/keycloak/test_keycloak_connect.py @@ -160,6 +160,6 @@ def test_json_without_token_returned(mock_json_without_token_returned): with pytest.raises(KeycloakError) as raised_error: get_token(module_params_creds) assert str(raised_error.value) == ( - 'Could not obtain access token from http://keycloak.url' - '/auth/realms/master/protocol/openid-connect/token' + 'API did not include access_token field in response from ' + 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token' ) diff --git a/tests/unit/plugins/module_utils/net_tools/pritunl/test_api.py b/tests/unit/plugins/module_utils/net_tools/pritunl/test_api.py index 6ddc827a14..a176a55768 100644 --- a/tests/unit/plugins/module_utils/net_tools/pritunl/test_api.py +++ b/tests/unit/plugins/module_utils/net_tools/pritunl/test_api.py @@ -13,7 +13,7 @@ from ansible.module_utils.six import iteritems from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl import ( api, ) -from mock import MagicMock +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import MagicMock __metaclass__ = type diff --git a/tests/unit/plugins/module_utils/test_cmd_runner.py b/tests/unit/plugins/module_utils/test_cmd_runner.py index 50d0a70094..d390f3bec4 100644 --- a/tests/unit/plugins/module_utils/test_cmd_runner.py +++ b/tests/unit/plugins/module_utils/test_cmd_runner.py @@ -10,7 +10,7 @@ from functools import partial import pytest -from ansible_collections.community.general.tests.unit.compat.mock import MagicMock, PropertyMock +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import MagicMock, PropertyMock from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt diff --git a/tests/unit/plugins/module_utils/test_deps.py b/tests/unit/plugins/module_utils/test_deps.py index 70f0eac0fd..8691fc5f72 100644 --- a/tests/unit/plugins/module_utils/test_deps.py +++ b/tests/unit/plugins/module_utils/test_deps.py @@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible_collections.community.general.tests.unit.compat.mock import MagicMock +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import MagicMock import pytest diff --git a/tests/unit/plugins/module_utils/test_known_hosts.py b/tests/unit/plugins/module_utils/test_known_hosts.py index 25e76b66f5..0a1196de93 100644 --- a/tests/unit/plugins/module_utils/test_known_hosts.py +++ b/tests/unit/plugins/module_utils/test_known_hosts.py @@ -93,11 +93,12 @@ def test_get_fqdn_and_port(url, fqdn, port): assert known_hosts.get_fqdn_and_port(url) == (fqdn, port) -@pytest.mark.parametrize('fqdn, port, add_host_key_cmd, stdin', - ((URLS[k]['get_fqdn'], URLS[k]['port'], URLS[k]['add_host_key_cmd'], {}) - for k in sorted(URLS) if URLS[k]['is_ssh_url']), - indirect=['stdin']) -def test_add_host_key(am, mocker, fqdn, port, add_host_key_cmd): +@pytest.mark.parametrize('fqdn, port, add_host_key_cmd', + ((URLS[k]['get_fqdn'], URLS[k]['port'], URLS[k]['add_host_key_cmd']) + for k in sorted(URLS) if URLS[k]['is_ssh_url'])) +def test_add_host_key(mocker, fqdn, port, add_host_key_cmd): + am = mocker.MagicMock() + get_bin_path = mocker.MagicMock() get_bin_path.return_value = keyscan_cmd = "/custom/path/ssh-keyscan" am.get_bin_path = get_bin_path diff --git a/tests/unit/plugins/module_utils/test_module_helper.py b/tests/unit/plugins/module_utils/test_module_helper.py index 
cbcdaae788..a135a20d0a 100644 --- a/tests/unit/plugins/module_utils/test_module_helper.py +++ b/tests/unit/plugins/module_utils/test_module_helper.py @@ -10,123 +10,13 @@ __metaclass__ = type import pytest from ansible_collections.community.general.plugins.module_utils.module_helper import ( - DependencyCtxMgr, VarMeta, VarDict, cause_changes + cause_changes ) -# remove in 11.0.0 -def test_dependency_ctxmgr(): - ctx = DependencyCtxMgr("POTATOES", "Potatoes must be installed") - with ctx: - import potatoes_that_will_never_be_there # noqa: F401, pylint: disable=unused-import - print("POTATOES: ctx.text={0}".format(ctx.text)) - assert ctx.text == "Potatoes must be installed" - assert not ctx.has_it - - ctx = DependencyCtxMgr("POTATOES2") - with ctx: - import potatoes_that_will_never_be_there_again # noqa: F401, pylint: disable=unused-import - assert not ctx.has_it - print("POTATOES2: ctx.text={0}".format(ctx.text)) - assert ctx.text.startswith("No module named") - assert "potatoes_that_will_never_be_there_again" in ctx.text - - ctx = DependencyCtxMgr("TYPING") - with ctx: - import sys # noqa: F401, pylint: disable=unused-import - assert ctx.has_it - - -# remove in 11.0.0 -def test_variable_meta(): - meta = VarMeta() - assert meta.output is True - assert meta.diff is False - assert meta.value is None - meta.set_value("abc") - assert meta.initial_value == "abc" - assert meta.value == "abc" - assert meta.diff_result is None - meta.set_value("def") - assert meta.initial_value == "abc" - assert meta.value == "def" - assert meta.diff_result is None - - -# remove in 11.0.0 -def test_variable_meta_diff(): - meta = VarMeta(diff=True) - assert meta.output is True - assert meta.diff is True - assert meta.value is None - meta.set_value("abc") - assert meta.initial_value == "abc" - assert meta.value == "abc" - assert meta.diff_result is None - meta.set_value("def") - assert meta.initial_value == "abc" - assert meta.value == "def" - assert meta.diff_result == {"before": "abc", "after": "def"} - meta.set_value("ghi") - assert meta.initial_value == "abc" - assert meta.value == "ghi" - assert meta.diff_result == {"before": "abc", "after": "ghi"} - - -# remove in 11.0.0 -def test_vardict(): - vd = VarDict() - vd.set('a', 123) - assert vd['a'] == 123 - assert vd.a == 123 - assert 'a' in vd._meta - assert vd.meta('a').output is True - assert vd.meta('a').diff is False - assert vd.meta('a').change is False - vd['b'] = 456 - assert vd.meta('b').output is True - assert vd.meta('b').diff is False - assert vd.meta('b').change is False - vd.set_meta('a', diff=True, change=True) - vd.set_meta('b', diff=True, output=False) - vd['c'] = 789 - assert vd.has_changed('c') is False - vd['a'] = 'new_a' - assert vd.has_changed('a') is True - vd['c'] = 'new_c' - assert vd.has_changed('c') is False - vd['b'] = 'new_b' - assert vd.has_changed('b') is False - assert vd.a == 'new_a' - assert vd.c == 'new_c' - assert vd.output() == {'a': 'new_a', 'c': 'new_c'} - assert vd.diff() == {'before': {'a': 123}, 'after': {'a': 'new_a'}}, "diff={0}".format(vd.diff()) - - -# remove in 11.0.0 -def test_variable_meta_change(): - vd = VarDict() - vd.set('a', 123, change=True) - vd.set('b', [4, 5, 6], change=True) - vd.set('c', {'m': 7, 'n': 8, 'o': 9}, change=True) - vd.set('d', {'a1': {'a11': 33, 'a12': 34}}, change=True) - - vd.a = 1234 - assert vd.has_changed('a') is True - vd.b.append(7) - assert vd.b == [4, 5, 6, 7] - assert vd.has_changed('b') - vd.c.update({'p': 10}) - assert vd.c == {'m': 7, 'n': 8, 'o': 9, 'p': 10} - assert 
vd.has_changed('c') - vd.d['a1'].update({'a13': 35}) - assert vd.d == {'a1': {'a11': 33, 'a12': 34, 'a13': 35}} - assert vd.has_changed('d') - - # # DEPRECATION NOTICE -# Parameters on_success and on_failure are deprecated and will be removed in community.genral 12.0.0 +# Parameters on_success and on_failure are deprecated and will be removed in community.general 12.0.0 # Remove testcases with those params when releasing 12.0.0 # CAUSE_CHG_DECO_PARAMS = ['deco_args', 'expect_exception', 'expect_changed'] diff --git a/tests/unit/plugins/module_utils/test_ocapi_utils.py b/tests/unit/plugins/module_utils/test_ocapi_utils.py index 3c939b5586..70ab16786c 100644 --- a/tests/unit/plugins/module_utils/test_ocapi_utils.py +++ b/tests/unit/plugins/module_utils/test_ocapi_utils.py @@ -11,7 +11,7 @@ import re import shutil import tempfile -from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest from ansible_collections.community.general.plugins.module_utils.ocapi_utils import OcapiUtils diff --git a/tests/unit/plugins/module_utils/test_python_runner.py b/tests/unit/plugins/module_utils/test_python_runner.py index 8572ee7d78..024d203c04 100644 --- a/tests/unit/plugins/module_utils/test_python_runner.py +++ b/tests/unit/plugins/module_utils/test_python_runner.py @@ -10,7 +10,7 @@ import os import pytest -from ansible_collections.community.general.tests.unit.compat.mock import MagicMock, PropertyMock +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import MagicMock, PropertyMock from ansible_collections.community.general.plugins.module_utils.cmd_runner import cmd_runner_fmt from ansible_collections.community.general.plugins.module_utils.python_runner import PythonRunner diff --git a/tests/unit/plugins/module_utils/xenserver/conftest.py b/tests/unit/plugins/module_utils/xenserver/conftest.py index 3fcea55617..6190bb1ea0 100644 --- a/tests/unit/plugins/module_utils/xenserver/conftest.py +++ b/tests/unit/plugins/module_utils/xenserver/conftest.py @@ -16,7 +16,7 @@ import pytest from .FakeAnsibleModule import FakeAnsibleModule from ansible.module_utils import six -from mock import MagicMock +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import MagicMock @pytest.fixture diff --git a/tests/unit/plugins/modules/conftest.py b/tests/unit/plugins/modules/conftest.py index d357137651..7eaba767a3 100644 --- a/tests/unit/plugins/modules/conftest.py +++ b/tests/unit/plugins/modules/conftest.py @@ -5,45 +5,44 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +import contextlib as _contextlib import json import pytest from ansible.module_utils.six import string_types -from ansible.module_utils.common.text.converters import to_bytes from ansible.module_utils.common._collections_compat import MutableMapping from ansible_collections.community.general.plugins.module_utils import deps +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import set_module_args as _set_module_args -def fix_ansible_args(args): +def _fix_ansible_args(args): if isinstance(args, string_types): - return args + # This should be deprecated! 
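+        # A JSON document is legacy input here: decode it to a mapping first,
+        # so the fixtures below can hand it to set_module_args() like any
+        # other dict of module arguments.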
+ return json.loads(args) if isinstance(args, MutableMapping): - if 'ANSIBLE_MODULE_ARGS' not in args: - args = {'ANSIBLE_MODULE_ARGS': args} - if '_ansible_remote_tmp' not in args['ANSIBLE_MODULE_ARGS']: - args['ANSIBLE_MODULE_ARGS']['_ansible_remote_tmp'] = '/tmp' - if '_ansible_keep_remote_files' not in args['ANSIBLE_MODULE_ARGS']: - args['ANSIBLE_MODULE_ARGS']['_ansible_keep_remote_files'] = False - args = json.dumps(args) return args - else: - raise Exception('Malformed data to the patch_ansible_module pytest fixture') + raise Exception('Malformed data to the patch_ansible_module pytest fixture') @pytest.fixture -def patch_ansible_module(request, mocker): - if hasattr(request, "param"): - args = fix_ansible_args(request.param) - mocker.patch('ansible.module_utils.basic._ANSIBLE_ARGS', to_bytes(args)) - else: - def _patch(args): - args = fix_ansible_args(args) - mocker.patch('ansible.module_utils.basic._ANSIBLE_ARGS', to_bytes(args)) - return _patch +def patch_ansible_module(request): + args = _fix_ansible_args(request.param) + with _set_module_args(args): + yield + + +@pytest.fixture +def patch_ansible_module_uthelper(request): + @_contextlib.contextmanager + def _patch(args): + args = _fix_ansible_args(args) + with _set_module_args(args): + yield + return _patch @pytest.fixture(autouse=True) diff --git a/tests/unit/plugins/modules/gitlab.py b/tests/unit/plugins/modules/gitlab.py index 7a52dc3552..a66ecf856f 100644 --- a/tests/unit/plugins/modules/gitlab.py +++ b/tests/unit/plugins/modules/gitlab.py @@ -12,7 +12,7 @@ import sys from httmock import response # noqa from httmock import urlmatch # noqa -from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest import gitlab @@ -287,11 +287,36 @@ def resp_delete_group(url, request): @urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/access_tokens", method="get") def resp_list_group_access_tokens(url, request): headers = {'content-type': 'application/json'} - content = ('[{"user_id" : 1, "scopes" : ["api"], "name" : "token1", "expires_at" : "2021-01-31",' - '"id" : 1, "active" : false, "created_at" : "2021-01-20T22:11:48.151Z", "revoked" : true,' - '"access_level": 40},{"user_id" : 2, "scopes" : ["api"], "name" : "token2", "expires_at" : "2021-02-31",' - '"id" : 2, "active" : true, "created_at" : "2021-02-20T22:11:48.151Z", "revoked" : false,' - '"access_level": 40}]') + content = ( + '[{"id":689,"name":"test-token","revoked":true,"created_at":"2025-06-02T09:18:01.484Z",' + '"description":null,"scopes":["read_repository","write_repository"],"user_id":1779,' + '"last_used_at":null,"active":false,"expires_at":"2025-07-02","access_level":40,' + '"resource_type":"group","resource_id":1730},' + '{"id":690,"name":"test-token","revoked":true,"created_at":"2025-06-02T09:36:30.650Z",' + '"description":null,"scopes":["read_repository","write_repository"],"user_id":1780,' + '"last_used_at":null,"active":false,"expires_at":"2025-07-02","access_level":40,' + '"resource_type":"group","resource_id":1730},' + '{"id":691,"name":"test-token","revoked":false,"created_at":"2025-06-02T09:39:18.252Z",' + '"description":null,"scopes":["read_repository","write_repository"],"user_id":1781,' + '"last_used_at":null,"active":false,"expires_at":"2025-07-02","access_level":40,' + '"resource_type":"group","resource_id":1730},' + '{"id":695,"name":"test-token-no-revoked","created_at":"2025-06-02T09:39:18.252Z",' + 
'"description":null,"scopes":["read_repository","write_repository"],"user_id":1781,' + '"last_used_at":null,"active":false,"expires_at":"2025-07-02","access_level":40,' + '"resource_type":"group","resource_id":1730},' + '{"id":692,"name":"test-token-two","revoked":true,"created_at":"2025-06-02T09:41:18.442Z",' + '"description":null,"scopes":["read_repository","write_repository"],"user_id":1782,' + '"last_used_at":null,"active":false,"expires_at":"2025-07-02","access_level":40,' + '"resource_type":"group","resource_id":1730},' + '{"id":693,"name":"test-token-three","revoked":true,"created_at":"2025-06-02T09:50:00.976Z"' + ',"description":null,"scopes":["read_repository","write_repository"],"user_id":1783,' + '"last_used_at":null,"active":false,"expires_at":"2025-06-04","access_level":40,' + '"resource_type":"group","resource_id":1730},' + '{"id":694,"name":"test-token-three","revoked":true,"created_at":"2025-06-02T09:56:45.779Z"' + ',"description":null,"scopes":["read_repository","write_repository"],"user_id":1784,' + '"last_used_at":null,"active":false,"expires_at":"2025-06-04","access_level":40,' + '"resource_type":"group","resource_id":1730}]' + ) content = content.encode("utf-8") return response(200, content, headers, None, 5, request) @@ -306,7 +331,7 @@ def resp_create_group_access_tokens(url, request): return response(201, content, headers, None, 5, request) -@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/access_tokens/1", method="delete") +@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/access_tokens/[0-9]+", method="delete") def resp_revoke_group_access_tokens(url, request): headers = {'content-type': 'application/json'} content = ('') @@ -567,11 +592,36 @@ def resp_delete_protected_branch(url, request): @urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/access_tokens", method="get") def resp_list_project_access_tokens(url, request): headers = {'content-type': 'application/json'} - content = ('[{"user_id" : 1, "scopes" : ["api"], "name" : "token1", "expires_at" : "2021-01-31",' - '"id" : 1, "active" : false, "created_at" : "2021-01-20T22:11:48.151Z", "revoked" : true,' - '"access_level": 40},{"user_id" : 2, "scopes" : ["api"], "name" : "token2", "expires_at" : "2021-02-31",' - '"id" : 2, "active" : true, "created_at" : "2021-02-20T22:11:48.151Z", "revoked" : false,' - '"access_level": 40}]') + content = ( + '[{"id":689,"name":"test-token","revoked":true,"created_at":"2025-06-02T09:18:01.484Z",' + '"description":null,"scopes":["read_repository","write_repository"],"user_id":1779,' + '"last_used_at":null,"active":false,"expires_at":"2025-07-02","access_level":40,' + '"resource_type":"project","resource_id":1730},' + '{"id":690,"name":"test-token","revoked":true,"created_at":"2025-06-02T09:36:30.650Z",' + '"description":null,"scopes":["read_repository","write_repository"],"user_id":1780,' + '"last_used_at":null,"active":false,"expires_at":"2025-07-02","access_level":40,' + '"resource_type":"project","resource_id":1730},' + '{"id":691,"name":"test-token","revoked":false,"created_at":"2025-06-02T09:39:18.252Z",' + '"description":null,"scopes":["read_repository","write_repository"],"user_id":1781,' + '"last_used_at":null,"active":false,"expires_at":"2025-07-02","access_level":40,' + '"resource_type":"project","resource_id":1730},' + '{"id":695,"name":"test-token-no-revoked","created_at":"2025-06-02T09:39:18.252Z",' + '"description":null,"scopes":["read_repository","write_repository"],"user_id":1781,' + 
'"last_used_at":null,"active":false,"expires_at":"2025-07-02","access_level":40,' + '"resource_type":"group","resource_id":1730},' + '{"id":692,"name":"test-token-two","revoked":true,"created_at":"2025-06-02T09:41:18.442Z",' + '"description":null,"scopes":["read_repository","write_repository"],"user_id":1782,' + '"last_used_at":null,"active":false,"expires_at":"2025-07-02","access_level":40,' + '"resource_type":"project","resource_id":1730},' + '{"id":693,"name":"test-token-three","revoked":true,"created_at":"2025-06-02T09:50:00.976Z"' + ',"description":null,"scopes":["read_repository","write_repository"],"user_id":1783,' + '"last_used_at":null,"active":false,"expires_at":"2025-06-04","access_level":40,' + '"resource_type":"project","resource_id":1730},' + '{"id":694,"name":"test-token-three","revoked":true,"created_at":"2025-06-02T09:56:45.779Z"' + ',"description":null,"scopes":["read_repository","write_repository"],"user_id":1784,' + '"last_used_at":null,"active":false,"expires_at":"2025-06-04","access_level":40,' + '"resource_type":"project","resource_id":1730}]' + ) content = content.encode("utf-8") return response(200, content, headers, None, 5, request) @@ -586,7 +636,7 @@ def resp_create_project_access_tokens(url, request): return response(201, content, headers, None, 5, request) -@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/access_tokens/1", method="delete") +@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/access_tokens/[0-9]+", method="delete") def resp_revoke_project_access_tokens(url, request): headers = {'content-type': 'application/json'} content = ('') diff --git a/tests/unit/plugins/modules/interfaces_file/test_interfaces_file.py b/tests/unit/plugins/modules/interfaces_file/test_interfaces_file.py index 94e10b75f2..bc64f9ed8e 100644 --- a/tests/unit/plugins/modules/interfaces_file/test_interfaces_file.py +++ b/tests/unit/plugins/modules/interfaces_file/test_interfaces_file.py @@ -8,7 +8,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest from ansible_collections.community.general.plugins.modules import interfaces_file from shutil import copyfile, move import difflib diff --git a/tests/unit/plugins/modules/oneview_module_loader.py b/tests/unit/plugins/modules/oneview_module_loader.py index ae62d9ced1..52ca4f7b9d 100644 --- a/tests/unit/plugins/modules/oneview_module_loader.py +++ b/tests/unit/plugins/modules/oneview_module_loader.py @@ -6,7 +6,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type import sys -from ansible_collections.community.general.tests.unit.compat.mock import Mock +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import Mock # FIXME: These should be done inside of a fixture so that they're only mocked during # these unittests diff --git a/tests/unit/plugins/modules/test_alerta_customer.py b/tests/unit/plugins/modules/test_alerta_customer.py index ccd0ced50e..f8c8b7c889 100644 --- a/tests/unit/plugins/modules/test_alerta_customer.py +++ b/tests/unit/plugins/modules/test_alerta_customer.py @@ -7,9 +7,9 @@ __metaclass__ = type import json import pytest -from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch from 
ansible_collections.community.general.plugins.modules import alerta_customer -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args class MockedReponse(object): @@ -82,169 +82,160 @@ class TestAlertaCustomerModule(ModuleTestCase): def test_without_parameters(self): """Failure if no parameters set""" with self.assertRaises(AnsibleFailJson): - set_module_args({}) - self.module.main() + with set_module_args({}): + self.module.main() def test_without_content(self): """Failure if customer and match are missing""" - set_module_args({ + with set_module_args({ 'alerta_url': "http://localhost:8080", 'api_username': "admin@example.com", 'api_password': "password" - }) - with self.assertRaises(AnsibleFailJson): - self.module.main() + }): + with self.assertRaises(AnsibleFailJson): + self.module.main() def test_successful_existing_customer_creation(self): """Test the customer creation (already exists).""" - set_module_args({ + with set_module_args({ 'alerta_url': "http://localhost:8080", 'api_username': "admin@example.com", 'api_password': "password", 'customer': 'Developer', 'match': 'dev@example.com' - }) - - with patch.object(alerta_customer, "fetch_url") as fetch_url_mock: - fetch_url_mock.return_value = customer_response_page1() - with self.assertRaises(AnsibleExitJson): - self.module.main() - self.assertTrue(fetch_url_mock.call_count, 1) + }): + with patch.object(alerta_customer, "fetch_url") as fetch_url_mock: + fetch_url_mock.return_value = customer_response_page1() + with self.assertRaises(AnsibleExitJson): + self.module.main() + self.assertTrue(fetch_url_mock.call_count, 1) def test_successful_customer_creation(self): """Test the customer creation.""" - set_module_args({ + with set_module_args({ 'alerta_url': "http://localhost:8080", 'api_username': "admin@example.com", 'api_password': "password", 'customer': 'Developer', 'match': 'dev2@example.com' - }) + }): + with patch.object(alerta_customer, "fetch_url") as fetch_url_mock: + fetch_url_mock.return_value = customer_response_page1() + with self.assertRaises(AnsibleExitJson): + self.module.main() - with patch.object(alerta_customer, "fetch_url") as fetch_url_mock: - fetch_url_mock.return_value = customer_response_page1() - with self.assertRaises(AnsibleExitJson): - self.module.main() - - self.assertTrue(fetch_url_mock.call_count, 1) - call_data = json.loads(fetch_url_mock.call_args[1]['data']) - assert call_data['match'] == "dev2@example.com" - assert call_data['customer'] == "Developer" + self.assertTrue(fetch_url_mock.call_count, 1) + call_data = json.loads(fetch_url_mock.call_args[1]['data']) + assert call_data['match'] == "dev2@example.com" + assert call_data['customer'] == "Developer" def test_successful_customer_creation_key(self): """Test the customer creation using api_key.""" - set_module_args({ + with set_module_args({ 'alerta_url': "http://localhost:8080", 'api_key': "demo-key", 'customer': 'Developer', 'match': 'dev2@example.com' - }) + }): + with patch.object(alerta_customer, "fetch_url") as fetch_url_mock: + fetch_url_mock.return_value = customer_response_page1() + with self.assertRaises(AnsibleExitJson): + self.module.main() - with patch.object(alerta_customer, "fetch_url") as fetch_url_mock: - fetch_url_mock.return_value = customer_response_page1() - with 
self.assertRaises(AnsibleExitJson): - self.module.main() - - self.assertTrue(fetch_url_mock.call_count, 1) - call_data = json.loads(fetch_url_mock.call_args[1]['data']) - assert call_data['match'] == "dev2@example.com" - assert call_data['customer'] == "Developer" + self.assertTrue(fetch_url_mock.call_count, 1) + call_data = json.loads(fetch_url_mock.call_args[1]['data']) + assert call_data['match'] == "dev2@example.com" + assert call_data['customer'] == "Developer" def test_failed_not_found(self): """Test failure with wrong URL.""" - set_module_args({ + with set_module_args({ 'alerta_url': "http://localhost:8080/s", 'api_username': "admin@example.com", 'api_password': "password", 'customer': 'Developer', 'match': 'dev@example.com' - }) - - with patch.object(alerta_customer, "fetch_url") as fetch_url_mock: - fetch_url_mock.return_value = (None, {"status": 404, 'msg': 'Not found for request GET on http://localhost:8080/a/api/customers'}) - with self.assertRaises(AnsibleFailJson): - self.module.main() + }): + with patch.object(alerta_customer, "fetch_url") as fetch_url_mock: + fetch_url_mock.return_value = (None, {"status": 404, 'msg': 'Not found for request GET on http://localhost:8080/a/api/customers'}) + with self.assertRaises(AnsibleFailJson): + self.module.main() def test_failed_forbidden(self): """Test failure with wrong user.""" - set_module_args({ + with set_module_args({ 'alerta_url': "http://localhost:8080", 'api_username': "dev@example.com", 'api_password': "password", 'customer': 'Developer', 'match': 'dev@example.com' - }) - - with patch.object(alerta_customer, "fetch_url") as fetch_url_mock: - fetch_url_mock.return_value = (None, {"status": 403, 'msg': 'Permission Denied for GET on http://localhost:8080/api/customers'}) - with self.assertRaises(AnsibleFailJson): - self.module.main() + }): + with patch.object(alerta_customer, "fetch_url") as fetch_url_mock: + fetch_url_mock.return_value = (None, {"status": 403, 'msg': 'Permission Denied for GET on http://localhost:8080/api/customers'}) + with self.assertRaises(AnsibleFailJson): + self.module.main() def test_failed_unauthorized(self): """Test failure with wrong username or password.""" - set_module_args({ + with set_module_args({ 'alerta_url': "http://localhost:8080", 'api_username': "admin@example.com", 'api_password': "password_wrong", 'customer': 'Developer', 'match': 'dev@example.com' - }) - - with patch.object(alerta_customer, "fetch_url") as fetch_url_mock: - fetch_url_mock.return_value = (None, {"status": 401, 'msg': 'Unauthorized to request GET on http://localhost:8080/api/customers'}) - with self.assertRaises(AnsibleFailJson): - self.module.main() + }): + with patch.object(alerta_customer, "fetch_url") as fetch_url_mock: + fetch_url_mock.return_value = (None, {"status": 401, 'msg': 'Unauthorized to request GET on http://localhost:8080/api/customers'}) + with self.assertRaises(AnsibleFailJson): + self.module.main() def test_successful_customer_deletion(self): """Test the customer deletion.""" - set_module_args({ + with set_module_args({ 'alerta_url': "http://localhost:8080", 'api_username': "admin@example.com", 'api_password': "password", 'customer': 'Developer', 'match': 'dev@example.com', 'state': 'absent' - }) - - with patch.object(alerta_customer, "fetch_url") as fetch_url_mock: - fetch_url_mock.return_value = customer_response_page1() - with self.assertRaises(AnsibleExitJson): - self.module.main() + }): + with patch.object(alerta_customer, "fetch_url") as fetch_url_mock: + fetch_url_mock.return_value = 
customer_response_page1() + with self.assertRaises(AnsibleExitJson): + self.module.main() def test_successful_customer_deletion_page2(self): """Test the customer deletion on the second page.""" - set_module_args({ + with set_module_args({ 'alerta_url': "http://localhost:8080", 'api_username': "admin@example.com", 'api_password': "password", 'customer': 'Developer', 'match': 'dev@example.com', 'state': 'absent' - }) - - with patch.object(alerta_customer, "fetch_url") as fetch_url_mock: - fetch_url_mock.return_value = customer_response_page2() - with self.assertRaises(AnsibleExitJson): - self.module.main() + }): + with patch.object(alerta_customer, "fetch_url") as fetch_url_mock: + fetch_url_mock.return_value = customer_response_page2() + with self.assertRaises(AnsibleExitJson): + self.module.main() def test_successful_nonexisting_customer_deletion(self): """Test the customer deletion (non existing).""" - set_module_args({ + with set_module_args({ 'alerta_url': "http://localhost:8080", 'api_username': "admin@example.com", 'api_password': "password", 'customer': 'Billing', 'match': 'dev@example.com', 'state': 'absent' - }) - - with patch.object(alerta_customer, "fetch_url") as fetch_url_mock: - fetch_url_mock.return_value = customer_response_page1() - with self.assertRaises(AnsibleExitJson): - self.module.main() + }): + with patch.object(alerta_customer, "fetch_url") as fetch_url_mock: + fetch_url_mock.return_value = customer_response_page1() + with self.assertRaises(AnsibleExitJson): + self.module.main() diff --git a/tests/unit/plugins/modules/test_apk.py b/tests/unit/plugins/modules/test_apk.py index c952456eff..b25d10f207 100644 --- a/tests/unit/plugins/modules/test_apk.py +++ b/tests/unit/plugins/modules/test_apk.py @@ -5,8 +5,8 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible_collections.community.general.tests.unit.compat import mock -from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat import mock +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest from ansible_collections.community.general.plugins.modules import apk diff --git a/tests/unit/plugins/modules/test_archive.py b/tests/unit/plugins/modules/test_archive.py index 84a1360f1b..39f4ba6afd 100644 --- a/tests/unit/plugins/modules/test_archive.py +++ b/tests/unit/plugins/modules/test_archive.py @@ -9,8 +9,8 @@ __metaclass__ = type import pytest from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.tests.unit.compat.mock import Mock, patch -from ansible_collections.community.general.tests.unit.plugins.modules.utils import ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import Mock, patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ModuleTestCase, set_module_args from ansible_collections.community.general.plugins.modules.archive import get_archive, common_path @@ -25,27 +25,26 @@ class TestArchive(ModuleTestCase): self.os_path_isdir = self.mock_os_path_isdir.stop() def test_archive_removal_safety(self): - set_module_args( + with set_module_args( dict( path=['/foo', '/bar', '/baz'], dest='/foo/destination.tgz', remove=True ) - ) - - module = AnsibleModule( - argument_spec=dict( - path=dict(type='list', elements='path', required=True), - format=dict(type='str', default='gz', choices=['bz2', 
'gz', 'tar', 'xz', 'zip']), - dest=dict(type='path'), - exclude_path=dict(type='list', elements='path', default=[]), - exclusion_patterns=dict(type='list', elements='path'), - force_archive=dict(type='bool', default=False), - remove=dict(type='bool', default=False), - ), - add_file_common_args=True, - supports_check_mode=True, - ) + ): + module = AnsibleModule( + argument_spec=dict( + path=dict(type='list', elements='path', required=True), + format=dict(type='str', default='gz', choices=['bz2', 'gz', 'tar', 'xz', 'zip']), + dest=dict(type='path'), + exclude_path=dict(type='list', elements='path', default=[]), + exclusion_patterns=dict(type='list', elements='path'), + force_archive=dict(type='bool', default=False), + remove=dict(type='bool', default=False), + ), + add_file_common_args=True, + supports_check_mode=True, + ) self.os_path_isdir.side_effect = [True, False, False, True] diff --git a/tests/unit/plugins/modules/test_bitbucket_access_key.py b/tests/unit/plugins/modules/test_bitbucket_access_key.py index 71e28f653c..3b08b2bbbd 100644 --- a/tests/unit/plugins/modules/test_bitbucket_access_key.py +++ b/tests/unit/plugins/modules/test_bitbucket_access_key.py @@ -7,9 +7,9 @@ __metaclass__ = type from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper from ansible_collections.community.general.plugins.modules import bitbucket_access_key -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleFailJson, AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleFailJson, AnsibleExitJson, ModuleTestCase, set_module_args class TestBucketAccessKeyModule(ModuleTestCase): @@ -19,15 +19,15 @@ class TestBucketAccessKeyModule(ModuleTestCase): def test_missing_key_with_present_state(self): with self.assertRaises(AnsibleFailJson) as exec_info: - set_module_args({ + with set_module_args({ 'client_id': 'ABC', 'client_secret': 'XXX', 'workspace': 'name', 'repository': 'repo', 'label': 'key name', 'state': 'present', - }) - self.module.main() + }): + self.module.main() self.assertEqual(exec_info.exception.args[0]['msg'], self.module.error_messages['required_key']) @@ -35,7 +35,7 @@ class TestBucketAccessKeyModule(ModuleTestCase): def test_create_deploy_key(self, *args): with patch.object(self.module, 'create_deploy_key') as create_deploy_key_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({ + with set_module_args({ 'user': 'ABC', 'password': 'XXX', 'workspace': 'name', @@ -43,8 +43,8 @@ class TestBucketAccessKeyModule(ModuleTestCase): 'key': 'public_key', 'label': 'key name', 'state': 'present', - }) - self.module.main() + }): + self.module.main() self.assertEqual(create_deploy_key_mock.call_count, 1) self.assertEqual(exec_info.exception.args[0]['changed'], True) @@ -54,7 +54,7 @@ class TestBucketAccessKeyModule(ModuleTestCase): def test_create_deploy_key_check_mode(self, *args): with patch.object(self.module, 'create_deploy_key') as create_deploy_key_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({ + with set_module_args({ 
'client_id': 'ABC', 'client_secret': 'XXX', 'workspace': 'name', @@ -63,8 +63,8 @@ class TestBucketAccessKeyModule(ModuleTestCase): 'label': 'key name', 'state': 'present', '_ansible_check_mode': True, - }) - self.module.main() + }): + self.module.main() self.assertEqual(create_deploy_key_mock.call_count, 0) self.assertEqual(exec_info.exception.args[0]['changed'], True) @@ -105,7 +105,7 @@ class TestBucketAccessKeyModule(ModuleTestCase): with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock: with patch.object(self.module, 'create_deploy_key') as create_deploy_key_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({ + with set_module_args({ 'client_id': 'ABC', 'client_secret': 'XXX', 'workspace': 'name', @@ -113,8 +113,8 @@ class TestBucketAccessKeyModule(ModuleTestCase): 'key': 'new public key', 'label': 'mykey', 'state': 'present', - }) - self.module.main() + }): + self.module.main() self.assertEqual(delete_deploy_key_mock.call_count, 1) self.assertEqual(create_deploy_key_mock.call_count, 1) @@ -156,7 +156,7 @@ class TestBucketAccessKeyModule(ModuleTestCase): with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock: with patch.object(self.module, 'create_deploy_key') as create_deploy_key_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({ + with set_module_args({ 'client_id': 'ABC', 'client_secret': 'XXX', 'workspace': 'name', @@ -164,8 +164,8 @@ class TestBucketAccessKeyModule(ModuleTestCase): 'key': 'new public key', 'label': 'mykey', 'state': 'present', - }) - self.module.main() + }): + self.module.main() self.assertEqual(delete_deploy_key_mock.call_count, 0) self.assertEqual(create_deploy_key_mock.call_count, 0) @@ -207,7 +207,7 @@ class TestBucketAccessKeyModule(ModuleTestCase): with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock: with patch.object(self.module, 'create_deploy_key') as create_deploy_key_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({ + with set_module_args({ 'client_id': 'ABC', 'client_secret': 'XXX', 'workspace': 'name', @@ -216,8 +216,8 @@ class TestBucketAccessKeyModule(ModuleTestCase): 'label': 'mykey', 'state': 'present', '_ansible_check_mode': True, - }) - self.module.main() + }): + self.module.main() self.assertEqual(delete_deploy_key_mock.call_count, 0) self.assertEqual(create_deploy_key_mock.call_count, 0) @@ -258,15 +258,15 @@ class TestBucketAccessKeyModule(ModuleTestCase): def test_delete_deploy_key(self, *args): with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({ + with set_module_args({ 'client_id': 'ABC', 'client_secret': 'XXX', 'workspace': 'name', 'repository': 'repo', 'label': 'mykey', 'state': 'absent', - }) - self.module.main() + }): + self.module.main() self.assertEqual(delete_deploy_key_mock.call_count, 1) self.assertEqual(exec_info.exception.args[0]['changed'], True) @@ -276,15 +276,15 @@ class TestBucketAccessKeyModule(ModuleTestCase): def test_delete_absent_deploy_key(self, *args): with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({ + with set_module_args({ 'client_id': 'ABC', 'client_secret': 'XXX', 'workspace': 'name', 'repository': 'repo', 'label': 'mykey', 'state': 'absent', - }) - self.module.main() + }): + self.module.main() self.assertEqual(delete_deploy_key_mock.call_count, 0) 
self.assertEqual(exec_info.exception.args[0]['changed'], False) @@ -324,7 +324,7 @@ class TestBucketAccessKeyModule(ModuleTestCase): def test_delete_deploy_key_check_mode(self, *args): with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({ + with set_module_args({ 'client_id': 'ABC', 'client_secret': 'XXX', 'workspace': 'name', @@ -332,8 +332,8 @@ class TestBucketAccessKeyModule(ModuleTestCase): 'label': 'mykey', 'state': 'absent', '_ansible_check_mode': True, - }) - self.module.main() + }): + self.module.main() self.assertEqual(delete_deploy_key_mock.call_count, 0) self.assertEqual(exec_info.exception.args[0]['changed'], True) diff --git a/tests/unit/plugins/modules/test_bitbucket_pipeline_key_pair.py b/tests/unit/plugins/modules/test_bitbucket_pipeline_key_pair.py index a1f5478c20..68ded936ab 100644 --- a/tests/unit/plugins/modules/test_bitbucket_pipeline_key_pair.py +++ b/tests/unit/plugins/modules/test_bitbucket_pipeline_key_pair.py @@ -7,9 +7,9 @@ __metaclass__ = type from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper from ansible_collections.community.general.plugins.modules import bitbucket_pipeline_key_pair -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleFailJson, AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleFailJson, AnsibleExitJson, ModuleTestCase, set_module_args class TestBucketPipelineKeyPairModule(ModuleTestCase): @@ -19,14 +19,14 @@ class TestBucketPipelineKeyPairModule(ModuleTestCase): def test_missing_keys_with_present_state(self): with self.assertRaises(AnsibleFailJson) as exec_info: - set_module_args({ + with set_module_args({ 'client_id': 'ABC', 'client_secret': 'XXX', 'workspace': 'name', 'repository': 'repo', 'state': 'present', - }) - self.module.main() + }): + self.module.main() self.assertEqual(exec_info.exception.args[0]['msg'], self.module.error_messages['required_keys']) @@ -34,7 +34,7 @@ class TestBucketPipelineKeyPairModule(ModuleTestCase): def test_create_keys(self, *args): with patch.object(self.module, 'update_ssh_key_pair') as update_ssh_key_pair_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({ + with set_module_args({ 'user': 'ABC', 'password': 'XXX', 'workspace': 'name', @@ -42,8 +42,8 @@ class TestBucketPipelineKeyPairModule(ModuleTestCase): 'public_key': 'public', 'private_key': 'PRIVATE', 'state': 'present', - }) - self.module.main() + }): + self.module.main() self.assertEqual(update_ssh_key_pair_mock.call_count, 1) self.assertEqual(exec_info.exception.args[0]['changed'], True) @@ -53,7 +53,7 @@ class TestBucketPipelineKeyPairModule(ModuleTestCase): def test_create_keys_check_mode(self, *args): with patch.object(self.module, 'update_ssh_key_pair') as update_ssh_key_pair_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({ + with set_module_args({ 'client_id': 'ABC', 'client_secret': 'XXX', 'workspace': 'name', @@ -62,8 +62,8 @@ class 
TestBucketPipelineKeyPairModule(ModuleTestCase): 'private_key': 'PRIVATE', 'state': 'present', '_ansible_check_mode': True, - }) - self.module.main() + }): + self.module.main() self.assertEqual(update_ssh_key_pair_mock.call_count, 0) self.assertEqual(exec_info.exception.args[0]['changed'], True) @@ -76,7 +76,7 @@ class TestBucketPipelineKeyPairModule(ModuleTestCase): def test_update_keys(self, *args): with patch.object(self.module, 'update_ssh_key_pair') as update_ssh_key_pair_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({ + with set_module_args({ 'client_id': 'ABC', 'client_secret': 'XXX', 'workspace': 'name', @@ -84,8 +84,8 @@ class TestBucketPipelineKeyPairModule(ModuleTestCase): 'public_key': 'public', 'private_key': 'PRIVATE', 'state': 'present', - }) - self.module.main() + }): + self.module.main() self.assertEqual(update_ssh_key_pair_mock.call_count, 1) self.assertEqual(exec_info.exception.args[0]['changed'], True) @@ -98,7 +98,7 @@ class TestBucketPipelineKeyPairModule(ModuleTestCase): def test_dont_update_same_key(self, *args): with patch.object(self.module, 'update_ssh_key_pair') as update_ssh_key_pair_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({ + with set_module_args({ 'client_id': 'ABC', 'client_secret': 'XXX', 'workspace': 'name', @@ -106,8 +106,8 @@ class TestBucketPipelineKeyPairModule(ModuleTestCase): 'public_key': 'public', 'private_key': 'PRIVATE', 'state': 'present', - }) - self.module.main() + }): + self.module.main() self.assertEqual(update_ssh_key_pair_mock.call_count, 0) self.assertEqual(exec_info.exception.args[0]['changed'], False) @@ -120,7 +120,7 @@ class TestBucketPipelineKeyPairModule(ModuleTestCase): def test_update_keys_check_mode(self, *args): with patch.object(self.module, 'update_ssh_key_pair') as update_ssh_key_pair_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({ + with set_module_args({ 'client_id': 'ABC', 'client_secret': 'XXX', 'workspace': 'name', @@ -129,8 +129,8 @@ class TestBucketPipelineKeyPairModule(ModuleTestCase): 'private_key': 'PRIVATE', 'state': 'present', '_ansible_check_mode': True, - }) - self.module.main() + }): + self.module.main() self.assertEqual(update_ssh_key_pair_mock.call_count, 0) self.assertEqual(exec_info.exception.args[0]['changed'], True) @@ -143,14 +143,14 @@ class TestBucketPipelineKeyPairModule(ModuleTestCase): def test_delete_keys(self, *args): with patch.object(self.module, 'delete_ssh_key_pair') as delete_ssh_key_pair_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({ + with set_module_args({ 'client_id': 'ABC', 'client_secret': 'XXX', 'workspace': 'name', 'repository': 'repo', 'state': 'absent', - }) - self.module.main() + }): + self.module.main() self.assertEqual(delete_ssh_key_pair_mock.call_count, 1) self.assertEqual(exec_info.exception.args[0]['changed'], True) @@ -160,14 +160,14 @@ class TestBucketPipelineKeyPairModule(ModuleTestCase): def test_delete_absent_keys(self, *args): with patch.object(self.module, 'delete_ssh_key_pair') as delete_ssh_key_pair_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({ + with set_module_args({ 'client_id': 'ABC', 'client_secret': 'XXX', 'workspace': 'name', 'repository': 'repo', 'state': 'absent', - }) - self.module.main() + }): + self.module.main() self.assertEqual(delete_ssh_key_pair_mock.call_count, 0) self.assertEqual(exec_info.exception.args[0]['changed'], False) @@ -180,15 +180,15 @@ class 
TestBucketPipelineKeyPairModule(ModuleTestCase): def test_delete_keys_check_mode(self, *args): with patch.object(self.module, 'delete_ssh_key_pair') as delete_ssh_key_pair_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({ + with set_module_args({ 'client_id': 'ABC', 'client_secret': 'XXX', 'workspace': 'name', 'repository': 'repo', 'state': 'absent', '_ansible_check_mode': True, - }) - self.module.main() + }): + self.module.main() self.assertEqual(delete_ssh_key_pair_mock.call_count, 0) self.assertEqual(exec_info.exception.args[0]['changed'], True) diff --git a/tests/unit/plugins/modules/test_bitbucket_pipeline_known_host.py b/tests/unit/plugins/modules/test_bitbucket_pipeline_known_host.py index 07709f1a86..828ae00661 100644 --- a/tests/unit/plugins/modules/test_bitbucket_pipeline_known_host.py +++ b/tests/unit/plugins/modules/test_bitbucket_pipeline_known_host.py @@ -10,9 +10,9 @@ import pytest from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper from ansible_collections.community.general.plugins.modules import bitbucket_pipeline_known_host from ansible_collections.community.general.plugins.modules.bitbucket_pipeline_known_host import HAS_PARAMIKO -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args class TestBucketPipelineKnownHostModule(ModuleTestCase): @@ -26,15 +26,15 @@ class TestBucketPipelineKnownHostModule(ModuleTestCase): def test_create_known_host(self, *args): with patch.object(self.module, 'create_known_host') as create_known_host_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({ + with set_module_args({ 'client_id': 'ABC', 'client_secret': 'XXX', 'workspace': 'name', 'repository': 'repo', 'name': 'bitbucket.org', 'state': 'present', - }) - self.module.main() + }): + self.module.main() self.assertEqual(create_known_host_mock.call_count, 1) self.assertEqual(exec_info.exception.args[0]['changed'], True) @@ -44,7 +44,7 @@ class TestBucketPipelineKnownHostModule(ModuleTestCase): def test_create_known_host_with_key(self, *args): with patch.object(self.module, 'get_host_key') as get_host_key_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({ + with set_module_args({ 'user': 'ABC', 'password': 'XXX', 'workspace': 'name', @@ -52,8 +52,8 @@ class TestBucketPipelineKnownHostModule(ModuleTestCase): 'name': 'bitbucket.org', 'key': 'ssh-rsa public', 'state': 'present', - }) - self.module.main() + }): + self.module.main() self.assertEqual(get_host_key_mock.call_count, 0) self.assertEqual(exec_info.exception.args[0]['changed'], True) @@ -75,15 +75,15 @@ class TestBucketPipelineKnownHostModule(ModuleTestCase): def test_dont_create_same_value(self, *args): with patch.object(self.module, 'create_known_host') as create_known_host_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({ + with set_module_args({ 'client_id': 'ABC', 'client_secret': 'XXX', 'workspace': 
'name', 'repository': 'repo', 'name': 'bitbucket.org', 'state': 'present', - }) - self.module.main() + }): + self.module.main() self.assertEqual(create_known_host_mock.call_count, 0) self.assertEqual(exec_info.exception.args[0]['changed'], False) @@ -94,7 +94,7 @@ class TestBucketPipelineKnownHostModule(ModuleTestCase): def test_create_known_host_check_mode(self, *args): with patch.object(self.module, 'create_known_host') as create_known_host_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({ + with set_module_args({ 'client_id': 'ABC', 'client_secret': 'XXX', 'workspace': 'name', @@ -102,8 +102,8 @@ class TestBucketPipelineKnownHostModule(ModuleTestCase): 'name': 'bitbucket.org', 'state': 'present', '_ansible_check_mode': True, - }) - self.module.main() + }): + self.module.main() self.assertEqual(create_known_host_mock.call_count, 0) self.assertEqual(exec_info.exception.args[0]['changed'], True) @@ -125,15 +125,15 @@ class TestBucketPipelineKnownHostModule(ModuleTestCase): def test_delete_known_host(self, *args): with patch.object(self.module, 'delete_known_host') as delete_known_host_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({ + with set_module_args({ 'client_id': 'ABC', 'client_secret': 'XXX', 'workspace': 'name', 'repository': 'repo', 'name': 'bitbucket.org', 'state': 'absent', - }) - self.module.main() + }): + self.module.main() self.assertEqual(delete_known_host_mock.call_count, 1) self.assertEqual(exec_info.exception.args[0]['changed'], True) @@ -144,15 +144,15 @@ class TestBucketPipelineKnownHostModule(ModuleTestCase): def test_delete_absent_known_host(self, *args): with patch.object(self.module, 'delete_known_host') as delete_known_host_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({ + with set_module_args({ 'client_id': 'ABC', 'client_secret': 'XXX', 'workspace': 'name', 'repository': 'repo', 'name': 'bitbucket.org', 'state': 'absent', - }) - self.module.main() + }): + self.module.main() self.assertEqual(delete_known_host_mock.call_count, 0) self.assertEqual(exec_info.exception.args[0]['changed'], False) @@ -174,7 +174,7 @@ class TestBucketPipelineKnownHostModule(ModuleTestCase): def test_delete_known_host_check_mode(self, *args): with patch.object(self.module, 'delete_known_host') as delete_known_host_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({ + with set_module_args({ 'client_id': 'ABC', 'client_secret': 'XXX', 'workspace': 'name', @@ -182,8 +182,8 @@ class TestBucketPipelineKnownHostModule(ModuleTestCase): 'name': 'bitbucket.org', 'state': 'absent', '_ansible_check_mode': True, - }) - self.module.main() + }): + self.module.main() self.assertEqual(delete_known_host_mock.call_count, 0) self.assertEqual(exec_info.exception.args[0]['changed'], True) diff --git a/tests/unit/plugins/modules/test_bitbucket_pipeline_variable.py b/tests/unit/plugins/modules/test_bitbucket_pipeline_variable.py index 6f710189c9..47c77b2b0b 100644 --- a/tests/unit/plugins/modules/test_bitbucket_pipeline_variable.py +++ b/tests/unit/plugins/modules/test_bitbucket_pipeline_variable.py @@ -7,9 +7,9 @@ __metaclass__ = type from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper from ansible_collections.community.general.plugins.modules import bitbucket_pipeline_variable -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.tests.unit.compat.mock import 
patch -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleFailJson, AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleFailJson, AnsibleExitJson, ModuleTestCase, set_module_args class TestBucketPipelineVariableModule(ModuleTestCase): @@ -19,27 +19,27 @@ class TestBucketPipelineVariableModule(ModuleTestCase): def test_without_required_parameters(self): with self.assertRaises(AnsibleFailJson) as exec_info: - set_module_args({ + with set_module_args({ 'workspace': 'name', 'repository': 'repo', 'name': 'PIPELINE_VAR_NAME', 'state': 'absent', - }) - self.module.main() + }): + self.module.main() self.assertEqual(exec_info.exception.args[0]['failed'], True) def test_missing_value_with_present_state(self): with self.assertRaises(AnsibleFailJson) as exec_info: - set_module_args({ + with set_module_args({ 'client_id': 'ABC', 'client_secret': 'XXX', 'workspace': 'name', 'repository': 'repo', 'name': 'PIPELINE_VAR_NAME', 'state': 'present', - }) - self.module.main() + }): + self.module.main() self.assertEqual(exec_info.exception.args[0]['msg'], self.module.error_messages['required_value']) @@ -51,13 +51,13 @@ class TestBucketPipelineVariableModule(ModuleTestCase): @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value=None) def test_oauth_env_vars_params(self, *args): with self.assertRaises(AnsibleExitJson): - set_module_args({ + with set_module_args({ 'workspace': 'name', 'repository': 'repo', 'name': 'PIPELINE_VAR_NAME', 'state': 'absent', - }) - self.module.main() + }): + self.module.main() @patch.dict('os.environ', { 'BITBUCKET_USERNAME': 'ABC', @@ -66,19 +66,19 @@ class TestBucketPipelineVariableModule(ModuleTestCase): @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value=None) def test_basic_auth_env_vars_params(self, *args): with self.assertRaises(AnsibleExitJson): - set_module_args({ + with set_module_args({ 'workspace': 'name', 'repository': 'repo', 'name': 'PIPELINE_VAR_NAME', 'state': 'absent', - }) - self.module.main() + }): + self.module.main() @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value=None) def test_create_variable(self, *args): with patch.object(self.module, 'create_pipeline_variable') as create_pipeline_variable_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({ + with set_module_args({ 'user': 'ABC', 'password': 'XXX', 'workspace': 'name', @@ -86,8 +86,8 @@ class TestBucketPipelineVariableModule(ModuleTestCase): 'name': 'PIPELINE_VAR_NAME', 'value': '42', 'state': 'present', - }) - self.module.main() + }): + self.module.main() self.assertEqual(create_pipeline_variable_mock.call_count, 1) self.assertEqual(exec_info.exception.args[0]['changed'], True) @@ -97,7 +97,7 @@ class TestBucketPipelineVariableModule(ModuleTestCase): def test_create_variable_check_mode(self, *args): with patch.object(self.module, 'create_pipeline_variable') as create_pipeline_variable_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({ + with set_module_args({ 'client_id': 'ABC', 'client_secret': 'XXX', 'workspace': 'name', @@ -106,8 +106,8 @@ class TestBucketPipelineVariableModule(ModuleTestCase): 'value': '42', 'state': 
'present', '_ansible_check_mode': True, - }) - self.module.main() + }): + self.module.main() self.assertEqual(create_pipeline_variable_mock.call_count, 0) self.assertEqual(exec_info.exception.args[0]['changed'], True) @@ -123,7 +123,7 @@ class TestBucketPipelineVariableModule(ModuleTestCase): def test_update_variable(self, *args): with patch.object(self.module, 'update_pipeline_variable') as update_pipeline_variable_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({ + with set_module_args({ 'client_id': 'ABC', 'client_secret': 'XXX', 'workspace': 'name', @@ -131,8 +131,8 @@ class TestBucketPipelineVariableModule(ModuleTestCase): 'name': 'PIPELINE_VAR_NAME', 'value': '42', 'state': 'present', - }) - self.module.main() + }): + self.module.main() self.assertEqual(update_pipeline_variable_mock.call_count, 1) self.assertEqual(exec_info.exception.args[0]['changed'], True) @@ -147,7 +147,7 @@ class TestBucketPipelineVariableModule(ModuleTestCase): def test_update_secured_variable(self, *args): with patch.object(self.module, 'update_pipeline_variable') as update_pipeline_variable_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({ + with set_module_args({ 'client_id': 'ABC', 'client_secret': 'XXX', 'workspace': 'name', @@ -156,8 +156,8 @@ class TestBucketPipelineVariableModule(ModuleTestCase): 'value': '42', 'secured': True, 'state': 'present', - }) - self.module.main() + }): + self.module.main() self.assertEqual(update_pipeline_variable_mock.call_count, 1) self.assertEqual(exec_info.exception.args[0]['changed'], True) @@ -173,7 +173,7 @@ class TestBucketPipelineVariableModule(ModuleTestCase): def test_update_secured_state(self, *args): with patch.object(self.module, 'update_pipeline_variable') as update_pipeline_variable_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({ + with set_module_args({ 'client_id': 'ABC', 'client_secret': 'XXX', 'workspace': 'name', @@ -182,8 +182,8 @@ class TestBucketPipelineVariableModule(ModuleTestCase): 'value': '42', 'secured': True, 'state': 'present', - }) - self.module.main() + }): + self.module.main() self.assertEqual(update_pipeline_variable_mock.call_count, 1) self.assertEqual(exec_info.exception.args[0]['changed'], True) @@ -199,7 +199,7 @@ class TestBucketPipelineVariableModule(ModuleTestCase): def test_dont_update_same_value(self, *args): with patch.object(self.module, 'update_pipeline_variable') as update_pipeline_variable_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({ + with set_module_args({ 'client_id': 'ABC', 'client_secret': 'XXX', 'workspace': 'name', @@ -207,8 +207,8 @@ class TestBucketPipelineVariableModule(ModuleTestCase): 'name': 'PIPELINE_VAR_NAME', 'value': '42', 'state': 'present', - }) - self.module.main() + }): + self.module.main() self.assertEqual(update_pipeline_variable_mock.call_count, 0) self.assertEqual(exec_info.exception.args[0]['changed'], False) @@ -224,7 +224,7 @@ class TestBucketPipelineVariableModule(ModuleTestCase): def test_update_variable_check_mode(self, *args): with patch.object(self.module, 'update_pipeline_variable') as update_pipeline_variable_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({ + with set_module_args({ 'client_id': 'ABC', 'client_secret': 'XXX', 'workspace': 'name', @@ -233,8 +233,8 @@ class TestBucketPipelineVariableModule(ModuleTestCase): 'value': '42', 'state': 'present', '_ansible_check_mode': True, - }) - self.module.main() + }): + self.module.main() 
self.assertEqual(update_pipeline_variable_mock.call_count, 0) self.assertEqual(exec_info.exception.args[0]['changed'], True) @@ -250,15 +250,15 @@ class TestBucketPipelineVariableModule(ModuleTestCase): def test_delete_variable(self, *args): with patch.object(self.module, 'delete_pipeline_variable') as delete_pipeline_variable_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({ + with set_module_args({ 'client_id': 'ABC', 'client_secret': 'XXX', 'workspace': 'name', 'repository': 'repo', 'name': 'PIPELINE_VAR_NAME', 'state': 'absent', - }) - self.module.main() + }): + self.module.main() self.assertEqual(delete_pipeline_variable_mock.call_count, 1) self.assertEqual(exec_info.exception.args[0]['changed'], True) @@ -268,15 +268,15 @@ class TestBucketPipelineVariableModule(ModuleTestCase): def test_delete_absent_variable(self, *args): with patch.object(self.module, 'delete_pipeline_variable') as delete_pipeline_variable_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({ + with set_module_args({ 'client_id': 'ABC', 'client_secret': 'XXX', 'workspace': 'name', 'repository': 'repo', 'name': 'PIPELINE_VAR_NAME', 'state': 'absent', - }) - self.module.main() + }): + self.module.main() self.assertEqual(delete_pipeline_variable_mock.call_count, 0) self.assertEqual(exec_info.exception.args[0]['changed'], False) @@ -292,7 +292,7 @@ class TestBucketPipelineVariableModule(ModuleTestCase): def test_delete_variable_check_mode(self, *args): with patch.object(self.module, 'delete_pipeline_variable') as delete_pipeline_variable_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - set_module_args({ + with set_module_args({ 'client_id': 'ABC', 'client_secret': 'XXX', 'workspace': 'name', @@ -300,8 +300,8 @@ class TestBucketPipelineVariableModule(ModuleTestCase): 'name': 'PIPELINE_VAR_NAME', 'state': 'absent', '_ansible_check_mode': True, - }) - self.module.main() + }): + self.module.main() self.assertEqual(delete_pipeline_variable_mock.call_count, 0) self.assertEqual(exec_info.exception.args[0]['changed'], True) diff --git a/tests/unit/plugins/modules/test_bootc_manage.py b/tests/unit/plugins/modules/test_bootc_manage.py index 5393a57a07..481fd8feaf 100644 --- a/tests/unit/plugins/modules/test_bootc_manage.py +++ b/tests/unit/plugins/modules/test_bootc_manage.py @@ -5,9 +5,9 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch from ansible_collections.community.general.plugins.modules import bootc_manage -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args class TestBootcManageModule(ModuleTestCase): @@ -21,52 +21,52 @@ class TestBootcManageModule(ModuleTestCase): def test_switch_without_image(self): """Failure if state is 'switch' but no image provided""" - set_module_args({'state': 'switch'}) - with self.assertRaises(AnsibleFailJson) as result: - self.module.main() + with set_module_args({'state': 'switch'}): + with self.assertRaises(AnsibleFailJson) as result: + self.module.main() self.assertEqual(result.exception.args[0]['msg'], "state is switch but all of the following are missing: 
image") def test_switch_with_image(self): """Test successful switch with image provided""" - set_module_args({'state': 'switch', 'image': 'example.com/image:latest'}) - with patch('ansible.module_utils.basic.AnsibleModule.run_command') as run_command_mock: - run_command_mock.return_value = (0, 'Queued for next boot: ', '') - with self.assertRaises(AnsibleExitJson) as result: - self.module.main() + with set_module_args({'state': 'switch', 'image': 'example.com/image:latest'}): + with patch('ansible.module_utils.basic.AnsibleModule.run_command') as run_command_mock: + run_command_mock.return_value = (0, 'Queued for next boot: ', '') + with self.assertRaises(AnsibleExitJson) as result: + self.module.main() self.assertTrue(result.exception.args[0]['changed']) def test_latest_state(self): """Test successful upgrade to the latest state""" - set_module_args({'state': 'latest'}) - with patch('ansible.module_utils.basic.AnsibleModule.run_command') as run_command_mock: - run_command_mock.return_value = (0, 'Queued for next boot: ', '') - with self.assertRaises(AnsibleExitJson) as result: - self.module.main() + with set_module_args({'state': 'latest'}): + with patch('ansible.module_utils.basic.AnsibleModule.run_command') as run_command_mock: + run_command_mock.return_value = (0, 'Queued for next boot: ', '') + with self.assertRaises(AnsibleExitJson) as result: + self.module.main() self.assertTrue(result.exception.args[0]['changed']) def test_latest_state_no_change(self): """Test no change for latest state""" - set_module_args({'state': 'latest'}) - with patch('ansible.module_utils.basic.AnsibleModule.run_command') as run_command_mock: - run_command_mock.return_value = (0, 'No changes in ', '') - with self.assertRaises(AnsibleExitJson) as result: - self.module.main() + with set_module_args({'state': 'latest'}): + with patch('ansible.module_utils.basic.AnsibleModule.run_command') as run_command_mock: + run_command_mock.return_value = (0, 'No changes in ', '') + with self.assertRaises(AnsibleExitJson) as result: + self.module.main() self.assertFalse(result.exception.args[0]['changed']) def test_switch_image_failure(self): """Test failure during image switch""" - set_module_args({'state': 'switch', 'image': 'example.com/image:latest'}) - with patch('ansible.module_utils.basic.AnsibleModule.run_command') as run_command_mock: - run_command_mock.return_value = (1, '', 'ERROR') - with self.assertRaises(AnsibleFailJson) as result: - self.module.main() + with set_module_args({'state': 'switch', 'image': 'example.com/image:latest'}): + with patch('ansible.module_utils.basic.AnsibleModule.run_command') as run_command_mock: + run_command_mock.return_value = (1, '', 'ERROR') + with self.assertRaises(AnsibleFailJson) as result: + self.module.main() self.assertEqual(result.exception.args[0]['msg'], 'ERROR: Command execution failed.') def test_latest_state_failure(self): """Test failure during upgrade""" - set_module_args({'state': 'latest'}) - with patch('ansible.module_utils.basic.AnsibleModule.run_command') as run_command_mock: - run_command_mock.return_value = (1, '', 'ERROR') - with self.assertRaises(AnsibleFailJson) as result: - self.module.main() + with set_module_args({'state': 'latest'}): + with patch('ansible.module_utils.basic.AnsibleModule.run_command') as run_command_mock: + run_command_mock.return_value = (1, '', 'ERROR') + with self.assertRaises(AnsibleFailJson) as result: + self.module.main() self.assertEqual(result.exception.args[0]['msg'], 'ERROR: Command execution failed.') diff --git 
a/tests/unit/plugins/modules/test_campfire.py b/tests/unit/plugins/modules/test_campfire.py index ef0dca5ed6..797b2668bf 100644 --- a/tests/unit/plugins/modules/test_campfire.py +++ b/tests/unit/plugins/modules/test_campfire.py @@ -6,9 +6,9 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type import pytest -from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch from ansible_collections.community.general.plugins.modules import campfire -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args class TestCampfireModule(ModuleTestCase): @@ -27,70 +27,67 @@ class TestCampfireModule(ModuleTestCase): def test_without_required_parameters(self): """Failure must occur when all parameters are missing""" with self.assertRaises(AnsibleFailJson): - set_module_args({}) - self.module.main() + with set_module_args({}): + self.module.main() def test_successful_message(self): """Test successful message""" - set_module_args({ + with set_module_args({ 'subscription': 'test', 'token': 'abc', 'room': 'test', 'msg': 'test' - }) + }): + with patch.object(campfire, "fetch_url") as fetch_url_mock: + fetch_url_mock.return_value = (None, {"status": 200}) + with self.assertRaises(AnsibleExitJson): + self.module.main() - with patch.object(campfire, "fetch_url") as fetch_url_mock: - fetch_url_mock.return_value = (None, {"status": 200}) - with self.assertRaises(AnsibleExitJson): - self.module.main() + assert fetch_url_mock.call_count == 1 + url = fetch_url_mock.call_args[0][1] + data = fetch_url_mock.call_args[1]['data'] - assert fetch_url_mock.call_count == 1 - url = fetch_url_mock.call_args[0][1] - data = fetch_url_mock.call_args[1]['data'] - - assert url == 'https://test.campfirenow.com/room/test/speak.xml' - assert data == 'test' + assert url == 'https://test.campfirenow.com/room/test/speak.xml' + assert data == 'test' def test_successful_message_with_notify(self): """Test successful message with notify""" - set_module_args({ + with set_module_args({ 'subscription': 'test', 'token': 'abc', 'room': 'test', 'msg': 'test', 'notify': 'bell' - }) + }): + with patch.object(campfire, "fetch_url") as fetch_url_mock: + fetch_url_mock.return_value = (None, {"status": 200}) + with self.assertRaises(AnsibleExitJson): + self.module.main() - with patch.object(campfire, "fetch_url") as fetch_url_mock: - fetch_url_mock.return_value = (None, {"status": 200}) - with self.assertRaises(AnsibleExitJson): - self.module.main() + assert fetch_url_mock.call_count == 2 + notify_call = fetch_url_mock.mock_calls[0] + url = notify_call[1][1] + data = notify_call[2]['data'] - assert fetch_url_mock.call_count == 2 - notify_call = fetch_url_mock.mock_calls[0] - url = notify_call[1][1] - data = notify_call[2]['data'] + assert url == 'https://test.campfirenow.com/room/test/speak.xml' + assert data == 'SoundMessagebell' - assert url == 'https://test.campfirenow.com/room/test/speak.xml' - assert data == 'SoundMessagebell' + message_call = fetch_url_mock.mock_calls[1] + url = message_call[1][1] + data = message_call[2]['data'] - message_call = fetch_url_mock.mock_calls[1] - url = message_call[1][1] - data = message_call[2]['data'] - - assert url ==
'https://test.campfirenow.com/room/test/speak.xml' - assert data == 'test' + assert url == 'https://test.campfirenow.com/room/test/speak.xml' + assert data == 'test' def test_failure_message(self): """Test failure message""" - set_module_args({ + with set_module_args({ 'subscription': 'test', 'token': 'abc', 'room': 'test', 'msg': 'test' - }) - - with patch.object(campfire, "fetch_url") as fetch_url_mock: - fetch_url_mock.return_value = (None, {"status": 403}) - with self.assertRaises(AnsibleFailJson): - self.module.main() + }): + with patch.object(campfire, "fetch_url") as fetch_url_mock: + fetch_url_mock.return_value = (None, {"status": 403}) + with self.assertRaises(AnsibleFailJson): + self.module.main() diff --git a/tests/unit/plugins/modules/test_circonus_annotation.py b/tests/unit/plugins/modules/test_circonus_annotation.py index 7378e62a27..81feb8a7eb 100644 --- a/tests/unit/plugins/modules/test_circonus_annotation.py +++ b/tests/unit/plugins/modules/test_circonus_annotation.py @@ -12,10 +12,10 @@ import re import uuid from urllib3.response import HTTPResponse -from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch from ansible.module_utils.common.text.converters import to_bytes from ansible_collections.community.general.plugins.modules import circonus_annotation -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args class TestCirconusAnnotation(ModuleTestCase): @@ -30,45 +30,46 @@ class TestCirconusAnnotation(ModuleTestCase): def test_without_required_parameters(self): """Failure must occur when all parameters are missing""" with self.assertRaises(AnsibleFailJson): - set_module_args({}) - self.module.main() + with set_module_args({}): + self.module.main() def test_add_annotation(self): """Check that result is changed""" - set_module_args({ + with set_module_args({ 'category': 'test category', 'description': 'test description', 'title': 'test title', 'api_key': str(uuid.uuid4()), - }) + }): - cid = '/annotation/100000' + cid = '/annotation/100000' - def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None): - data = { - '_cid': cid, - '_created': 1502146995, - '_last_modified': 1502146995, - '_last_modified_by': '/user/1000', - 'category': 'test category', - 'description': 'test description', - 'rel_metrics': [], - 'start': 1502145480, - 'stop': None, - 'title': 'test title', - } - raw = to_bytes(json.dumps(data)) - resp = HTTPResponse(body=io.BytesIO(raw), preload_content=False) - resp.status = 200 - resp.reason = 'OK' - resp.headers = {'X-Circonus-API-Version': '2.00'} - return self.build_response(request, resp) + def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None): + data = { + '_cid': cid, + '_created': 1502146995, + '_last_modified': 1502146995, + '_last_modified_by': '/user/1000', + 'category': 'test category', + 'description': 'test description', + 'rel_metrics': [], + 'start': 1502145480, + 'stop': None, + 'title': 'test title', + } + raw = to_bytes(json.dumps(data)) + resp = HTTPResponse(body=io.BytesIO(raw), preload_content=False) + resp.status = 200 + resp.reason = 'OK' + resp.headers = {'X-Circonus-API-Version': '2.00'} + return
self.build_response(request, resp) - with patch('requests.adapters.HTTPAdapter.send', autospec=True, side_effect=send) as send: - with self.assertRaises(AnsibleExitJson) as result: - self.module.main() - self.assertTrue(result.exception.args[0]['changed']) - self.assertEqual(result.exception.args[0]['annotation']['_cid'], cid) + with patch('requests.adapters.HTTPAdapter.send', autospec=True, side_effect=send) as send: + with self.assertRaises(AnsibleExitJson) as result: + self.module.main() + + self.assertTrue(result.exception.args[0]['changed']) + self.assertEqual(result.exception.args[0]['annotation']['_cid'], cid) self.assertEqual(send.call_count, 1) def test_add_annotation_unicode(self): @@ -76,78 +77,80 @@ Note: it seems there is a bug which prevents creating an annotation with a non-ASCII category if this category already exists, in such a case an Internal Server Error (500) occurs.""" - set_module_args({ + with set_module_args({ 'category': 'new catégorÿ', 'description': 'test description', 'title': 'test title', 'api_key': str(uuid.uuid4()), - }) + }): - cid = '/annotation/100000' + cid = '/annotation/100000' - def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None): - data = { - '_cid': '/annotation/100000', - '_created': 1502236928, - '_last_modified': 1502236928, - '_last_modified_by': '/user/1000', - # use res['annotation']['category'].encode('latin1').decode('utf8') - 'category': u'new cat\xc3\xa9gor\xc3\xbf', - 'description': 'test description', - 'rel_metrics': [], - 'start': 1502236927, - 'stop': 1502236927, - 'title': 'test title', - } + def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None): + data = { + '_cid': '/annotation/100000', + '_created': 1502236928, + '_last_modified': 1502236928, + '_last_modified_by': '/user/1000', + # use res['annotation']['category'].encode('latin1').decode('utf8') + 'category': u'new cat\xc3\xa9gor\xc3\xbf', + 'description': 'test description', + 'rel_metrics': [], + 'start': 1502236927, + 'stop': 1502236927, + 'title': 'test title', + } - raw = to_bytes(json.dumps(data), encoding='latin1') - resp = HTTPResponse(body=io.BytesIO(raw), preload_content=False) - resp.status = 200 - resp.reason = 'OK' - resp.headers = {'X-Circonus-API-Version': '2.00'} - return self.build_response(request, resp) + raw = to_bytes(json.dumps(data), encoding='latin1') + resp = HTTPResponse(body=io.BytesIO(raw), preload_content=False) + resp.status = 200 + resp.reason = 'OK' + resp.headers = {'X-Circonus-API-Version': '2.00'} + return self.build_response(request, resp) - with patch('requests.adapters.HTTPAdapter.send', autospec=True, side_effect=send) as send: - with self.assertRaises(AnsibleExitJson) as result: - self.module.main() - self.assertTrue(result.exception.args[0]['changed']) - self.assertEqual(result.exception.args[0]['annotation']['_cid'], cid) + with patch('requests.adapters.HTTPAdapter.send', autospec=True, side_effect=send) as send: + with self.assertRaises(AnsibleExitJson) as result: + self.module.main() + + self.assertTrue(result.exception.args[0]['changed']) + self.assertEqual(result.exception.args[0]['annotation']['_cid'], cid) self.assertEqual(send.call_count, 1) def test_auth_failure(self): """Check that an error is raised when authentication fails""" - set_module_args({ + with set_module_args({ 'category': 'test category', 'description': 'test description', 'title': 'test title', 'api_key': str(uuid.uuid4()), - }) + }): - cid =
'/annotation/100000' + cid = '/annotation/100000' - def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None): - data = { - '_cid': cid, - '_created': 1502146995, - '_last_modified': 1502146995, - '_last_modified_by': '/user/1000', - 'category': 'test category', - 'description': 'test description', - 'rel_metrics': [], - 'start': 1502145480, - 'stop': None, - 'title': 'test title', - } - raw = to_bytes(json.dumps(data)) - resp = HTTPResponse(body=io.BytesIO(raw), preload_content=False) - resp.status = 403 - resp.reason = 'Forbidden' - resp.headers = {'X-Circonus-API-Version': '2.00'} - return self.build_response(request, resp) + def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None): + data = { + '_cid': cid, + '_created': 1502146995, + '_last_modified': 1502146995, + '_last_modified_by': '/user/1000', + 'category': 'test category', + 'description': 'test description', + 'rel_metrics': [], + 'start': 1502145480, + 'stop': None, + 'title': 'test title', + } + raw = to_bytes(json.dumps(data)) + resp = HTTPResponse(body=io.BytesIO(raw), preload_content=False) + resp.status = 403 + resp.reason = 'Forbidden' + resp.headers = {'X-Circonus-API-Version': '2.00'} + return self.build_response(request, resp) - with patch('requests.adapters.HTTPAdapter.send', autospec=True, side_effect=send) as send: - with self.assertRaises(AnsibleFailJson) as result: - self.module.main() - self.assertTrue(result.exception.args[0]['failed']) - self.assertTrue(re.match(r'\b403\b', result.exception.args[0]['reason'])) + with patch('requests.adapters.HTTPAdapter.send', autospec=True, side_effect=send) as send: + with self.assertRaises(AnsibleFailJson) as result: + self.module.main() + + self.assertTrue(result.exception.args[0]['failed']) + self.assertTrue(re.match(r'\b403\b', result.exception.args[0]['reason'])) self.assertEqual(send.call_count, 1) diff --git a/tests/unit/plugins/modules/test_cpanm.py b/tests/unit/plugins/modules/test_cpanm.py index 28090455f0..a5df9158da 100644 --- a/tests/unit/plugins/modules/test_cpanm.py +++ b/tests/unit/plugins/modules/test_cpanm.py @@ -14,7 +14,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import cpanm -from .helper import Helper, RunCommandMock # pylint: disable=unused-import +from .uthelper import UTHelper, RunCommandMock -Helper.from_module(cpanm, __name__) +UTHelper.from_module(cpanm, __name__, mocks=[RunCommandMock]) diff --git a/tests/unit/plugins/modules/test_cpanm.yaml b/tests/unit/plugins/modules/test_cpanm.yaml index ff4bd9c0f7..467429b10d 100644 --- a/tests/unit/plugins/modules/test_cpanm.yaml +++ b/tests/unit/plugins/modules/test_cpanm.yaml @@ -4,356 +4,400 @@ # SPDX-License-Identifier: GPL-3.0-or-later --- -- id: install_dancer_compatibility - input: - name: Dancer - mode: compatibility - output: - changed: true - cpanm_version: "1.7047" - mocks: - run_command: - - command: [/testbin/cpanm, --version] - environ: &env-def-true {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} - rc: 0 - out: | - cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) - perl version 5.041005 (/usr/local/bin/perl) - err: "" - - command: [/testbin/perl, -le, 'use Dancer;'] - environ: &env-def-false {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: false} - rc: 2 - out: "" - err: "error, not installed" - - command: [/testbin/cpanm, Dancer] - environ: *env-def-true - rc: 0 - out: "" - err: "" -- id: install_dancer_already_installed_compatibility - input: - name: Dancer - 
mode: compatibility - output: - changed: false - mocks: - run_command: - - command: [/testbin/cpanm, --version] - environ: *env-def-true - rc: 0 - out: | - cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) - perl version 5.041005 (/usr/local/bin/perl) - err: "" - - command: [/testbin/perl, -le, 'use Dancer;'] - environ: *env-def-false - rc: 0 - out: "" - err: "" -- id: install_dancer - input: - name: Dancer - output: - changed: true - mocks: - run_command: - - command: [/testbin/cpanm, --version] - environ: *env-def-true - rc: 0 - out: | - cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) - perl version 5.041005 (/usr/local/bin/perl) - err: "" - - command: [/testbin/cpanm, Dancer] - environ: *env-def-true - rc: 0 - out: "" - err: "" -- id: install_distribution_file_compatibility - input: - name: MIYAGAWA/Plack-0.99_05.tar.gz - mode: compatibility - output: - changed: true - mocks: - run_command: - - command: [/testbin/cpanm, --version] - environ: *env-def-true - rc: 0 - out: | - cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) - perl version 5.041005 (/usr/local/bin/perl) - err: "" - - command: [/testbin/cpanm, MIYAGAWA/Plack-0.99_05.tar.gz] - environ: *env-def-true - rc: 0 - out: "" - err: "" -- id: install_distribution_file - input: - name: MIYAGAWA/Plack-0.99_05.tar.gz - output: - changed: true - mocks: - run_command: - - command: [/testbin/cpanm, --version] - environ: *env-def-true - rc: 0 - out: | - cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) - perl version 5.041005 (/usr/local/bin/perl) - err: "" - - command: [/testbin/cpanm, MIYAGAWA/Plack-0.99_05.tar.gz] - environ: *env-def-true - rc: 0 - out: "" - err: "" -- id: install_into_locallib - input: - name: Dancer - mode: new - locallib: /srv/webapps/my_app/extlib - output: - changed: true - mocks: - run_command: - - command: [/testbin/cpanm, --version] - environ: *env-def-true - rc: 0 - out: | - cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) - perl version 5.041005 (/usr/local/bin/perl) - err: "" - - command: [/testbin/cpanm, --local-lib, /srv/webapps/my_app/extlib, Dancer] - environ: *env-def-true - rc: 0 - out: "" - err: "" -- id: install_from_local_directory - input: - from_path: /srv/webapps/my_app/src/ - mode: new - output: - changed: true - mocks: - run_command: - - command: [/testbin/cpanm, --version] - environ: *env-def-true - rc: 0 - out: | - cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) - perl version 5.041005 (/usr/local/bin/perl) - err: "" - - command: [/testbin/cpanm, /srv/webapps/my_app/src/] - environ: *env-def-true - rc: 0 - out: "" - err: "" -- id: install_into_locallib_no_unit_testing - input: - name: Dancer - notest: true - mode: new - locallib: /srv/webapps/my_app/extlib - output: - changed: true - mocks: - run_command: - - command: [/testbin/cpanm, --version] - environ: *env-def-true - rc: 0 - out: | - cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) - perl version 5.041005 (/usr/local/bin/perl) - err: "" - - command: [/testbin/cpanm, --notest, --local-lib, /srv/webapps/my_app/extlib, Dancer] - environ: *env-def-true - rc: 0 - out: "" - err: "" -- id: install_from_mirror - input: - name: Dancer - mode: new - mirror: "http://cpan.cpantesters.org/" - output: - changed: true - mocks: - run_command: - - command: [/testbin/cpanm, --version] - environ: *env-def-true - rc: 0 - out: | - cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) - perl version 5.041005 (/usr/local/bin/perl) - err: "" - - command: [/testbin/cpanm, 
--mirror, "http://cpan.cpantesters.org/", Dancer] - environ: *env-def-true - rc: 0 - out: "" - err: "" -- id: install_into_system_lib - input: - name: Dancer - mode: new - system_lib: true - output: - failed: true - mocks: - run_command: [] -- id: install_minversion_implicit - input: - name: Dancer - mode: new - version: "1.0" - output: - changed: true - mocks: - run_command: - - command: [/testbin/cpanm, --version] - environ: *env-def-true - rc: 0 - out: | - cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) - perl version 5.041005 (/usr/local/bin/perl) - err: "" - - command: [/testbin/cpanm, Dancer~1.0] - environ: *env-def-true - rc: 0 - out: "" - err: "" -- id: install_minversion_explicit - input: - name: Dancer - mode: new - version: "~1.5" - output: - changed: true - mocks: - run_command: - - command: [/testbin/cpanm, --version] - environ: *env-def-true - rc: 0 - out: | - cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) - perl version 5.041005 (/usr/local/bin/perl) - err: "" - - command: [/testbin/cpanm, Dancer~1.5] - environ: *env-def-true - rc: 0 - out: "" - err: "" -- id: install_specific_version - input: - name: Dancer - mode: new - version: "@1.7" - output: - changed: true - mocks: - run_command: - - command: [/testbin/cpanm, --version] - environ: *env-def-true - rc: 0 - out: | - cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) - perl version 5.041005 (/usr/local/bin/perl) - err: "" - - command: [/testbin/cpanm, Dancer@1.7] - environ: *env-def-true - rc: 0 - out: "" - err: "" -- id: install_specific_version_from_file_error - input: - name: MIYAGAWA/Plack-0.99_05.tar.gz - mode: new - version: "@1.7" - output: - failed: true - msg: parameter 'version' must not be used when installing from a file - mocks: - run_command: - - command: [/testbin/cpanm, --version] - environ: *env-def-true - rc: 0 - out: | - cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) - perl version 5.041005 (/usr/local/bin/perl) - err: "" -- id: install_specific_version_from_directory_error - input: - from_path: ~/ - mode: new - version: "@1.7" - output: - failed: true - msg: parameter 'version' must not be used when installing from a directory - mocks: - run_command: - - command: [/testbin/cpanm, --version] - environ: *env-def-true - rc: 0 - out: | - cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) - perl version 5.041005 (/usr/local/bin/perl) - err: "" -- id: install_specific_version_from_git_url_explicit - input: - name: "git://github.com/plack/Plack.git" - mode: new - version: "@1.7" - output: - changed: true - mocks: - run_command: - - command: [/testbin/cpanm, --version] - environ: *env-def-true - rc: 0 - out: | - cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) - perl version 5.041005 (/usr/local/bin/perl) - err: "" - - command: [/testbin/cpanm, "git://github.com/plack/Plack.git@1.7"] - environ: *env-def-true - rc: 0 - out: "" - err: "" -- id: install_specific_version_from_git_url_implicit - input: - name: "git://github.com/plack/Plack.git" - mode: new - version: "2.5" - output: - changed: true - mocks: - run_command: - - command: [/testbin/cpanm, --version] - environ: *env-def-true - rc: 0 - out: | - cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) - perl version 5.041005 (/usr/local/bin/perl) - err: "" - - command: [/testbin/cpanm, "git://github.com/plack/Plack.git@2.5"] - environ: *env-def-true - rc: 0 - out: "" - err: "" -- id: install_version_operator_from_git_url_error - input: - name: "git://github.com/plack/Plack.git" - 
mode: new - version: "~2.5" - output: - failed: true - msg: operator '~' not allowed in version parameter when installing from git repository - mocks: - run_command: - - command: [/testbin/cpanm, --version] - environ: *env-def-true - rc: 0 - out: | - cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) - perl version 5.041005 (/usr/local/bin/perl) - err: "" +anchors: + environ_true: &env-def-true {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} + environ_false: &env-def-false {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: false} +test_cases: + - id: install_dancer_compatibility + input: + name: Dancer + mode: compatibility + output: + changed: true + cpanm_version: '1.7047' + mocks: + run_command: + - command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: '' + - command: [/testbin/perl, -le, use Dancer;] + environ: *env-def-false + rc: 2 + out: '' + err: error, not installed + - command: [/testbin/cpanm, Dancer] + environ: *env-def-true + rc: 0 + out: '' + err: '' + - id: install_dancer_already_installed_compatibility + input: + name: Dancer + mode: compatibility + output: + changed: false + mocks: + run_command: + - command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: '' + - command: [/testbin/perl, -le, use Dancer;] + environ: *env-def-false + rc: 0 + out: '' + err: '' + - id: install_dancer + input: + name: Dancer + output: + changed: true + mocks: + run_command: + - command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: '' + - command: [/testbin/cpanm, Dancer] + environ: *env-def-true + rc: 0 + out: '' + err: '' + - id: install_distribution_file_compatibility + input: + name: MIYAGAWA/Plack-0.99_05.tar.gz + mode: compatibility + output: + changed: true + mocks: + run_command: + - command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: '' + - command: [/testbin/cpanm, MIYAGAWA/Plack-0.99_05.tar.gz] + environ: *env-def-true + rc: 0 + out: '' + err: '' + - id: install_distribution_file + input: + name: MIYAGAWA/Plack-0.99_05.tar.gz + output: + changed: true + mocks: + run_command: + - command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: '' + - command: [/testbin/cpanm, MIYAGAWA/Plack-0.99_05.tar.gz] + environ: *env-def-true + rc: 0 + out: '' + err: '' + - id: install_into_locallib + input: + name: Dancer + mode: new + locallib: /srv/webapps/my_app/extlib + output: + changed: true + mocks: + run_command: + - command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: '' + - command: [/testbin/cpanm, --local-lib, /srv/webapps/my_app/extlib, Dancer] + environ: *env-def-true + rc: 0 + out: '' + err: '' + - id: install_from_local_directory + input: + from_path: /srv/webapps/my_app/src/ + mode: new + output: + changed: true + mocks: + run_command: + 
- command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: '' + - command: [/testbin/cpanm, /srv/webapps/my_app/src/] + environ: *env-def-true + rc: 0 + out: '' + err: '' + - id: install_into_locallib_no_unit_testing + input: + name: Dancer + notest: true + mode: new + locallib: /srv/webapps/my_app/extlib + output: + changed: true + mocks: + run_command: + - command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: '' + - command: [/testbin/cpanm, --notest, --local-lib, /srv/webapps/my_app/extlib, Dancer] + environ: *env-def-true + rc: 0 + out: '' + err: '' + - id: install_from_mirror + input: + name: Dancer + mode: new + mirror: http://cpan.cpantesters.org/ + output: + changed: true + mocks: + run_command: + - command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: '' + - command: [/testbin/cpanm, --mirror, http://cpan.cpantesters.org/, Dancer] + environ: *env-def-true + rc: 0 + out: '' + err: '' + - id: install_into_system_lib + input: + name: Dancer + mode: new + system_lib: true + output: + failed: true + mocks: + run_command: [] + - id: install_minversion_implicit + input: + name: Dancer + mode: new + version: '1.0' + output: + changed: true + mocks: + run_command: + - command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: '' + - command: [/testbin/cpanm, Dancer~1.0] + environ: *env-def-true + rc: 0 + out: '' + err: '' + - id: install_minversion_explicit + input: + name: Dancer + mode: new + version: ~1.5 + output: + changed: true + mocks: + run_command: + - command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: '' + - command: [/testbin/cpanm, Dancer~1.5] + environ: *env-def-true + rc: 0 + out: '' + err: '' + - id: install_specific_version + input: + name: Dancer + mode: new + version: '@1.7' + output: + changed: true + mocks: + run_command: + - command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: '' + - command: [/testbin/cpanm, Dancer@1.7] + environ: *env-def-true + rc: 0 + out: '' + err: '' + - id: install_specific_version_from_file_error + input: + name: MIYAGAWA/Plack-0.99_05.tar.gz + mode: new + version: '@1.7' + output: + failed: true + msg: parameter 'version' must not be used when installing from a file + mocks: + run_command: + - command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: '' + - id: install_specific_version_from_directory_error + input: + from_path: ~/ + mode: new + version: '@1.7' + output: + failed: true + msg: parameter 'version' must not be used when installing from a directory + mocks: + run_command: + - command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm 
(App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: '' + - id: install_specific_version_from_git_url_explicit + input: + name: git://github.com/plack/Plack.git + mode: new + version: '@1.7' + output: + changed: true + mocks: + run_command: + - command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: '' + - command: [/testbin/cpanm, git://github.com/plack/Plack.git@1.7] + environ: *env-def-true + rc: 0 + out: '' + err: '' + - id: install_specific_version_from_git_url_implicit + input: + name: git://github.com/plack/Plack.git + mode: new + version: '2.5' + output: + changed: true + mocks: + run_command: + - command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: '' + - command: [/testbin/cpanm, git://github.com/plack/Plack.git@2.5] + environ: *env-def-true + rc: 0 + out: '' + err: '' + - id: install_version_operator_from_git_url_error + input: + name: git://github.com/plack/Plack.git + mode: new + version: ~2.5 + output: + failed: true + msg: operator '~' not allowed in version parameter when installing from git repository + mocks: + run_command: + - command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: '' + - id: install_dancer_with_recommends + input: + name: Dancer2 + install_recommendations: true + output: + changed: true + mocks: + run_command: + - command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: '' + - command: [/testbin/cpanm, --with-recommends, Dancer2] + environ: *env-def-true + rc: 0 + out: '' + err: '' + - id: install_dancer_with_suggests + input: + name: Dancer2 + install_suggestions: true + output: + changed: true + mocks: + run_command: + - command: [/testbin/cpanm, --version] + environ: *env-def-true + rc: 0 + out: | + cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) + perl version 5.041005 (/usr/local/bin/perl) + err: '' + - command: [/testbin/cpanm, --with-suggests, Dancer2] + environ: *env-def-true + rc: 0 + out: '' + err: '' diff --git a/tests/unit/plugins/modules/test_datadog_downtime.py b/tests/unit/plugins/modules/test_datadog_downtime.py index e1ecbfa66f..a69e9986d5 100644 --- a/tests/unit/plugins/modules/test_datadog_downtime.py +++ b/tests/unit/plugins/modules/test_datadog_downtime.py @@ -8,8 +8,8 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible_collections.community.general.plugins.modules import datadog_downtime -from ansible_collections.community.general.tests.unit.compat.mock import MagicMock, patch -from ansible_collections.community.general.tests.unit.plugins.modules.utils import ( +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import MagicMock, patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args ) @@ -33,12 +33,12 @@ class TestDatadogDowntime(ModuleTestCase): def test_without_required_parameters(self): """Failure must occurs when all 
parameters are missing""" with self.assertRaises(AnsibleFailJson): - set_module_args({}) - self.module.main() + with set_module_args({}): + self.module.main() @patch("ansible_collections.community.general.plugins.modules.datadog_downtime.DowntimesApi") def test_create_downtime_when_no_id(self, downtimes_api_mock): - set_module_args({ + with set_module_args({ "monitor_tags": ["foo:bar"], "scope": ["*"], "monitor_id": 12345, @@ -49,32 +49,32 @@ class TestDatadogDowntime(ModuleTestCase): "rrule": "rrule", "api_key": "an_api_key", "app_key": "an_app_key", - }) + }): + downtime = Downtime() + downtime.monitor_tags = ["foo:bar"] + downtime.scope = ["*"] + downtime.monitor_id = 12345 + downtime.message = "Message" + downtime.start = 1111 + downtime.end = 2222 + downtime.timezone = "UTC" + downtime.recurrence = DowntimeRecurrence( + rrule="rrule", + type="rrule" + ) - downtime = Downtime() - downtime.monitor_tags = ["foo:bar"] - downtime.scope = ["*"] - downtime.monitor_id = 12345 - downtime.message = "Message" - downtime.start = 1111 - downtime.end = 2222 - downtime.timezone = "UTC" - downtime.recurrence = DowntimeRecurrence( - rrule="rrule", - type="rrule" - ) + create_downtime_mock = MagicMock(return_value=self.__downtime_with_id(12345)) + downtimes_api_mock.return_value = MagicMock(create_downtime=create_downtime_mock) + with self.assertRaises(AnsibleExitJson) as result: + self.module.main() - create_downtime_mock = MagicMock(return_value=self.__downtime_with_id(12345)) - downtimes_api_mock.return_value = MagicMock(create_downtime=create_downtime_mock) - with self.assertRaises(AnsibleExitJson) as result: - self.module.main() self.assertTrue(result.exception.args[0]['changed']) self.assertEqual(result.exception.args[0]['downtime']['id'], 12345) create_downtime_mock.assert_called_once_with(downtime) @patch("ansible_collections.community.general.plugins.modules.datadog_downtime.DowntimesApi") def test_create_downtime_when_id_and_disabled(self, downtimes_api_mock): - set_module_args({ + with set_module_args({ "id": 1212, "monitor_tags": ["foo:bar"], "scope": ["*"], @@ -86,32 +86,32 @@ class TestDatadogDowntime(ModuleTestCase): "rrule": "rrule", "api_key": "an_api_key", "app_key": "an_app_key", - }) + }): + downtime = Downtime() + downtime.monitor_tags = ["foo:bar"] + downtime.scope = ["*"] + downtime.monitor_id = 12345 + downtime.message = "Message" + downtime.start = 1111 + downtime.end = 2222 + downtime.timezone = "UTC" + downtime.recurrence = DowntimeRecurrence( + rrule="rrule", + type="rrule" + ) - downtime = Downtime() - downtime.monitor_tags = ["foo:bar"] - downtime.scope = ["*"] - downtime.monitor_id = 12345 - downtime.message = "Message" - downtime.start = 1111 - downtime.end = 2222 - downtime.timezone = "UTC" - downtime.recurrence = DowntimeRecurrence( - rrule="rrule", - type="rrule" - ) + disabled_downtime = Downtime() + disabled_downtime.disabled = True + disabled_downtime.id = 1212 - disabled_downtime = Downtime() - disabled_downtime.disabled = True - disabled_downtime.id = 1212 + create_downtime_mock = MagicMock(return_value=self.__downtime_with_id(12345)) + get_downtime_mock = MagicMock(return_value=disabled_downtime) + downtimes_api_mock.return_value = MagicMock( + create_downtime=create_downtime_mock, get_downtime=get_downtime_mock + ) + with self.assertRaises(AnsibleExitJson) as result: + self.module.main() - create_downtime_mock = MagicMock(return_value=self.__downtime_with_id(12345)) - get_downtime_mock = MagicMock(return_value=disabled_downtime) - 
downtimes_api_mock.return_value = MagicMock( - create_downtime=create_downtime_mock, get_downtime=get_downtime_mock - ) - with self.assertRaises(AnsibleExitJson) as result: - self.module.main() self.assertTrue(result.exception.args[0]['changed']) self.assertEqual(result.exception.args[0]['downtime']['id'], 12345) create_downtime_mock.assert_called_once_with(downtime) @@ -119,7 +119,7 @@ class TestDatadogDowntime(ModuleTestCase): @patch("ansible_collections.community.general.plugins.modules.datadog_downtime.DowntimesApi") def test_update_downtime_when_not_disabled(self, downtimes_api_mock): - set_module_args({ + with set_module_args({ "id": 1212, "monitor_tags": ["foo:bar"], "scope": ["*"], @@ -131,32 +131,32 @@ class TestDatadogDowntime(ModuleTestCase): "rrule": "rrule", "api_key": "an_api_key", "app_key": "an_app_key", - }) + }): + downtime = Downtime() + downtime.monitor_tags = ["foo:bar"] + downtime.scope = ["*"] + downtime.monitor_id = 12345 + downtime.message = "Message" + downtime.start = 1111 + downtime.end = 2222 + downtime.timezone = "UTC" + downtime.recurrence = DowntimeRecurrence( + rrule="rrule", + type="rrule" + ) - downtime = Downtime() - downtime.monitor_tags = ["foo:bar"] - downtime.scope = ["*"] - downtime.monitor_id = 12345 - downtime.message = "Message" - downtime.start = 1111 - downtime.end = 2222 - downtime.timezone = "UTC" - downtime.recurrence = DowntimeRecurrence( - rrule="rrule", - type="rrule" - ) + enabled_downtime = Downtime() + enabled_downtime.disabled = False + enabled_downtime.id = 1212 - enabled_downtime = Downtime() - enabled_downtime.disabled = False - enabled_downtime.id = 1212 + update_downtime_mock = MagicMock(return_value=self.__downtime_with_id(1212)) + get_downtime_mock = MagicMock(return_value=enabled_downtime) + downtimes_api_mock.return_value = MagicMock( + update_downtime=update_downtime_mock, get_downtime=get_downtime_mock + ) + with self.assertRaises(AnsibleExitJson) as result: + self.module.main() - update_downtime_mock = MagicMock(return_value=self.__downtime_with_id(1212)) - get_downtime_mock = MagicMock(return_value=enabled_downtime) - downtimes_api_mock.return_value = MagicMock( - update_downtime=update_downtime_mock, get_downtime=get_downtime_mock - ) - with self.assertRaises(AnsibleExitJson) as result: - self.module.main() self.assertTrue(result.exception.args[0]['changed']) self.assertEqual(result.exception.args[0]['downtime']['id'], 1212) update_downtime_mock.assert_called_once_with(1212, downtime) @@ -164,7 +164,7 @@ class TestDatadogDowntime(ModuleTestCase): @patch("ansible_collections.community.general.plugins.modules.datadog_downtime.DowntimesApi") def test_update_downtime_no_change(self, downtimes_api_mock): - set_module_args({ + with set_module_args({ "id": 1212, "monitor_tags": ["foo:bar"], "scope": ["*"], @@ -176,42 +176,42 @@ class TestDatadogDowntime(ModuleTestCase): "rrule": "rrule", "api_key": "an_api_key", "app_key": "an_app_key", - }) + }): + downtime = Downtime() + downtime.monitor_tags = ["foo:bar"] + downtime.scope = ["*"] + downtime.monitor_id = 12345 + downtime.message = "Message" + downtime.start = 1111 + downtime.end = 2222 + downtime.timezone = "UTC" + downtime.recurrence = DowntimeRecurrence( + rrule="rrule", + type="rrule" + ) - downtime = Downtime() - downtime.monitor_tags = ["foo:bar"] - downtime.scope = ["*"] - downtime.monitor_id = 12345 - downtime.message = "Message" - downtime.start = 1111 - downtime.end = 2222 - downtime.timezone = "UTC" - downtime.recurrence = DowntimeRecurrence( - rrule="rrule", - 
type="rrule" - ) + downtime_get = Downtime() + downtime_get.id = 1212 + downtime_get.disabled = False + downtime_get.monitor_tags = ["foo:bar"] + downtime_get.scope = ["*"] + downtime_get.monitor_id = 12345 + downtime_get.message = "Message" + downtime_get.start = 1111 + downtime_get.end = 2222 + downtime_get.timezone = "UTC" + downtime_get.recurrence = DowntimeRecurrence( + rrule="rrule" + ) - downtime_get = Downtime() - downtime_get.id = 1212 - downtime_get.disabled = False - downtime_get.monitor_tags = ["foo:bar"] - downtime_get.scope = ["*"] - downtime_get.monitor_id = 12345 - downtime_get.message = "Message" - downtime_get.start = 1111 - downtime_get.end = 2222 - downtime_get.timezone = "UTC" - downtime_get.recurrence = DowntimeRecurrence( - rrule="rrule" - ) + update_downtime_mock = MagicMock(return_value=downtime_get) + get_downtime_mock = MagicMock(return_value=downtime_get) + downtimes_api_mock.return_value = MagicMock( + update_downtime=update_downtime_mock, get_downtime=get_downtime_mock + ) + with self.assertRaises(AnsibleExitJson) as result: + self.module.main() - update_downtime_mock = MagicMock(return_value=downtime_get) - get_downtime_mock = MagicMock(return_value=downtime_get) - downtimes_api_mock.return_value = MagicMock( - update_downtime=update_downtime_mock, get_downtime=get_downtime_mock - ) - with self.assertRaises(AnsibleExitJson) as result: - self.module.main() self.assertFalse(result.exception.args[0]['changed']) self.assertEqual(result.exception.args[0]['downtime']['id'], 1212) update_downtime_mock.assert_called_once_with(1212, downtime) @@ -219,20 +219,20 @@ class TestDatadogDowntime(ModuleTestCase): @patch("ansible_collections.community.general.plugins.modules.datadog_downtime.DowntimesApi") def test_delete_downtime(self, downtimes_api_mock): - set_module_args({ + with set_module_args({ "id": 1212, "state": "absent", "api_key": "an_api_key", "app_key": "an_app_key", - }) + }): + cancel_downtime_mock = MagicMock() + downtimes_api_mock.return_value = MagicMock( + get_downtime=self.__downtime_with_id, + cancel_downtime=cancel_downtime_mock + ) + with self.assertRaises(AnsibleExitJson) as result: + self.module.main() - cancel_downtime_mock = MagicMock() - downtimes_api_mock.return_value = MagicMock( - get_downtime=self.__downtime_with_id, - cancel_downtime=cancel_downtime_mock - ) - with self.assertRaises(AnsibleExitJson) as result: - self.module.main() self.assertTrue(result.exception.args[0]['changed']) cancel_downtime_mock.assert_called_once_with(1212) diff --git a/tests/unit/plugins/modules/test_discord.py b/tests/unit/plugins/modules/test_discord.py index 83069d279d..e8ee8f78cd 100644 --- a/tests/unit/plugins/modules/test_discord.py +++ b/tests/unit/plugins/modules/test_discord.py @@ -7,9 +7,9 @@ __metaclass__ = type import json import pytest -from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch from ansible_collections.community.general.plugins.modules import discord -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args class TestDiscordModule(ModuleTestCase): @@ -28,30 +28,30 @@ class TestDiscordModule(ModuleTestCase): def test_without_parameters(self): """Failure if no parameters set""" 
with self.assertRaises(AnsibleFailJson): - set_module_args({}) - self.module.main() + with set_module_args({}): + self.module.main() def test_without_content(self): """Failure if content and embeds both are missing""" - set_module_args({ + with set_module_args({ 'webhook_id': 'xxx', 'webhook_token': 'xxx' - }) - with self.assertRaises(AnsibleFailJson): - self.module.main() + }): + with self.assertRaises(AnsibleFailJson): + self.module.main() def test_successful_message(self): """Test a basic message successfully.""" - set_module_args({ + with set_module_args({ 'webhook_id': 'xxx', 'webhook_token': 'xxx', 'content': 'test' - }) + }): - with patch.object(discord, "fetch_url") as fetch_url_mock: - fetch_url_mock.return_value = (None, {"status": 204, 'msg': 'OK (0 bytes)'}) - with self.assertRaises(AnsibleExitJson): - self.module.main() + with patch.object(discord, "fetch_url") as fetch_url_mock: + fetch_url_mock.return_value = (None, {"status": 204, 'msg': 'OK (0 bytes)'}) + with self.assertRaises(AnsibleExitJson): + self.module.main() self.assertTrue(fetch_url_mock.call_count, 1) call_data = json.loads(fetch_url_mock.call_args[1]['data']) @@ -59,17 +59,17 @@ class TestDiscordModule(ModuleTestCase): def test_message_with_username(self): """Test a message with username set successfully.""" - set_module_args({ + with set_module_args({ 'webhook_id': 'xxx', 'webhook_token': 'xxx', 'content': 'test', 'username': 'Ansible Bot' - }) + }): - with patch.object(discord, "fetch_url") as fetch_url_mock: - fetch_url_mock.return_value = (None, {"status": 204, 'msg': 'OK (0 bytes)'}) - with self.assertRaises(AnsibleExitJson): - self.module.main() + with patch.object(discord, "fetch_url") as fetch_url_mock: + fetch_url_mock.return_value = (None, {"status": 204, 'msg': 'OK (0 bytes)'}) + with self.assertRaises(AnsibleExitJson): + self.module.main() self.assertTrue(fetch_url_mock.call_count, 1) call_data = json.loads(fetch_url_mock.call_args[1]['data']) @@ -79,27 +79,30 @@ class TestDiscordModule(ModuleTestCase): def test_failed_message(self): """Test failure because webhook id is wrong.""" - set_module_args({ + with set_module_args({ 'webhook_id': 'wrong', 'webhook_token': 'xxx', 'content': 'test' - }) + }): - with patch.object(discord, "fetch_url") as fetch_url_mock: - fetch_url_mock.return_value = (None, {"status": 404, 'msg': 'HTTP Error 404: Not Found', 'body': '{"message": "Unknown Webhook", "code": 10015}'}) - with self.assertRaises(AnsibleFailJson): - self.module.main() + with patch.object(discord, "fetch_url") as fetch_url_mock: + fetch_url_mock.return_value = ( + None, + {"status": 404, 'msg': 'HTTP Error 404: Not Found', 'body': '{"message": "Unknown Webhook", "code": 10015}'}, + ) + with self.assertRaises(AnsibleFailJson): + self.module.main() def test_failed_message_without_body(self): """Test failure with empty response body.""" - set_module_args({ + with set_module_args({ 'webhook_id': 'wrong', 'webhook_token': 'xxx', 'content': 'test' - }) + }): - with patch.object(discord, "fetch_url") as fetch_url_mock: - fetch_url_mock.return_value = (None, {"status": 404, 'msg': 'HTTP Error 404: Not Found'}) - with self.assertRaises(AnsibleFailJson): - self.module.main() + with patch.object(discord, "fetch_url") as fetch_url_mock: + fetch_url_mock.return_value = (None, {"status": 404, 'msg': 'HTTP Error 404: Not Found'}) + with self.assertRaises(AnsibleFailJson): + self.module.main() diff --git a/tests/unit/plugins/modules/test_django_check.py b/tests/unit/plugins/modules/test_django_check.py index 
52210bdb76..1562dbe8ea 100644 --- a/tests/unit/plugins/modules/test_django_check.py +++ b/tests/unit/plugins/modules/test_django_check.py @@ -7,7 +7,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import django_check -from .helper import Helper, RunCommandMock # pylint: disable=unused-import +from .uthelper import UTHelper, RunCommandMock -Helper.from_module(django_check, __name__) +UTHelper.from_module(django_check, __name__, mocks=[RunCommandMock]) diff --git a/tests/unit/plugins/modules/test_django_check.yaml b/tests/unit/plugins/modules/test_django_check.yaml index 74374c01c9..fb39b6d62f 100644 --- a/tests/unit/plugins/modules/test_django_check.yaml +++ b/tests/unit/plugins/modules/test_django_check.yaml @@ -4,40 +4,43 @@ # SPDX-License-Identifier: GPL-3.0-or-later --- -- id: success - input: - settings: whatever.settings - output: - version: "5.1.2" - mocks: - run_command: - - command: [/testbin/python, -m, django, --version] - environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} - rc: 0 - out: "5.1.2\n" - err: "" - - command: [/testbin/python, -m, django, check, --no-color, --settings=whatever.settings] - environ: *env-def - rc: 0 - out: "whatever\n" - err: "" -- id: multiple_databases - input: - settings: whatever.settings - database: - - abc - - def - output: - version: "5.1.2" - mocks: - run_command: - - command: [/testbin/python, -m, django, --version] - environ: *env-def - rc: 0 - out: "5.1.2\n" - err: "" - - command: [/testbin/python, -m, django, check, --no-color, --settings=whatever.settings, --database, abc, --database, def] - environ: *env-def - rc: 0 - out: "whatever\n" - err: "" +anchors: + environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} +test_cases: + - id: success + input: + settings: whatever.settings + output: + version: 5.1.2 + mocks: + run_command: + - command: [/testbin/python, -m, django, --version] + environ: *env-def + rc: 0 + out: "5.1.2\n" + err: '' + - command: [/testbin/python, -m, django, check, --no-color, --settings=whatever.settings] + environ: *env-def + rc: 0 + out: "whatever\n" + err: '' + - id: multiple_databases + input: + settings: whatever.settings + database: + - abc + - def + output: + version: 5.1.2 + mocks: + run_command: + - command: [/testbin/python, -m, django, --version] + environ: *env-def + rc: 0 + out: "5.1.2\n" + err: '' + - command: [/testbin/python, -m, django, check, --no-color, --settings=whatever.settings, --database, abc, --database, def] + environ: *env-def + rc: 0 + out: "whatever\n" + err: '' diff --git a/tests/unit/plugins/modules/test_django_command.py b/tests/unit/plugins/modules/test_django_command.py index 8be910fd27..3be2e475d8 100644 --- a/tests/unit/plugins/modules/test_django_command.py +++ b/tests/unit/plugins/modules/test_django_command.py @@ -7,7 +7,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import django_command -from .helper import Helper, RunCommandMock # pylint: disable=unused-import +from .uthelper import UTHelper, RunCommandMock -Helper.from_module(django_command, __name__) +UTHelper.from_module(django_command, __name__, mocks=[RunCommandMock]) diff --git a/tests/unit/plugins/modules/test_django_command.yaml b/tests/unit/plugins/modules/test_django_command.yaml index 960dc1a24f..10da8753bd 100644 --- a/tests/unit/plugins/modules/test_django_command.yaml +++ b/tests/unit/plugins/modules/test_django_command.yaml @@ -4,47 +4,50 @@ # SPDX-License-Identifier: GPL-3.0-or-later --- -- 
id: command_success - input: - command: check - extra_args: - - babaloo - - yaba - - daba - - doo - settings: whatever.settings - mocks: - run_command: - - command: [/testbin/python, -m, django, --version] - environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} - rc: 0 - out: "5.1.2\n" - err: "" - - command: [/testbin/python, -m, django, check, --no-color, --settings=whatever.settings, babaloo, yaba, daba, doo] - environ: *env-def - rc: 0 - out: "whatever\n" - err: "" -- id: command_fail - input: - command: check - extra_args: - - babaloo - - yaba - - daba - - doo - settings: whatever.settings - output: - failed: true - mocks: - run_command: - - command: [/testbin/python, -m, django, --version] - environ: *env-def - rc: 0 - out: "5.1.2\n" - err: "" - - command: [/testbin/python, -m, django, check, --no-color, --settings=whatever.settings, babaloo, yaba, daba, doo] - environ: *env-def - rc: 1 - out: "whatever\n" - err: "" +anchors: + environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} +test_cases: + - id: command_success + input: + command: check + extra_args: + - babaloo + - yaba + - daba + - doo + settings: whatever.settings + mocks: + run_command: + - command: [/testbin/python, -m, django, --version] + environ: *env-def + rc: 0 + out: "5.1.2\n" + err: '' + - command: [/testbin/python, -m, django, check, --no-color, --settings=whatever.settings, babaloo, yaba, daba, doo] + environ: *env-def + rc: 0 + out: "whatever\n" + err: '' + - id: command_fail + input: + command: check + extra_args: + - babaloo + - yaba + - daba + - doo + settings: whatever.settings + output: + failed: true + mocks: + run_command: + - command: [/testbin/python, -m, django, --version] + environ: *env-def + rc: 0 + out: "5.1.2\n" + err: '' + - command: [/testbin/python, -m, django, check, --no-color, --settings=whatever.settings, babaloo, yaba, daba, doo] + environ: *env-def + rc: 1 + out: "whatever\n" + err: '' diff --git a/tests/unit/plugins/modules/test_django_createcachetable.py b/tests/unit/plugins/modules/test_django_createcachetable.py index 74bdf1cc63..3ed574e949 100644 --- a/tests/unit/plugins/modules/test_django_createcachetable.py +++ b/tests/unit/plugins/modules/test_django_createcachetable.py @@ -7,7 +7,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import django_createcachetable -from .helper import Helper, RunCommandMock # pylint: disable=unused-import +from .uthelper import UTHelper, RunCommandMock -Helper.from_module(django_createcachetable, __name__) +UTHelper.from_module(django_createcachetable, __name__, mocks=[RunCommandMock]) diff --git a/tests/unit/plugins/modules/test_django_createcachetable.yaml b/tests/unit/plugins/modules/test_django_createcachetable.yaml index a58146144a..b8056e1b2e 100644 --- a/tests/unit/plugins/modules/test_django_createcachetable.yaml +++ b/tests/unit/plugins/modules/test_django_createcachetable.yaml @@ -4,18 +4,21 @@ # SPDX-License-Identifier: GPL-3.0-or-later --- -- id: command_success - input: - settings: whatever.settings - mocks: - run_command: - - command: [/testbin/python, -m, django, --version] - environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} - rc: 0 - out: "5.1.2\n" - err: "" - - command: [/testbin/python, -m, django, createcachetable, --no-color, --settings=whatever.settings, --noinput, --database=default] - environ: *env-def - rc: 0 - out: "whatever\n" - err: "" +anchors: + environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: 
true} +test_cases: + - id: command_success + input: + settings: whatever.settings + mocks: + run_command: + - command: [/testbin/python, -m, django, --version] + environ: *env-def + rc: 0 + out: "5.1.2\n" + err: '' + - command: [/testbin/python, -m, django, createcachetable, --no-color, --settings=whatever.settings, --noinput, --database=default] + environ: *env-def + rc: 0 + out: "whatever\n" + err: '' diff --git a/tests/unit/plugins/modules/test_dnf_config_manager.py b/tests/unit/plugins/modules/test_dnf_config_manager.py index 7b231e10a5..9c0c0215a1 100644 --- a/tests/unit/plugins/modules/test_dnf_config_manager.py +++ b/tests/unit/plugins/modules/test_dnf_config_manager.py @@ -7,9 +7,9 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible_collections.community.general.tests.unit.compat.mock import patch, call +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch, call from ansible_collections.community.general.plugins.modules import dnf_config_manager as dnf_config_manager_module -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, \ +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, \ ModuleTestCase, set_module_args # Return value on all-default arguments @@ -305,22 +305,22 @@ class TestDNFConfigManager(ModuleTestCase): return result def test_get_repo_states(self): - set_module_args({}) - self.set_command_mock(execute_return=(0, mock_repolist_crb_enabled, '')) - result = self.execute_module(changed=False) + with set_module_args({}): + self.set_command_mock(execute_return=(0, mock_repolist_crb_enabled, '')) + result = self.execute_module(changed=False) self.assertEqual(result['repo_states_pre'], expected_repo_states_crb_enabled) self.assertEqual(result['repo_states_post'], expected_repo_states_crb_enabled) self.assertEqual(result['changed_repos'], []) self.run_command.assert_has_calls(calls=[call_get_repo_states, call_get_repo_states], any_order=False) def test_enable_disabled_repo(self): - set_module_args({ + with set_module_args({ 'name': ['crb'], 'state': 'enabled' - }) - side_effects = [(0, mock_repolist_crb_disabled, ''), (0, '', ''), (0, mock_repolist_crb_enabled, '')] - self.set_command_mock(execute_side_effect=side_effects) - result = self.execute_module(changed=True) + }): + side_effects = [(0, mock_repolist_crb_disabled, ''), (0, '', ''), (0, mock_repolist_crb_enabled, '')] + self.set_command_mock(execute_side_effect=side_effects) + result = self.execute_module(changed=True) self.assertEqual(result['repo_states_pre'], expected_repo_states_crb_disabled) self.assertEqual(result['repo_states_post'], expected_repo_states_crb_enabled) self.assertEqual(result['changed_repos'], ['crb']) @@ -328,25 +328,25 @@ class TestDNFConfigManager(ModuleTestCase): self.run_command.assert_has_calls(calls=expected_calls, any_order=False) def test_enable_disabled_repo_check_mode(self): - set_module_args({ + with set_module_args({ 'name': ['crb'], 'state': 'enabled', '_ansible_check_mode': True - }) - side_effects = [(0, mock_repolist_crb_disabled, ''), (0, mock_repolist_crb_disabled, '')] - self.set_command_mock(execute_side_effect=side_effects) - result = self.execute_module(changed=True) + }): + side_effects = [(0, mock_repolist_crb_disabled, ''), (0, mock_repolist_crb_disabled, '')] + self.set_command_mock(execute_side_effect=side_effects) + result = 
self.execute_module(changed=True) self.assertEqual(result['changed_repos'], ['crb']) self.run_command.assert_has_calls(calls=[call_get_repo_states], any_order=False) def test_disable_enabled_repo(self): - set_module_args({ + with set_module_args({ 'name': ['crb'], 'state': 'disabled' - }) - side_effects = [(0, mock_repolist_crb_enabled, ''), (0, '', ''), (0, mock_repolist_crb_disabled, '')] - self.set_command_mock(execute_side_effect=side_effects) - result = self.execute_module(changed=True) + }): + side_effects = [(0, mock_repolist_crb_enabled, ''), (0, '', ''), (0, mock_repolist_crb_disabled, '')] + self.set_command_mock(execute_side_effect=side_effects) + result = self.execute_module(changed=True) self.assertEqual(result['repo_states_pre'], expected_repo_states_crb_enabled) self.assertEqual(result['repo_states_post'], expected_repo_states_crb_disabled) self.assertEqual(result['changed_repos'], ['crb']) @@ -354,49 +354,49 @@ class TestDNFConfigManager(ModuleTestCase): self.run_command.assert_has_calls(calls=expected_calls, any_order=False) def test_crb_already_enabled(self): - set_module_args({ + with set_module_args({ 'name': ['crb'], 'state': 'enabled' - }) - side_effects = [(0, mock_repolist_crb_enabled, ''), (0, mock_repolist_crb_enabled, '')] - self.set_command_mock(execute_side_effect=side_effects) - result = self.execute_module(changed=False) + }): + side_effects = [(0, mock_repolist_crb_enabled, ''), (0, mock_repolist_crb_enabled, '')] + self.set_command_mock(execute_side_effect=side_effects) + result = self.execute_module(changed=False) self.assertEqual(result['repo_states_pre'], expected_repo_states_crb_enabled) self.assertEqual(result['repo_states_post'], expected_repo_states_crb_enabled) self.assertEqual(result['changed_repos'], []) self.run_command.assert_has_calls(calls=[call_get_repo_states, call_get_repo_states], any_order=False) def test_get_repo_states_fail_no_status(self): - set_module_args({}) - self.set_command_mock(execute_return=(0, mock_repolist_no_status, '')) - result = self.execute_module(failed=True) + with set_module_args({}): + self.set_command_mock(execute_return=(0, mock_repolist_no_status, '')) + result = self.execute_module(failed=True) self.assertEqual(result['msg'], 'dnf repolist parse failure: parsed another repo id before next status') self.run_command.assert_has_calls(calls=[call_get_repo_states], any_order=False) def test_get_repo_states_fail_status_before_id(self): - set_module_args({}) - self.set_command_mock(execute_return=(0, mock_repolist_status_before_id, '')) - result = self.execute_module(failed=True) + with set_module_args({}): + self.set_command_mock(execute_return=(0, mock_repolist_status_before_id, '')) + result = self.execute_module(failed=True) self.assertEqual(result['msg'], 'dnf repolist parse failure: parsed status before repo id') self.run_command.assert_has_calls(calls=[call_get_repo_states], any_order=False) def test_failed__unknown_repo_id(self): - set_module_args({ + with set_module_args({ 'name': ['fake'] - }) - self.set_command_mock(execute_return=(0, mock_repolist_crb_disabled, '')) - result = self.execute_module(failed=True) + }): + self.set_command_mock(execute_return=(0, mock_repolist_crb_disabled, '')) + result = self.execute_module(failed=True) self.assertEqual(result['msg'], "did not find repo with ID 'fake' in dnf repolist --all --verbose") self.run_command.assert_has_calls(calls=[call_get_repo_states], any_order=False) def test_failed_state_change_ineffective(self): - set_module_args({ + with set_module_args({ 
'name': ['crb'], 'state': 'enabled' - }) - side_effects = [(0, mock_repolist_crb_disabled, ''), (0, '', ''), (0, mock_repolist_crb_disabled, '')] - self.set_command_mock(execute_side_effect=side_effects) - result = self.execute_module(failed=True) + }): + side_effects = [(0, mock_repolist_crb_disabled, ''), (0, '', ''), (0, mock_repolist_crb_disabled, '')] + self.set_command_mock(execute_side_effect=side_effects) + result = self.execute_module(failed=True) self.assertEqual(result['msg'], "dnf config-manager failed to make 'crb' enabled") expected_calls = [call_get_repo_states, call_enable_crb, call_get_repo_states] self.run_command.assert_has_calls(calls=expected_calls, any_order=False) diff --git a/tests/unit/plugins/modules/test_dnsimple.py b/tests/unit/plugins/modules/test_dnsimple.py index d5578252dc..e67a5abfd4 100644 --- a/tests/unit/plugins/modules/test_dnsimple.py +++ b/tests/unit/plugins/modules/test_dnsimple.py @@ -9,8 +9,8 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type from ansible_collections.community.general.plugins.modules import dnsimple as dnsimple_module -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleFailJson, ModuleTestCase, set_module_args -from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch import pytest import sys @@ -38,8 +38,8 @@ class TestDNSimple(ModuleTestCase): def test_without_required_parameters(self): """Failure must occurs when all parameters are missing""" with self.assertRaises(AnsibleFailJson): - set_module_args({}) - self.module.main() + with set_module_args({}): + self.module.main() @patch('dnsimple.service.Identity.whoami') def test_account_token(self, mock_whoami): diff --git a/tests/unit/plugins/modules/test_dnsimple_info.py b/tests/unit/plugins/modules/test_dnsimple_info.py index 08c5296c81..b97039b9bd 100644 --- a/tests/unit/plugins/modules/test_dnsimple_info.py +++ b/tests/unit/plugins/modules/test_dnsimple_info.py @@ -9,7 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type from ansible_collections.community.general.plugins.modules import dnsimple_info -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleFailJson, ModuleTestCase, set_module_args, AnsibleExitJson +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleFailJson, ModuleTestCase, set_module_args, AnsibleExitJson from httmock import response from httmock import with_httmock from httmock import urlmatch @@ -56,19 +56,19 @@ class TestDNSimple_Info(ModuleTestCase): def test_with_no_parameters(self): """Failure must occurs when all parameters are missing""" with self.assertRaises(AnsibleFailJson): - set_module_args({}) - self.module.main() + with set_module_args({}): + self.module.main() @with_httmock(zones_resp) def test_only_key_and_account(self): """key and account will pass, returns domains""" account_id = "1234" with self.assertRaises(AnsibleExitJson) as exc_info: - set_module_args({ + with set_module_args({ "api_key": "abcd1324", "account_id": account_id - }) - self.module.main() + }): + self.module.main() result = exc_info.exception.args[0] # nothing should change self.assertFalse(result['changed']) @@ -80,12 
+80,12 @@ class TestDNSimple_Info(ModuleTestCase): """name and no record should not fail, returns the record""" name = "example.com" with self.assertRaises(AnsibleExitJson) as exc_info: - set_module_args({ + with set_module_args({ "api_key": "abcd1324", "name": "example.com", "account_id": "1234" - }) - self.module.main() + }): + self.module.main() result = exc_info.exception.args[0] # nothing should change self.assertFalse(result['changed']) @@ -97,13 +97,13 @@ class TestDNSimple_Info(ModuleTestCase): """name and record should not fail, returns the record""" record = "example" with self.assertRaises(AnsibleExitJson) as exc_info: - set_module_args({ + with set_module_args({ "api_key": "abcd1324", "account_id": "1234", "name": "example.com", "record": "example" - }) - self.module.main() + }): + self.module.main() result = exc_info.exception.args[0] # nothing should change self.assertFalse(result['changed']) diff --git a/tests/unit/plugins/modules/test_facter_facts.py b/tests/unit/plugins/modules/test_facter_facts.py index bb74216b88..7b2b94f08b 100644 --- a/tests/unit/plugins/modules/test_facter_facts.py +++ b/tests/unit/plugins/modules/test_facter_facts.py @@ -8,7 +8,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import facter_facts -from .helper import Helper, RunCommandMock # pylint: disable=unused-import +from .uthelper import UTHelper, RunCommandMock -Helper.from_module(facter_facts, __name__) +UTHelper.from_module(facter_facts, __name__, mocks=[RunCommandMock]) diff --git a/tests/unit/plugins/modules/test_facter_facts.yaml b/tests/unit/plugins/modules/test_facter_facts.yaml index e53f7fe60f..89a98714c5 100644 --- a/tests/unit/plugins/modules/test_facter_facts.yaml +++ b/tests/unit/plugins/modules/test_facter_facts.yaml @@ -4,39 +4,42 @@ # SPDX-License-Identifier: GPL-3.0-or-later --- -- id: simple run - output: - ansible_facts: - facter: - a: 1 - b: 2 - c: 3 - mocks: - run_command: - - command: [/testbin/facter, --json] - environ: &env-def {check_rc: true} - rc: 0 - out: > - { "a": 1, "b": 2, "c": 3 } - err: "" -- id: with args - input: - arguments: - - -p - - system_uptime - - timezone - - is_virtual - output: - ansible_facts: - facter: - a: 1 - b: 2 - c: 3 - mocks: - run_command: - - command: [/testbin/facter, --json, -p, system_uptime, timezone, is_virtual] - environ: *env-def - rc: 0 - out: > - { "a": 1, "b": 2, "c": 3 } - err: "" +anchors: + environ: &env-def {check_rc: true} +test_cases: + - id: simple run + output: + ansible_facts: + facter: + a: 1 + b: 2 + c: 3 + mocks: + run_command: + - command: [/testbin/facter, --json] + environ: *env-def + rc: 0 + out: > + { "a": 1, "b": 2, "c": 3 } + err: '' + - id: with args + input: + arguments: + - -p + - system_uptime + - timezone + - is_virtual + output: + ansible_facts: + facter: + a: 1 + b: 2 + c: 3 + mocks: + run_command: + - command: [/testbin/facter, --json, -p, system_uptime, timezone, is_virtual] + environ: *env-def + rc: 0 + out: > + { "a": 1, "b": 2, "c": 3 } + err: '' diff --git a/tests/unit/plugins/modules/test_gconftool2.py b/tests/unit/plugins/modules/test_gconftool2.py index 2ba2e1c70e..ed140bd3e7 100644 --- a/tests/unit/plugins/modules/test_gconftool2.py +++ b/tests/unit/plugins/modules/test_gconftool2.py @@ -8,7 +8,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import gconftool2 -from .helper import Helper, RunCommandMock # pylint: disable=unused-import +from .uthelper import UTHelper, RunCommandMock -Helper.from_module(gconftool2, __name__) 
+UTHelper.from_module(gconftool2, __name__, mocks=[RunCommandMock]) diff --git a/tests/unit/plugins/modules/test_gconftool2.yaml b/tests/unit/plugins/modules/test_gconftool2.yaml index badbdf2614..19d389247f 100644 --- a/tests/unit/plugins/modules/test_gconftool2.yaml +++ b/tests/unit/plugins/modules/test_gconftool2.yaml @@ -4,147 +4,150 @@ # SPDX-License-Identifier: GPL-3.0-or-later --- -- id: test_simple_element_set - input: - state: present - key: /desktop/gnome/background/picture_filename - value: 200 - value_type: int - output: - new_value: '200' - changed: true - version: "3.2.6" - mocks: - run_command: - - command: [/testbin/gconftool-2, --version] - environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} - rc: 0 - out: "3.2.6\n" - err: "" - - command: [/testbin/gconftool-2, --get, /desktop/gnome/background/picture_filename] - environ: *env-def - rc: 0 - out: "100\n" - err: "" - - command: [/testbin/gconftool-2, --type, int, --set, /desktop/gnome/background/picture_filename, "200"] - environ: *env-def - rc: 0 - out: "" - err: "" - - command: [/testbin/gconftool-2, --get, /desktop/gnome/background/picture_filename] - environ: *env-def - rc: 0 - out: "200\n" - err: "" -- id: test_simple_element_set_idempotency_int - input: - state: present - key: /desktop/gnome/background/picture_filename - value: 200 - value_type: int - output: - new_value: '200' - changed: false - version: "3.2.5" - mocks: - run_command: - - command: [/testbin/gconftool-2, --version] - environ: *env-def - rc: 0 - out: "3.2.5\n" - err: "" - - command: [/testbin/gconftool-2, --get, /desktop/gnome/background/picture_filename] - environ: *env-def - rc: 0 - out: "200\n" - err: "" - - command: [/testbin/gconftool-2, --type, int, --set, /desktop/gnome/background/picture_filename, "200"] - environ: *env-def - rc: 0 - out: "" - err: "" - - command: [/testbin/gconftool-2, --get, /desktop/gnome/background/picture_filename] - environ: *env-def - rc: 0 - out: "200\n" - err: "" -- id: test_simple_element_set_idempotency_bool - input: - state: present - key: /apps/gnome_settings_daemon/screensaver/start_screensaver - value: false - value_type: bool - output: - new_value: 'false' - changed: false - version: "3.2.4" - mocks: - run_command: - - command: [/testbin/gconftool-2, --version] - environ: *env-def - rc: 0 - out: "3.2.4\n" - err: "" - - command: [/testbin/gconftool-2, --get, /apps/gnome_settings_daemon/screensaver/start_screensaver] - environ: *env-def - rc: 0 - out: "false\n" - err: "" - - command: [/testbin/gconftool-2, --type, bool, --set, /apps/gnome_settings_daemon/screensaver/start_screensaver, "False"] - environ: *env-def - rc: 0 - out: "" - err: "" - - command: [/testbin/gconftool-2, --get, /apps/gnome_settings_daemon/screensaver/start_screensaver] - environ: *env-def - rc: 0 - out: "false\n" - err: "" -- id: test_simple_element_unset - input: - state: absent - key: /desktop/gnome/background/picture_filename - output: - new_value: - changed: true - mocks: - run_command: - - command: [/testbin/gconftool-2, --version] - environ: *env-def - rc: 0 - out: "3.2.4\n" - err: "" - - command: [/testbin/gconftool-2, --get, /desktop/gnome/background/picture_filename] - environ: *env-def - rc: 0 - out: "200\n" - err: "" - - command: [/testbin/gconftool-2, --unset, /desktop/gnome/background/picture_filename] - environ: *env-def - rc: 0 - out: "" - err: "" -- id: test_simple_element_unset_idempotency - input: - state: absent - key: /apps/gnome_settings_daemon/screensaver/start_screensaver - output: - new_value: 
- changed: false - mocks: - run_command: - - command: [/testbin/gconftool-2, --version] - environ: *env-def - rc: 0 - out: "3.2.4\n" - err: "" - - command: [/testbin/gconftool-2, --get, /apps/gnome_settings_daemon/screensaver/start_screensaver] - environ: *env-def - rc: 0 - out: "" - err: "" - - command: [/testbin/gconftool-2, --unset, /apps/gnome_settings_daemon/screensaver/start_screensaver] - environ: *env-def - rc: 0 - out: "" - err: "" +anchors: + environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} +test_cases: + - id: test_simple_element_set + input: + state: present + key: /desktop/gnome/background/picture_filename + value: 200 + value_type: int + output: + new_value: '200' + changed: true + version: 3.2.6 + mocks: + run_command: + - command: [/testbin/gconftool-2, --version] + environ: *env-def + rc: 0 + out: "3.2.6\n" + err: '' + - command: [/testbin/gconftool-2, --get, /desktop/gnome/background/picture_filename] + environ: *env-def + rc: 0 + out: "100\n" + err: '' + - command: [/testbin/gconftool-2, --type, int, --set, /desktop/gnome/background/picture_filename, '200'] + environ: *env-def + rc: 0 + out: '' + err: '' + - command: [/testbin/gconftool-2, --get, /desktop/gnome/background/picture_filename] + environ: *env-def + rc: 0 + out: "200\n" + err: '' + - id: test_simple_element_set_idempotency_int + input: + state: present + key: /desktop/gnome/background/picture_filename + value: 200 + value_type: int + output: + new_value: '200' + changed: false + version: 3.2.5 + mocks: + run_command: + - command: [/testbin/gconftool-2, --version] + environ: *env-def + rc: 0 + out: "3.2.5\n" + err: '' + - command: [/testbin/gconftool-2, --get, /desktop/gnome/background/picture_filename] + environ: *env-def + rc: 0 + out: "200\n" + err: '' + - command: [/testbin/gconftool-2, --type, int, --set, /desktop/gnome/background/picture_filename, '200'] + environ: *env-def + rc: 0 + out: '' + err: '' + - command: [/testbin/gconftool-2, --get, /desktop/gnome/background/picture_filename] + environ: *env-def + rc: 0 + out: "200\n" + err: '' + - id: test_simple_element_set_idempotency_bool + input: + state: present + key: /apps/gnome_settings_daemon/screensaver/start_screensaver + value: false + value_type: bool + output: + new_value: 'false' + changed: false + version: 3.2.4 + mocks: + run_command: + - command: [/testbin/gconftool-2, --version] + environ: *env-def + rc: 0 + out: "3.2.4\n" + err: '' + - command: [/testbin/gconftool-2, --get, /apps/gnome_settings_daemon/screensaver/start_screensaver] + environ: *env-def + rc: 0 + out: "false\n" + err: '' + - command: [/testbin/gconftool-2, --type, bool, --set, /apps/gnome_settings_daemon/screensaver/start_screensaver, 'False'] + environ: *env-def + rc: 0 + out: '' + err: '' + - command: [/testbin/gconftool-2, --get, /apps/gnome_settings_daemon/screensaver/start_screensaver] + environ: *env-def + rc: 0 + out: "false\n" + err: '' + - id: test_simple_element_unset + input: + state: absent + key: /desktop/gnome/background/picture_filename + output: + new_value: + changed: true + mocks: + run_command: + - command: [/testbin/gconftool-2, --version] + environ: *env-def + rc: 0 + out: "3.2.4\n" + err: '' + - command: [/testbin/gconftool-2, --get, /desktop/gnome/background/picture_filename] + environ: *env-def + rc: 0 + out: "200\n" + err: '' + - command: [/testbin/gconftool-2, --unset, /desktop/gnome/background/picture_filename] + environ: *env-def + rc: 0 + out: '' + err: '' + - id: test_simple_element_unset_idempotency + input: + state: 
absent + key: /apps/gnome_settings_daemon/screensaver/start_screensaver + output: + new_value: + changed: false + mocks: + run_command: + - command: [/testbin/gconftool-2, --version] + environ: *env-def + rc: 0 + out: "3.2.4\n" + err: '' + - command: [/testbin/gconftool-2, --get, /apps/gnome_settings_daemon/screensaver/start_screensaver] + environ: *env-def + rc: 0 + out: '' + err: '' + - command: [/testbin/gconftool-2, --unset, /apps/gnome_settings_daemon/screensaver/start_screensaver] + environ: *env-def + rc: 0 + out: '' + err: '' diff --git a/tests/unit/plugins/modules/test_gconftool2_info.py b/tests/unit/plugins/modules/test_gconftool2_info.py index 4daa655714..6cbba8dbc0 100644 --- a/tests/unit/plugins/modules/test_gconftool2_info.py +++ b/tests/unit/plugins/modules/test_gconftool2_info.py @@ -8,7 +8,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import gconftool2_info -from .helper import Helper, RunCommandMock # pylint: disable=unused-import +from .uthelper import UTHelper, RunCommandMock -Helper.from_module(gconftool2_info, __name__) +UTHelper.from_module(gconftool2_info, __name__, mocks=[RunCommandMock]) diff --git a/tests/unit/plugins/modules/test_gconftool2_info.yaml b/tests/unit/plugins/modules/test_gconftool2_info.yaml index fac04430a0..141b473e35 100644 --- a/tests/unit/plugins/modules/test_gconftool2_info.yaml +++ b/tests/unit/plugins/modules/test_gconftool2_info.yaml @@ -4,37 +4,40 @@ # SPDX-License-Identifier: GPL-3.0-or-later --- -- id: test_simple_element_get - input: - key: /desktop/gnome/background/picture_filename - output: - value: '100' - mocks: - run_command: - - command: [/testbin/gconftool-2, --version] - environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} - rc: 0 - out: "3.2.6\n" - err: "" - - command: [/testbin/gconftool-2, --get, /desktop/gnome/background/picture_filename] - environ: *env-def - rc: 0 - out: "100\n" - err: "" -- id: test_simple_element_get_not_found - input: - key: /desktop/gnome/background/picture_filename - output: - value: - mocks: - run_command: - - command: [/testbin/gconftool-2, --version] - environ: *env-def - rc: 0 - out: "3.2.6\n" - err: "" - - command: [/testbin/gconftool-2, --get, /desktop/gnome/background/picture_filename] - environ: *env-def - rc: 0 - out: "" - err: "No value set for `/desktop/gnome/background/picture_filename'\n" +anchors: + environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} +test_cases: + - id: test_simple_element_get + input: + key: /desktop/gnome/background/picture_filename + output: + value: '100' + mocks: + run_command: + - command: [/testbin/gconftool-2, --version] + environ: *env-def + rc: 0 + out: "3.2.6\n" + err: '' + - command: [/testbin/gconftool-2, --get, /desktop/gnome/background/picture_filename] + environ: *env-def + rc: 0 + out: "100\n" + err: '' + - id: test_simple_element_get_not_found + input: + key: /desktop/gnome/background/picture_filename + output: + value: + mocks: + run_command: + - command: [/testbin/gconftool-2, --version] + environ: *env-def + rc: 0 + out: "3.2.6\n" + err: '' + - command: [/testbin/gconftool-2, --get, /desktop/gnome/background/picture_filename] + environ: *env-def + rc: 0 + out: '' + err: "No value set for `/desktop/gnome/background/picture_filename'\n" diff --git a/tests/unit/plugins/modules/test_gem.py b/tests/unit/plugins/modules/test_gem.py index 10c03e537d..78c73be5a6 100644 --- a/tests/unit/plugins/modules/test_gem.py +++ b/tests/unit/plugins/modules/test_gem.py @@ -9,7 +9,7 
@@ import copy import pytest from ansible_collections.community.general.plugins.modules import gem -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args def get_command(run_command): @@ -55,14 +55,13 @@ class TestGem(ModuleTestCase): return self.mocker.patch(target) def test_fails_when_user_install_and_install_dir_are_combined(self): - set_module_args({ + with set_module_args({ 'name': 'dummy', 'user_install': True, 'install_dir': '/opt/dummy', - }) - - with pytest.raises(AnsibleFailJson) as exc: - gem.main() + }): + with pytest.raises(AnsibleFailJson) as exc: + gem.main() result = exc.value.args[0] assert result['failed'] @@ -75,18 +74,17 @@ class TestGem(ModuleTestCase): # test mocks. The only thing that matters is the assertion that this 'gem install' is # invoked with '--install-dir'. - set_module_args({ + with set_module_args({ 'name': 'dummy', 'user_install': False, 'install_dir': '/opt/dummy', - }) + }): + self.patch_rubygems_version() + self.patch_installed_versions([]) + run_command = self.patch_run_command() - self.patch_rubygems_version() - self.patch_installed_versions([]) - run_command = self.patch_run_command() - - with pytest.raises(AnsibleExitJson) as exc: - gem.main() + with pytest.raises(AnsibleExitJson) as exc: + gem.main() result = exc.value.args[0] assert result['changed'] @@ -98,20 +96,19 @@ class TestGem(ModuleTestCase): # XXX: This test is also extremely fragile because of mocking. # If this breaks, the only that matters is to check whether '--install-dir' is # in the run command, and that GEM_HOME is passed to the command. 
- set_module_args({ + with set_module_args({ 'name': 'dummy', 'user_install': False, 'install_dir': '/opt/dummy', 'state': 'absent', - }) + }): + self.patch_rubygems_version() + self.patch_installed_versions(['1.0.0']) - self.patch_rubygems_version() - self.patch_installed_versions(['1.0.0']) + run_command = self.patch_run_command() - run_command = self.patch_run_command() - - with pytest.raises(AnsibleExitJson) as exc: - gem.main() + with pytest.raises(AnsibleExitJson) as exc: + gem.main() result = exc.value.args[0] @@ -124,17 +121,16 @@ class TestGem(ModuleTestCase): assert update_environ.get('GEM_HOME') == '/opt/dummy' def test_passes_add_force_option(self): - set_module_args({ + with set_module_args({ 'name': 'dummy', 'force': True, - }) + }): + self.patch_rubygems_version() + self.patch_installed_versions([]) + run_command = self.patch_run_command() - self.patch_rubygems_version() - self.patch_installed_versions([]) - run_command = self.patch_run_command() - - with pytest.raises(AnsibleExitJson) as exc: - gem.main() + with pytest.raises(AnsibleExitJson) as exc: + gem.main() result = exc.value.args[0] assert result['changed'] diff --git a/tests/unit/plugins/modules/test_gio_mime.py b/tests/unit/plugins/modules/test_gio_mime.py index 5e51320485..180890cc39 100644 --- a/tests/unit/plugins/modules/test_gio_mime.py +++ b/tests/unit/plugins/modules/test_gio_mime.py @@ -8,7 +8,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import gio_mime -from .helper import Helper, RunCommandMock # pylint: disable=unused-import +from .uthelper import UTHelper, RunCommandMock -Helper.from_module(gio_mime, __name__) +UTHelper.from_module(gio_mime, __name__, mocks=[RunCommandMock]) diff --git a/tests/unit/plugins/modules/test_gio_mime.yaml b/tests/unit/plugins/modules/test_gio_mime.yaml index 3645446291..0d98f25503 100644 --- a/tests/unit/plugins/modules/test_gio_mime.yaml +++ b/tests/unit/plugins/modules/test_gio_mime.yaml @@ -4,85 +4,122 @@ # SPDX-License-Identifier: GPL-3.0-or-later --- -- id: test_set_handler - input: - handler: google-chrome.desktop - mime_type: x-scheme-handler/http - output: - handler: google-chrome.desktop - changed: true - mocks: - run_command: - - command: [/testbin/gio, --version] - environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} - rc: 0 - out: "2.80.0\n" - err: "" - - command: [/testbin/gio, mime, x-scheme-handler/http] - environ: *env-def - rc: 0 - out: "" - err: > - No default applications for “x-scheme-handler/http” - - command: [/testbin/gio, mime, x-scheme-handler/http, google-chrome.desktop] - environ: *env-def - rc: 0 - out: "Set google-chrome.desktop as the default for x-scheme-handler/http\n" - err: "" -- id: test_set_handler_check - input: - handler: google-chrome.desktop - mime_type: x-scheme-handler/http - output: - handler: google-chrome.desktop - changed: true - flags: - skip: test helper does not support check mode yet - mocks: - run_command: - - command: [/testbin/gio, --version] - environ: *env-def - rc: 0 - out: "2.80.0\n" - err: "" - - command: [/testbin/gio, mime, x-scheme-handler/http] - environ: *env-def - rc: 0 - out: "" - err: > - No default applications for “x-scheme-handler/http” - - command: [/testbin/gio, mime, x-scheme-handler/http, google-chrome.desktop] - environ: *env-def - rc: 0 - out: "Set google-chrome.desktop as the default for x-scheme-handler/http\n" - err: "" -- id: test_set_handler_idempot - input: - handler: google-chrome.desktop - mime_type: x-scheme-handler/http - output: 
- handler: google-chrome.desktop - changed: false - mocks: - run_command: - - command: [/testbin/gio, --version] - environ: *env-def - rc: 0 - out: "2.80.0\n" - err: "" - - command: [/testbin/gio, mime, x-scheme-handler/http] - environ: *env-def - rc: 0 - out: | - Default application for “x-scheme-handler/https”: google-chrome.desktop - Registered applications: - brave-browser.desktop - firefox.desktop - google-chrome.desktop - firefox_firefox.desktop - Recommended applications: - brave-browser.desktop - firefox.desktop - google-chrome.desktop - firefox_firefox.desktop - err: "" +anchors: + environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} +test_cases: + - id: test_set_handler + input: + handler: google-chrome.desktop + mime_type: x-scheme-handler/http + output: + handler: google-chrome.desktop + changed: true + mocks: + run_command: + - command: [/testbin/gio, --version] + environ: *env-def + rc: 0 + out: "2.80.0\n" + err: '' + - command: [/testbin/gio, mime, x-scheme-handler/http] + environ: *env-def + rc: 0 + out: '' + err: > + No default applications for “x-scheme-handler/http” + - command: [/testbin/gio, mime, x-scheme-handler/http, google-chrome.desktop] + environ: *env-def + rc: 0 + out: "Set google-chrome.desktop as the default for x-scheme-handler/http\n" + err: '' + - id: test_set_handler_check + input: + handler: google-chrome.desktop + mime_type: x-scheme-handler/http + output: + handler: google-chrome.desktop + changed: true + stdout: Module executed in check mode + diff: + before: + handler: null + after: + handler: google-chrome.desktop + flags: + check: true + diff: true + mocks: + run_command: + - command: [/testbin/gio, --version] + environ: *env-def + rc: 0 + out: "2.80.0\n" + err: '' + - command: [/testbin/gio, mime, x-scheme-handler/http] + environ: *env-def + rc: 0 + out: '' + err: > + No default applications for “x-scheme-handler/http” + - id: test_set_handler_idempot + input: + handler: google-chrome.desktop + mime_type: x-scheme-handler/http + output: + handler: google-chrome.desktop + changed: false + mocks: + run_command: + - command: [/testbin/gio, --version] + environ: *env-def + rc: 0 + out: "2.80.0\n" + err: '' + - command: [/testbin/gio, mime, x-scheme-handler/http] + environ: *env-def + rc: 0 + out: | + Default application for “x-scheme-handler/https”: google-chrome.desktop + Registered applications: + brave-browser.desktop + firefox.desktop + google-chrome.desktop + firefox_firefox.desktop + Recommended applications: + brave-browser.desktop + firefox.desktop + google-chrome.desktop + firefox_firefox.desktop + err: '' + - id: test_set_handler_idempot_check + input: + handler: google-chrome.desktop + mime_type: x-scheme-handler/http + output: + handler: google-chrome.desktop + changed: false + flags: + check: true + mocks: + run_command: + - command: [/testbin/gio, --version] + environ: *env-def + rc: 0 + out: "2.80.0\n" + err: '' + - command: [/testbin/gio, mime, x-scheme-handler/http] + environ: *env-def + rc: 0 + out: | + Default application for “x-scheme-handler/https”: google-chrome.desktop + Registered applications: + brave-browser.desktop + firefox.desktop + google-chrome.desktop + firefox_firefox.desktop + Recommended applications: + brave-browser.desktop + firefox.desktop + google-chrome.desktop + firefox_firefox.desktop + err: '' diff --git a/tests/unit/plugins/modules/test_github_repo.py b/tests/unit/plugins/modules/test_github_repo.py index 10227aadfb..9a76fc4b69 100644 --- 
a/tests/unit/plugins/modules/test_github_repo.py +++ b/tests/unit/plugins/modules/test_github_repo.py @@ -7,12 +7,13 @@ __metaclass__ = type import re import json -import sys +import pytest from httmock import with_httmock, urlmatch, response -from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest from ansible_collections.community.general.plugins.modules import github_repo -GITHUB_MINIMUM_PYTHON_VERSION = (2, 7) + +pytest.importorskip('github') @urlmatch(netloc=r'.*') @@ -167,11 +168,6 @@ def delete_repo_notfound_mock(url, request): class TestGithubRepo(unittest.TestCase): - def setUp(self): - if sys.version_info < GITHUB_MINIMUM_PYTHON_VERSION: - self.skipTest("Python %s+ is needed for PyGithub" % - ",".join(map(str, GITHUB_MINIMUM_PYTHON_VERSION))) - @with_httmock(get_orgs_mock) @with_httmock(get_repo_notfound_mock) @with_httmock(create_new_org_repo_mock) diff --git a/tests/unit/plugins/modules/test_gitlab_group_access_token.py b/tests/unit/plugins/modules/test_gitlab_group_access_token.py index 06af948204..cc7644060f 100644 --- a/tests/unit/plugins/modules/test_gitlab_group_access_token.py +++ b/tests/unit/plugins/modules/test_gitlab_group_access_token.py @@ -68,9 +68,33 @@ class TestGitlabGroupAccessToken(GitlabModuleTestCase): group = self.gitlab_instance.groups.get(1) self.assertIsNotNone(group) - rvalue = self.moduleUtil.find_access_token(group, "token1") + rvalue = self.moduleUtil.find_access_token(group, "test-token") self.assertEqual(rvalue, False) self.assertIsNotNone(self.moduleUtil.access_token_object) + self.assertEqual(self.moduleUtil.access_token_object.id, 691) + self.assertFalse(self.moduleUtil.access_token_object.revoked) + + @with_httmock(resp_get_group) + @with_httmock(resp_list_group_access_tokens) + def test_find_access_token_old_format(self): + group = self.gitlab_instance.groups.get(1) + self.assertIsNotNone(group) + + rvalue = self.moduleUtil.find_access_token(group, "test-token-no-revoked") + self.assertEqual(rvalue, False) + self.assertIsNotNone(self.moduleUtil.access_token_object) + self.assertEqual(self.moduleUtil.access_token_object.id, 695) + self.assertFalse(hasattr(self.moduleUtil.access_token_object, "revoked")) + + @with_httmock(resp_get_group) + @with_httmock(resp_list_group_access_tokens) + def test_find_revoked_access_token(self): + group = self.gitlab_instance.groups.get(1) + self.assertIsNotNone(group) + + rvalue = self.moduleUtil.find_access_token(group, "test-token-three") + self.assertEqual(rvalue, False) + self.assertIsNone(self.moduleUtil.access_token_object) @with_httmock(resp_get_group) @with_httmock(resp_list_group_access_tokens) @@ -99,7 +123,7 @@ class TestGitlabGroupAccessToken(GitlabModuleTestCase): groups = self.gitlab_instance.groups.get(1) self.assertIsNotNone(groups) - rvalue = self.moduleUtil.find_access_token(groups, "token1") + rvalue = self.moduleUtil.find_access_token(groups, "test-token") self.assertEqual(rvalue, False) self.assertIsNotNone(self.moduleUtil.access_token_object) diff --git a/tests/unit/plugins/modules/test_gitlab_project_access_token.py b/tests/unit/plugins/modules/test_gitlab_project_access_token.py index ebc324b889..050c2435fa 100644 --- a/tests/unit/plugins/modules/test_gitlab_project_access_token.py +++ b/tests/unit/plugins/modules/test_gitlab_project_access_token.py @@ -68,9 +68,33 @@ class TestGitlabProjectAccessToken(GitlabModuleTestCase): project = self.gitlab_instance.projects.get(1) 
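# Taken together, the new cases above (groups) and their mirror below (projects) pin
# down how find_access_token() treats the `revoked` field: an active token matches and
# is kept in access_token_object, a payload with no `revoked` attribute (older GitLab
# server responses) is treated as active, and a revoked token is ignored outright. A
# minimal sketch of that matching rule, using a hypothetical token object:
#
#     def _matches(token, name):
#         # A missing `revoked` field (old API format) counts as not revoked.
#         return token.name == name and not getattr(token, "revoked", False)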
self.assertIsNotNone(project) - rvalue = self.moduleUtil.find_access_token(project, "token1") + rvalue = self.moduleUtil.find_access_token(project, "test-token") self.assertEqual(rvalue, False) self.assertIsNotNone(self.moduleUtil.access_token_object) + self.assertEqual(self.moduleUtil.access_token_object.id, 691) + self.assertFalse(self.moduleUtil.access_token_object.revoked) + + @with_httmock(resp_get_project) + @with_httmock(resp_list_project_access_tokens) + def test_find_access_token_old_format(self): + project = self.gitlab_instance.projects.get(1) + self.assertIsNotNone(project) + + rvalue = self.moduleUtil.find_access_token(project, "test-token-no-revoked") + self.assertEqual(rvalue, False) + self.assertIsNotNone(self.moduleUtil.access_token_object) + self.assertEqual(self.moduleUtil.access_token_object.id, 695) + self.assertFalse(hasattr(self.moduleUtil.access_token_object, "revoked")) + + @with_httmock(resp_get_project) + @with_httmock(resp_list_project_access_tokens) + def test_find_revoked_access_token(self): + project = self.gitlab_instance.projects.get(1) + self.assertIsNotNone(project) + + rvalue = self.moduleUtil.find_access_token(project, "test-token-three") + self.assertEqual(rvalue, False) + self.assertIsNone(self.moduleUtil.access_token_object) @with_httmock(resp_get_project) @with_httmock(resp_list_project_access_tokens) @@ -99,7 +123,7 @@ class TestGitlabProjectAccessToken(GitlabModuleTestCase): project = self.gitlab_instance.projects.get(1) self.assertIsNotNone(project) - rvalue = self.moduleUtil.find_access_token(project, "token1") + rvalue = self.moduleUtil.find_access_token(project, "test-token") self.assertEqual(rvalue, False) self.assertIsNotNone(self.moduleUtil.access_token_object) diff --git a/tests/unit/plugins/modules/test_homebrew.py b/tests/unit/plugins/modules/test_homebrew.py index d04ca4de58..2404c68721 100644 --- a/tests/unit/plugins/modules/test_homebrew.py +++ b/tests/unit/plugins/modules/test_homebrew.py @@ -6,7 +6,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest from ansible_collections.community.general.plugins.module_utils.homebrew import HomebrewValidate diff --git a/tests/unit/plugins/modules/test_homebrew_cask.py b/tests/unit/plugins/modules/test_homebrew_cask.py index 6fcc06d976..85f5920912 100644 --- a/tests/unit/plugins/modules/test_homebrew_cask.py +++ b/tests/unit/plugins/modules/test_homebrew_cask.py @@ -6,18 +6,33 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.plugins.modules.homebrew_cask import HomebrewCask +from ansible_collections.community.general.plugins.modules.homebrew_cask import ( + HomebrewCask, +) +from ansible_collections.community.general.plugins.module_utils.homebrew import ( + HomebrewValidate, +) -class TestHomebrewCaskModule(unittest.TestCase): +def test_valid_cask_names(): + brew_cask_names = ["visual-studio-code", "firefox"] + for name in brew_cask_names: + assert HomebrewCask.valid_cask(name) - def setUp(self): - self.brew_cask_names = [ - 'visual-studio-code', - 'firefox' - ] - def test_valid_cask_names(self): - for name in self.brew_cask_names: - self.assertTrue(HomebrewCask.valid_cask(name)) +def test_homebrew_version(mocker): + brew_versions = 
[ + "Homebrew 4.1.0", + "Homebrew >=4.1.0 (shallow or no git repository)", + "Homebrew 4.1.0-dirty", + ] + module = mocker.Mock() + + mocker.patch.object(HomebrewCask, "valid_module", return_value=True) + mocker.patch.object(HomebrewValidate, "valid_path", return_value=True) + mocker.patch.object(HomebrewValidate, "valid_brew_path", return_value=True) + + homebrewcask = HomebrewCask(module=module) + for version in brew_versions: + module.run_command.return_value = (0, version, "") + assert homebrewcask._get_brew_version() == "4.1.0" diff --git a/tests/unit/plugins/modules/test_icinga2_feature.py b/tests/unit/plugins/modules/test_icinga2_feature.py index 23c94fad58..730cdc11f0 100644 --- a/tests/unit/plugins/modules/test_icinga2_feature.py +++ b/tests/unit/plugins/modules/test_icinga2_feature.py @@ -10,8 +10,8 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible_collections.community.general.plugins.modules import icinga2_feature -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args -from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch from ansible.module_utils import basic @@ -38,63 +38,63 @@ class TestIcinga2Feature(ModuleTestCase): def test_without_required_parameters(self): """Failure must occurs when all parameters are missing.""" with self.assertRaises(AnsibleFailJson): - set_module_args({}) - self.module.main() + with set_module_args({}): + self.module.main() def test_enable_feature(self): """Check that result is changed.""" - set_module_args({ + with set_module_args({ 'name': 'api', - }) - with patch.object(basic.AnsibleModule, 'run_command') as run_command: - run_command.return_value = 0, '', '' # successful execution, no output - with self.assertRaises(AnsibleExitJson) as result: - icinga2_feature.main() - self.assertTrue(result.exception.args[0]['changed']) + }): + with patch.object(basic.AnsibleModule, 'run_command') as run_command: + run_command.return_value = 0, '', '' # successful execution, no output + with self.assertRaises(AnsibleExitJson) as result: + icinga2_feature.main() + self.assertTrue(result.exception.args[0]['changed']) self.assertEqual(run_command.call_count, 2) self.assertEqual(run_command.call_args[0][0][-1], 'api') def test_enable_feature_with_check_mode(self): """Check that result is changed in check mode.""" - set_module_args({ + with set_module_args({ 'name': 'api', '_ansible_check_mode': True, - }) - with patch.object(basic.AnsibleModule, 'run_command') as run_command: - run_command.return_value = 0, '', '' # successful execution, no output - with self.assertRaises(AnsibleExitJson) as result: - icinga2_feature.main() - self.assertTrue(result.exception.args[0]['changed']) + }): + with patch.object(basic.AnsibleModule, 'run_command') as run_command: + run_command.return_value = 0, '', '' # successful execution, no output + with self.assertRaises(AnsibleExitJson) as result: + icinga2_feature.main() + self.assertTrue(result.exception.args[0]['changed']) self.assertEqual(run_command.call_count, 1) def test_disable_feature(self): """Check that result is changed.""" - set_module_args({ + with set_module_args({ 'name': 'api', 'state': 'absent' - 
}) - with patch.object(basic.AnsibleModule, 'run_command') as run_command: - run_command.return_value = 0, '', '' # successful execution, no output - with self.assertRaises(AnsibleExitJson) as result: - icinga2_feature.main() - self.assertTrue(result.exception.args[0]['changed']) + }): + with patch.object(basic.AnsibleModule, 'run_command') as run_command: + run_command.return_value = 0, '', '' # successful execution, no output + with self.assertRaises(AnsibleExitJson) as result: + icinga2_feature.main() + self.assertTrue(result.exception.args[0]['changed']) self.assertEqual(run_command.call_count, 2) self.assertEqual(run_command.call_args[0][0][-1], 'api') def test_disable_feature_with_check_mode(self): """Check that result is changed in check mode.""" - set_module_args({ + with set_module_args({ 'name': 'api', 'state': 'absent', '_ansible_check_mode': True, - }) - with patch.object(basic.AnsibleModule, 'run_command') as run_command: - run_command.return_value = 0, '', '' # successful execution, no output - with self.assertRaises(AnsibleExitJson) as result: - icinga2_feature.main() - self.assertTrue(result.exception.args[0]['changed']) + }): + with patch.object(basic.AnsibleModule, 'run_command') as run_command: + run_command.return_value = 0, '', '' # successful execution, no output + with self.assertRaises(AnsibleExitJson) as result: + icinga2_feature.main() + self.assertTrue(result.exception.args[0]['changed']) self.assertEqual(run_command.call_count, 1) diff --git a/tests/unit/plugins/modules/test_ipa_getkeytab.py b/tests/unit/plugins/modules/test_ipa_getkeytab.py index e4e8ed2ece..467d87fff7 100644 --- a/tests/unit/plugins/modules/test_ipa_getkeytab.py +++ b/tests/unit/plugins/modules/test_ipa_getkeytab.py @@ -6,9 +6,9 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible_collections.community.general.tests.unit.compat.mock import call, patch +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import call, patch from ansible_collections.community.general.plugins.modules import ipa_getkeytab -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args class IPAKeytabModuleTestCase(ModuleTestCase): @@ -34,18 +34,17 @@ class IPAKeytabModuleTestCase(ModuleTestCase): return exc.exception.args[0] def test_present(self): - set_module_args({ + with set_module_args({ 'path': '/tmp/test.keytab', 'principal': 'HTTP/freeipa-dc02.ipa.test', 'ipa_host': 'freeipa-dc01.ipa.test', 'state': 'present' - }) + }): + self.module_main_command.side_effect = [ + (0, '{}', ''), + ] - self.module_main_command.side_effect = [ - (0, '{}', ''), - ] - - result = self.module_main(AnsibleExitJson) + result = self.module_main(AnsibleExitJson) self.assertTrue(result['changed']) self.module_main_command.assert_has_calls([ diff --git a/tests/unit/plugins/modules/test_ipa_otpconfig.py b/tests/unit/plugins/modules/test_ipa_otpconfig.py index 718359a301..df26babd0c 100644 --- a/tests/unit/plugins/modules/test_ipa_otpconfig.py +++ b/tests/unit/plugins/modules/test_ipa_otpconfig.py @@ -9,9 +9,9 @@ __metaclass__ = type from contextlib import contextmanager -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.tests.unit.compat.mock import call, patch -from 
ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import call, patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args from ansible_collections.community.general.plugins.modules import ipa_otpconfig @@ -61,12 +61,12 @@ class TestIPAOTPConfig(ModuleTestCase): changed (bool): Whether or not the module is supposed to be marked as changed """ - set_module_args(module_args) # Run the module - with patch_ipa(return_value=return_value) as (mock_login, mock_post): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with patch_ipa(return_value=return_value) as (mock_login, mock_post): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() # Verify that the calls to _post_json match what is expected expected_call_count = len(mock_calls) @@ -389,16 +389,15 @@ class TestIPAOTPConfig(ModuleTestCase): def test_fail_post(self): """Fail due to an exception raised from _post_json""" - set_module_args({ + with set_module_args({ 'ipatokentotpauthwindow': 11, 'ipatokentotpsyncwindow': 12, 'ipatokenhotpauthwindow': 13, 'ipatokenhotpsyncwindow': 14 - }) - - with patch_ipa(side_effect=Exception('ERROR MESSAGE')) as (mock_login, mock_post): - with self.assertRaises(AnsibleFailJson) as exec_info: - self.module.main() + }): + with patch_ipa(side_effect=Exception('ERROR MESSAGE')) as (mock_login, mock_post): + with self.assertRaises(AnsibleFailJson) as exec_info: + self.module.main() self.assertEqual(exec_info.exception.args[0]['msg'], 'ERROR MESSAGE') diff --git a/tests/unit/plugins/modules/test_ipa_otptoken.py b/tests/unit/plugins/modules/test_ipa_otptoken.py index 23911e5a5b..36222f7601 100644 --- a/tests/unit/plugins/modules/test_ipa_otptoken.py +++ b/tests/unit/plugins/modules/test_ipa_otptoken.py @@ -9,9 +9,9 @@ __metaclass__ = type from contextlib import contextmanager -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.tests.unit.compat.mock import call, patch -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import call, patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args from ansible_collections.community.general.plugins.modules import ipa_otptoken @@ -61,12 +61,11 @@ class TestIPAOTPToken(ModuleTestCase): changed (bool): Whether or not the module is supposed to be marked as changed """ - set_module_args(module_args) - - # Run the module - with patch_ipa(return_value=return_value) as (mock_login, mock_post): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + # Run the module + with patch_ipa(return_value=return_value) as (mock_login, mock_post): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() # Verify that the calls 
to _post_json match what is expected expected_call_count = len(mock_calls) @@ -481,13 +480,12 @@ class TestIPAOTPToken(ModuleTestCase): def test_fail_post(self): """Fail due to an exception raised from _post_json""" - set_module_args({ + with set_module_args({ 'uniqueid': 'NewToken1' - }) - - with patch_ipa(side_effect=Exception('ERROR MESSAGE')) as (mock_login, mock_post): - with self.assertRaises(AnsibleFailJson) as exec_info: - self.module.main() + }): + with patch_ipa(side_effect=Exception('ERROR MESSAGE')) as (mock_login, mock_post): + with self.assertRaises(AnsibleFailJson) as exec_info: + self.module.main() self.assertEqual(exec_info.exception.args[0]['msg'], 'ERROR MESSAGE') diff --git a/tests/unit/plugins/modules/test_ipa_pwpolicy.py b/tests/unit/plugins/modules/test_ipa_pwpolicy.py index 538f61e9aa..3006312614 100644 --- a/tests/unit/plugins/modules/test_ipa_pwpolicy.py +++ b/tests/unit/plugins/modules/test_ipa_pwpolicy.py @@ -9,9 +9,9 @@ __metaclass__ = type from contextlib import contextmanager -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.tests.unit.compat.mock import call, patch -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import call, patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args from ansible_collections.community.general.plugins.modules import ipa_pwpolicy @@ -62,12 +62,11 @@ class TestIPAPwPolicy(ModuleTestCase): changed (bool): Whether or not the module is supposed to be marked as changed """ - set_module_args(module_args) - - # Run the module - with patch_ipa(return_value=return_value) as (mock_login, mock_post): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + # Run the module + with patch_ipa(return_value=return_value) as (mock_login, mock_post): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() # Verify that the calls to _post_json match what is expected expected_call_count = len(mock_calls) @@ -693,14 +692,13 @@ class TestIPAPwPolicy(ModuleTestCase): def test_fail_post(self): """Fail due to an exception raised from _post_json""" - set_module_args({ + with set_module_args({ 'group': 'admins', 'state': 'absent' - }) - - with patch_ipa(side_effect=Exception('ERROR MESSAGE')) as (mock_login, mock_post): - with self.assertRaises(AnsibleFailJson) as exec_info: - self.module.main() + }): + with patch_ipa(side_effect=Exception('ERROR MESSAGE')) as (mock_login, mock_post): + with self.assertRaises(AnsibleFailJson) as exec_info: + self.module.main() self.assertEqual(exec_info.exception.args[0]['msg'], 'ERROR MESSAGE') diff --git a/tests/unit/plugins/modules/test_ipbase.py b/tests/unit/plugins/modules/test_ipbase.py index 8106889da1..6b2cc36b1b 100644 --- a/tests/unit/plugins/modules/test_ipbase.py +++ b/tests/unit/plugins/modules/test_ipbase.py @@ -9,8 +9,8 @@ __metaclass__ = type import json from ansible_collections.community.general.plugins.modules.ipbase_info import IpbaseInfo -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.tests.unit.compat.mock 
import Mock +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import Mock IPBASE_DATA = { diff --git a/tests/unit/plugins/modules/test_java_keystore.py b/tests/unit/plugins/modules/test_java_keystore.py index b2e70404a9..a92cd69ef3 100644 --- a/tests/unit/plugins/modules/test_java_keystore.py +++ b/tests/unit/plugins/modules/test_java_keystore.py @@ -11,9 +11,9 @@ __metaclass__ = type import os -from ansible_collections.community.general.tests.unit.plugins.modules.utils import ModuleTestCase, set_module_args -from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible_collections.community.general.tests.unit.compat.mock import Mock +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import Mock from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.modules.java_keystore import JavaKeystore @@ -83,20 +83,20 @@ class TestCreateJavaKeystore(ModuleTestCase): self.mock_os_path_exists.stop() def test_create_jks_success(self): - set_module_args(dict( + with set_module_args(dict( certificate='cert-foo', private_key='private-foo', dest='/path/to/keystore.jks', name='test', password='changeit' - )) + )): - module = AnsibleModule( - argument_spec=module_argument_spec, - supports_check_mode=module_supports_check_mode, - mutually_exclusive=module_choose_between, - required_one_of=module_choose_between - ) + module = AnsibleModule( + argument_spec=module_argument_spec, + supports_check_mode=module_supports_check_mode, + mutually_exclusive=module_choose_between, + required_one_of=module_choose_between + ) with patch('os.remove', return_value=True): self.create_path.side_effect = ['/tmp/tmpgrzm2ah7'] @@ -115,21 +115,21 @@ class TestCreateJavaKeystore(ModuleTestCase): } def test_create_jks_keypass_fail_export_pkcs12(self): - set_module_args(dict( + with set_module_args(dict( certificate='cert-foo', private_key='private-foo', private_key_passphrase='passphrase-foo', dest='/path/to/keystore.jks', name='test', password='changeit' - )) + )): - module = AnsibleModule( - argument_spec=module_argument_spec, - supports_check_mode=module_supports_check_mode, - mutually_exclusive=module_choose_between, - required_one_of=module_choose_between - ) + module = AnsibleModule( + argument_spec=module_argument_spec, + supports_check_mode=module_supports_check_mode, + mutually_exclusive=module_choose_between, + required_one_of=module_choose_between + ) module.exit_json = Mock() module.fail_json = Mock() @@ -154,20 +154,20 @@ class TestCreateJavaKeystore(ModuleTestCase): ) def test_create_jks_fail_export_pkcs12(self): - set_module_args(dict( + with set_module_args(dict( certificate='cert-foo', private_key='private-foo', dest='/path/to/keystore.jks', name='test', password='changeit' - )) + )): - module = AnsibleModule( - argument_spec=module_argument_spec, - supports_check_mode=module_supports_check_mode, - mutually_exclusive=module_choose_between, - required_one_of=module_choose_between - ) + module = AnsibleModule( + argument_spec=module_argument_spec, + supports_check_mode=module_supports_check_mode, + mutually_exclusive=module_choose_between, + required_one_of=module_choose_between + 
) module.exit_json = Mock() module.fail_json = Mock() @@ -191,20 +191,20 @@ class TestCreateJavaKeystore(ModuleTestCase): ) def test_create_jks_fail_import_key(self): - set_module_args(dict( + with set_module_args(dict( certificate='cert-foo', private_key='private-foo', dest='/path/to/keystore.jks', name='test', password='changeit' - )) + )): - module = AnsibleModule( - argument_spec=module_argument_spec, - supports_check_mode=module_supports_check_mode, - mutually_exclusive=module_choose_between, - required_one_of=module_choose_between - ) + module = AnsibleModule( + argument_spec=module_argument_spec, + supports_check_mode=module_supports_check_mode, + mutually_exclusive=module_choose_between, + required_one_of=module_choose_between + ) module.exit_json = Mock() module.fail_json = Mock() @@ -257,20 +257,20 @@ class TestCertChanged(ModuleTestCase): self.mock_atomic_move.stop() def test_cert_unchanged_same_fingerprint(self): - set_module_args(dict( + with set_module_args(dict( certificate='cert-foo', private_key='private-foo', dest='/path/to/keystore.jks', name='foo', password='changeit' - )) + )): - module = AnsibleModule( - argument_spec=module_argument_spec, - supports_check_mode=module_supports_check_mode, - mutually_exclusive=module_choose_between, - required_one_of=module_choose_between - ) + module = AnsibleModule( + argument_spec=module_argument_spec, + supports_check_mode=module_supports_check_mode, + mutually_exclusive=module_choose_between, + required_one_of=module_choose_between + ) with patch('os.remove', return_value=True): self.create_file.side_effect = ['/tmp/placeholder', ''] @@ -282,20 +282,20 @@ class TestCertChanged(ModuleTestCase): self.assertFalse(result, 'Fingerprint is identical') def test_cert_changed_fingerprint_mismatch(self): - set_module_args(dict( + with set_module_args(dict( certificate='cert-foo', private_key='private-foo', dest='/path/to/keystore.jks', name='foo', password='changeit' - )) + )): - module = AnsibleModule( - argument_spec=module_argument_spec, - supports_check_mode=module_supports_check_mode, - mutually_exclusive=module_choose_between, - required_one_of=module_choose_between - ) + module = AnsibleModule( + argument_spec=module_argument_spec, + supports_check_mode=module_supports_check_mode, + mutually_exclusive=module_choose_between, + required_one_of=module_choose_between + ) with patch('os.remove', return_value=True): self.create_file.side_effect = ['/tmp/placeholder', ''] @@ -307,20 +307,20 @@ class TestCertChanged(ModuleTestCase): self.assertTrue(result, 'Fingerprint mismatch') def test_cert_changed_alias_does_not_exist(self): - set_module_args(dict( + with set_module_args(dict( certificate='cert-foo', private_key='private-foo', dest='/path/to/keystore.jks', name='foo', password='changeit' - )) + )): - module = AnsibleModule( - argument_spec=module_argument_spec, - supports_check_mode=module_supports_check_mode, - mutually_exclusive=module_choose_between, - required_one_of=module_choose_between - ) + module = AnsibleModule( + argument_spec=module_argument_spec, + supports_check_mode=module_supports_check_mode, + mutually_exclusive=module_choose_between, + required_one_of=module_choose_between + ) with patch('os.remove', return_value=True): self.create_file.side_effect = ['/tmp/placeholder', ''] @@ -332,20 +332,20 @@ class TestCertChanged(ModuleTestCase): self.assertTrue(result, 'Alias mismatch detected') def test_cert_changed_password_mismatch(self): - set_module_args(dict( + with set_module_args(dict( certificate='cert-foo', 
private_key='private-foo', dest='/path/to/keystore.jks', name='foo', password='changeit' - )) + )): - module = AnsibleModule( - argument_spec=module_argument_spec, - supports_check_mode=module_supports_check_mode, - mutually_exclusive=module_choose_between, - required_one_of=module_choose_between - ) + module = AnsibleModule( + argument_spec=module_argument_spec, + supports_check_mode=module_supports_check_mode, + mutually_exclusive=module_choose_between, + required_one_of=module_choose_between + ) with patch('os.remove', return_value=True): self.create_file.side_effect = ['/tmp/placeholder', ''] @@ -357,20 +357,20 @@ class TestCertChanged(ModuleTestCase): self.assertTrue(result, 'Password mismatch detected') def test_cert_changed_fail_read_cert(self): - set_module_args(dict( + with set_module_args(dict( certificate='cert-foo', private_key='private-foo', dest='/path/to/keystore.jks', name='foo', password='changeit' - )) + )): - module = AnsibleModule( - argument_spec=module_argument_spec, - supports_check_mode=module_supports_check_mode, - mutually_exclusive=module_choose_between, - required_one_of=module_choose_between - ) + module = AnsibleModule( + argument_spec=module_argument_spec, + supports_check_mode=module_supports_check_mode, + mutually_exclusive=module_choose_between, + required_one_of=module_choose_between + ) module.exit_json = Mock() module.fail_json = Mock() @@ -390,20 +390,20 @@ class TestCertChanged(ModuleTestCase): ) def test_cert_changed_fail_read_keystore(self): - set_module_args(dict( + with set_module_args(dict( certificate='cert-foo', private_key='private-foo', dest='/path/to/keystore.jks', name='foo', password='changeit' - )) + )): - module = AnsibleModule( - argument_spec=module_argument_spec, - supports_check_mode=module_supports_check_mode, - mutually_exclusive=module_choose_between, - required_one_of=module_choose_between - ) + module = AnsibleModule( + argument_spec=module_argument_spec, + supports_check_mode=module_supports_check_mode, + mutually_exclusive=module_choose_between, + required_one_of=module_choose_between + ) module.exit_json = Mock() module.fail_json = Mock(return_value=True) diff --git a/tests/unit/plugins/modules/test_jenkins_build.py b/tests/unit/plugins/modules/test_jenkins_build.py index d9013a0181..1a91d4a5e9 100644 --- a/tests/unit/plugins/modules/test_jenkins_build.py +++ b/tests/unit/plugins/modules/test_jenkins_build.py @@ -5,42 +5,17 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch from ansible.module_utils import basic -from ansible.module_utils.common.text.converters import to_bytes from ansible_collections.community.general.plugins.modules import jenkins_build - -import json - - -def set_module_args(args): - """prepare arguments so that they will be picked up during module creation""" - args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) - basic._ANSIBLE_ARGS = to_bytes(args) - - -class AnsibleExitJson(Exception): - """Exception class to be raised by module.exit_json and caught by the test case""" - pass - - -class AnsibleFailJson(Exception): - """Exception class to be raised by module.fail_json and caught by the test case""" - pass - - -def exit_json(*args, 
**kwargs): - """function to patch over exit_json; package return data into an exception""" - if 'changed' not in kwargs: - kwargs['changed'] = False - raise AnsibleExitJson(kwargs) - - -def fail_json(*args, **kwargs): - """function to patch over fail_json; package return data into an exception""" - kwargs['failed'] = True - raise AnsibleFailJson(kwargs) +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + AnsibleFailJson, + set_module_args, + exit_json, + fail_json, +) class jenkins: @@ -131,18 +106,18 @@ class TestJenkinsBuild(unittest.TestCase): def test_module_fail_when_required_args_missing(self, test_deps): test_deps.return_value = None with self.assertRaises(AnsibleFailJson): - set_module_args({}) - jenkins_build.main() + with set_module_args({}): + jenkins_build.main() @patch('ansible_collections.community.general.plugins.modules.jenkins_build.test_dependencies') def test_module_fail_when_missing_build_number(self, test_deps): test_deps.return_value = None with self.assertRaises(AnsibleFailJson): - set_module_args({ + with set_module_args({ "name": "required-if", "state": "stopped" - }) - jenkins_build.main() + }): + jenkins_build.main() @patch('ansible_collections.community.general.plugins.modules.jenkins_build.test_dependencies') @patch('ansible_collections.community.general.plugins.modules.jenkins_build.JenkinsBuild.get_jenkins_connection') @@ -151,12 +126,12 @@ class TestJenkinsBuild(unittest.TestCase): jenkins_connection.return_value = JenkinsMock() with self.assertRaises(AnsibleExitJson): - set_module_args({ + with set_module_args({ "name": "host-check", "user": "abc", "token": "xyz" - }) - jenkins_build.main() + }): + jenkins_build.main() @patch('ansible_collections.community.general.plugins.modules.jenkins_build.test_dependencies') @patch('ansible_collections.community.general.plugins.modules.jenkins_build.JenkinsBuild.get_jenkins_connection') @@ -165,14 +140,14 @@ class TestJenkinsBuild(unittest.TestCase): jenkins_connection.return_value = JenkinsMock() with self.assertRaises(AnsibleExitJson) as return_json: - set_module_args({ + with set_module_args({ "name": "host-check", "build_number": "1234", "state": "stopped", "user": "abc", "token": "xyz" - }) - jenkins_build.main() + }): + jenkins_build.main() self.assertTrue(return_json.exception.args[0]['changed']) @@ -183,14 +158,14 @@ class TestJenkinsBuild(unittest.TestCase): jenkins_connection.return_value = JenkinsMockIdempotent() with self.assertRaises(AnsibleExitJson) as return_json: - set_module_args({ + with set_module_args({ "name": "host-check", "build_number": "1234", "state": "stopped", "user": "abc", "password": "xyz" - }) - jenkins_build.main() + }): + jenkins_build.main() self.assertFalse(return_json.exception.args[0]['changed']) @@ -203,14 +178,14 @@ class TestJenkinsBuild(unittest.TestCase): build_status.return_value = JenkinsBuildMock().get_build_status() with self.assertRaises(AnsibleExitJson): - set_module_args({ + with set_module_args({ "name": "host-delete", "build_number": "1234", "state": "absent", "user": "abc", "token": "xyz" - }) - jenkins_build.main() + }): + jenkins_build.main() @patch('ansible_collections.community.general.plugins.modules.jenkins_build.test_dependencies') @patch('ansible_collections.community.general.plugins.modules.jenkins_build.JenkinsBuild.get_jenkins_connection') @@ -219,14 +194,14 @@ class TestJenkinsBuild(unittest.TestCase): jenkins_connection.return_value = JenkinsMockIdempotent() with 
self.assertRaises(AnsibleFailJson): - set_module_args({ + with set_module_args({ "name": "host-delete", "build_number": "1234", "state": "absent", "user": "abc", "token": "xyz" - }) - jenkins_build.main() + }): + jenkins_build.main() @patch('ansible_collections.community.general.plugins.modules.jenkins_build.test_dependencies') @patch('ansible_collections.community.general.plugins.modules.jenkins_build.JenkinsBuild.get_jenkins_connection') @@ -237,12 +212,12 @@ class TestJenkinsBuild(unittest.TestCase): build_status.return_value = JenkinsBuildMock().get_build_status() with self.assertRaises(AnsibleExitJson) as return_json: - set_module_args({ + with set_module_args({ "name": "create-detached", "user": "abc", "token": "xyz" - }) - jenkins_build.main() + }): + jenkins_build.main() self.assertFalse(return_json.exception.args[0]['changed']) @@ -253,12 +228,12 @@ class TestJenkinsBuild(unittest.TestCase): jenkins_connection.return_value = JenkinsMock() with self.assertRaises(AnsibleExitJson) as return_json: - set_module_args({ + with set_module_args({ "name": "create-detached", "user": "abc", "token": "xyz", "detach": True - }) - jenkins_build.main() + }): + jenkins_build.main() self.assertTrue(return_json.exception.args[0]['changed']) diff --git a/tests/unit/plugins/modules/test_jenkins_build_info.py b/tests/unit/plugins/modules/test_jenkins_build_info.py index b5d4126fe0..7f80c8f631 100644 --- a/tests/unit/plugins/modules/test_jenkins_build_info.py +++ b/tests/unit/plugins/modules/test_jenkins_build_info.py @@ -5,42 +5,17 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch from ansible.module_utils import basic -from ansible.module_utils.common.text.converters import to_bytes from ansible_collections.community.general.plugins.modules import jenkins_build_info - -import json - - -def set_module_args(args): - """prepare arguments so that they will be picked up during module creation""" - args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) - basic._ANSIBLE_ARGS = to_bytes(args) - - -class AnsibleExitJson(Exception): - """Exception class to be raised by module.exit_json and caught by the test case""" - pass - - -class AnsibleFailJson(Exception): - """Exception class to be raised by module.fail_json and caught by the test case""" - pass - - -def exit_json(*args, **kwargs): - """function to patch over exit_json; package return data into an exception""" - if 'changed' not in kwargs: - kwargs['changed'] = False - raise AnsibleExitJson(kwargs) - - -def fail_json(*args, **kwargs): - """function to patch over fail_json; package return data into an exception""" - kwargs['failed'] = True - raise AnsibleFailJson(kwargs) +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + AnsibleFailJson, + set_module_args, + exit_json, + fail_json, +) class jenkins: @@ -101,8 +76,8 @@ class TestJenkinsBuildInfo(unittest.TestCase): def test_module_fail_when_required_args_missing(self, test_deps): test_deps.return_value = None with self.assertRaises(AnsibleFailJson): - set_module_args({}) - jenkins_build_info.main() + with set_module_args({}): + jenkins_build_info.main() 
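# The conversion pattern repeated throughout this patch: set_module_args() from
# community.internal_test_tools is a context manager rather than a one-shot setter, so
# the module run (and any mocks that depend on the injected arguments) moves inside the
# with-block, while assertions on the captured result can stay outside it. A minimal
# sketch of the shape, with a hypothetical module under test:
#
#     with set_module_args({"name": "job-present", "user": "abc", "token": "xyz"}):
#         with self.assertRaises(AnsibleExitJson) as return_json:
#             some_module.main()
#     self.assertFalse(return_json.exception.args[0]["changed"])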
@patch('ansible_collections.community.general.plugins.modules.jenkins_build_info.test_dependencies') @patch('ansible_collections.community.general.plugins.modules.jenkins_build_info.JenkinsBuildInfo.get_jenkins_connection') @@ -111,13 +86,13 @@ class TestJenkinsBuildInfo(unittest.TestCase): jenkins_connection.return_value = JenkinsMock() with self.assertRaises(AnsibleExitJson) as return_json: - set_module_args({ + with set_module_args({ "name": "job-present", "user": "abc", "token": "xyz", "build_number": 30 - }) - jenkins_build_info.main() + }): + jenkins_build_info.main() self.assertFalse(return_json.exception.args[0]["changed"]) @@ -130,13 +105,13 @@ class TestJenkinsBuildInfo(unittest.TestCase): build_status.return_value = JenkinsBuildMock("job-absent", 30).get_build_status() with self.assertRaises(AnsibleExitJson) as return_json: - set_module_args({ + with set_module_args({ "name": "job-absent", "user": "abc", "token": "xyz", "build_number": 30 - }) - jenkins_build_info.main() + }): + jenkins_build_info.main() self.assertFalse(return_json.exception.args[0]['changed']) self.assertTrue(return_json.exception.args[0]['failed']) @@ -149,12 +124,12 @@ class TestJenkinsBuildInfo(unittest.TestCase): jenkins_connection.return_value = JenkinsMock() with self.assertRaises(AnsibleExitJson) as return_json: - set_module_args({ + with set_module_args({ "name": "job-present", "user": "abc", "token": "xyz" - }) - jenkins_build_info.main() + }): + jenkins_build_info.main() self.assertFalse(return_json.exception.args[0]['changed']) self.assertEqual("SUCCESS", return_json.exception.args[0]['build_info']['result']) @@ -168,12 +143,12 @@ class TestJenkinsBuildInfo(unittest.TestCase): build_status.return_value = JenkinsBuildMock("job-absent").get_build_status() with self.assertRaises(AnsibleExitJson) as return_json: - set_module_args({ + with set_module_args({ "name": "job-absent", "user": "abc", "token": "xyz" - }) - jenkins_build_info.main() + }): + jenkins_build_info.main() self.assertFalse(return_json.exception.args[0]['changed']) self.assertTrue(return_json.exception.args[0]['failed']) diff --git a/tests/unit/plugins/modules/test_jenkins_credential.py b/tests/unit/plugins/modules/test_jenkins_credential.py new file mode 100644 index 0000000000..b74b7c4b59 --- /dev/null +++ b/tests/unit/plugins/modules/test_jenkins_credential.py @@ -0,0 +1,348 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from ansible_collections.community.general.plugins.modules import jenkins_credential +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import ( + MagicMock, + patch, + mock_open, +) + +import json +import sys + +if sys.version_info[0] == 3: + import builtins + open_path = "builtins.open" +else: + import __builtin__ as builtins + open_path = "__builtin__.open" + + +def test_validate_file_exist_passes_when_file_exists(): + module = MagicMock() + with patch("os.path.exists", return_value=True): + jenkins_credential.validate_file_exist(module, "/some/file/path") + module.fail_json.assert_not_called() + + +def test_validate_file_exist_fails_when_file_missing(): + module = MagicMock() + with patch("os.path.exists", return_value=False): + jenkins_credential.validate_file_exist(module, "/missing/file/path") + module.fail_json.assert_called_once_with( + 
msg="File not found: /missing/file/path" + ) + + +@patch( + "ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url" +) +def test_get_jenkins_crumb_sets_crumb_header(fetch_mock): + module = MagicMock() + module.params = {"type": "file", "url": "http://localhost:8080"} + headers = {} + + fake_response = MagicMock() + fake_response.read.return_value = json.dumps( + {"crumbRequestField": "crumb_field", "crumb": "abc123"} + ).encode("utf-8") + + fetch_mock.return_value = ( + fake_response, + {"status": 200, "set-cookie": "JSESSIONID=something; Path=/"}, + ) + + crumb_request_field, crumb, session_coockie = jenkins_credential.get_jenkins_crumb( + module, headers + ) + + assert "Cookie" not in headers + assert "crumb_field" in headers + assert crumb == "abc123" + assert headers[crumb_request_field] == crumb + + +@patch( + "ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url" +) +def test_get_jenkins_crumb_sets_cookie_if_type_token(fetch_mock): + module = MagicMock() + module.params = {"type": "token", "url": "http://localhost:8080"} + headers = {} + + fake_response = MagicMock() + fake_response.read.return_value = json.dumps( + {"crumbRequestField": "crumb_field", "crumb": "secure"} + ).encode("utf-8") + + fetch_mock.return_value = ( + fake_response, + {"status": 200, "set-cookie": "JSESSIONID=token-cookie; Path=/"}, + ) + + crumb_request_field, crumb, session_cookie = jenkins_credential.get_jenkins_crumb( + module, headers + ) + + assert "crumb_field" in headers + assert crumb == "secure" + assert headers[crumb_request_field] == crumb + assert headers["Cookie"] == session_cookie + + +@patch( + "ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url" +) +def test_get_jenkins_crumb_fails_on_non_200_status(fetch_mock): + module = MagicMock() + module.params = {"type": "file", "url": "http://localhost:8080"} + headers = {} + + fetch_mock.return_value = (MagicMock(), {"status": 403}) + + jenkins_credential.get_jenkins_crumb(module, headers) + + module.fail_json.assert_called_once() + assert "Failed to fetch Jenkins crumb" in module.fail_json.call_args[1]["msg"] + + +@patch( + "ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url" +) +def test_get_jenkins_crumb_removes_job_from_url(fetch_mock): + module = MagicMock() + module.params = {"type": "file", "url": "http://localhost:8080/job/test"} + headers = {} + + fake_response = MagicMock() + fake_response.read.return_value = json.dumps( + {"crumbRequestField": "Jenkins-Crumb", "crumb": "xyz"} + ).encode("utf-8") + + fetch_mock.return_value = (fake_response, {"status": 200, "set-cookie": ""}) + + jenkins_credential.get_jenkins_crumb(module, headers) + + url_called = fetch_mock.call_args[0][1] + assert url_called == "http://localhost:8080/crumbIssuer/api/json" + + +def test_clean_data_removes_extraneous_fields(): + data = { + "id": "cred1", + "description": "test", + "jenkins_user": "admin", + "token": "secret", + "url": "http://localhost:8080", + "file_path": None, + } + expected = {"id": "cred1", "description": "test"} + result = jenkins_credential.clean_data(data) + assert result == expected, "Expected {}, got {}".format(expected, result) + + +@patch( + "ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url" +) +def test_target_exists_returns_true_on_200(fetch_url_mock): + module = MagicMock() + module.params = { + "url": "http://localhost:8080", + "location": "system", + "scope": "_", + "id": "my-id", + 
"jenkins_user": "admin", + "token": "secret", + "type": "file", + } + + fetch_url_mock.return_value = (MagicMock(), {"status": 200}) + assert jenkins_credential.target_exists(module) is True + + +@patch( + "ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url" +) +def test_target_exists_returns_false_on_404(fetch_url_mock): + module = MagicMock() + module.params = { + "url": "http://localhost:8080", + "location": "system", + "scope": "_", + "id": "my-id", + "jenkins_user": "admin", + "token": "secret", + "type": "file", + } + + fetch_url_mock.return_value = (MagicMock(), {"status": 404}) + assert jenkins_credential.target_exists(module) is False + + +@patch( + "ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url" +) +def test_target_exists_calls_fail_json_on_unexpected_status(fetch_url_mock): + module = MagicMock() + module.params = { + "url": "http://localhost:8080", + "location": "system", + "scope": "_", + "id": "my-id", + "jenkins_user": "admin", + "token": "secret", + "type": "file", + } + + fetch_url_mock.return_value = (MagicMock(), {"status": 500}) + jenkins_credential.target_exists(module) + module.fail_json.assert_called_once() + assert "Unexpected status code" in module.fail_json.call_args[1]["msg"] + + +@patch( + "ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url" +) +def test_target_exists_skips_check_for_token_type(fetch_url_mock): + module = MagicMock() + module.params = { + "type": "token", + "url": "ignored", + "location": "ignored", + "scope": "ignored", + "id": "ignored", + "jenkins_user": "ignored", + "token": "ignored", + } + + assert jenkins_credential.target_exists(module) is False + fetch_url_mock.assert_not_called() + + +@patch( + "ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url" +) +def test_delete_target_fails_deleting(fetch_mock): + module = MagicMock() + module.params = { + "type": "token", + "jenkins_user": "admin", + "url": "http://localhost:8080", + "id": "token-id", + "location": "system", + "scope": "_", + } + headers = {"Authorization": "Basic abc", "Content-Type": "whatever"} + + fetch_mock.return_value = (MagicMock(), {"status": 500}) + + jenkins_credential.delete_target(module, headers) + + module.fail_json.assert_called_once() + assert "Failed to delete" in module.fail_json.call_args[1]["msg"] + + +@patch( + "ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url", + side_effect=Exception("network error"), +) +def test_delete_target_raises_exception(fetch_mock): + module = MagicMock() + module.params = { + "type": "scope", + "jenkins_user": "admin", + "location": "system", + "url": "http://localhost:8080", + "id": "domain-id", + "scope": "_", + } + headers = {"Authorization": "Basic auth"} + + jenkins_credential.delete_target(module, headers) + + module.fail_json.assert_called_once() + assert "Exception during delete" in module.fail_json.call_args[1]["msg"] + assert "network error" in module.fail_json.call_args[1]["msg"] + + +def test_read_privateKey_returns_trimmed_contents(): + module = MagicMock() + module.params = {"private_key_path": "/fake/path/key.pem"} + + mocked_file = mock_open( + read_data="\n \t -----BEGIN PRIVATE KEY-----\nKEYDATA\n-----END PRIVATE KEY----- \n\n" + ) + with patch(open_path, mocked_file): + result = jenkins_credential.read_privateKey(module) + + expected = "-----BEGIN PRIVATE KEY-----\nKEYDATA\n-----END PRIVATE KEY-----" + + assert result == expected + 
mocked_file.assert_called_once_with("/fake/path/key.pem", "r") + + +def test_read_privateKey_handles_file_read_error(): + module = MagicMock() + module.params = {"private_key_path": "/invalid/path.pem"} + + with patch(open_path, side_effect=IOError("cannot read file")): + jenkins_credential.read_privateKey(module) + + module.fail_json.assert_called_once() + assert "Failed to read private key file" in module.fail_json.call_args[1]["msg"] + + +def test_embed_file_into_body_returns_multipart_fields(): + module = MagicMock() + file_path = "/fake/path/secret.pem" + credentials = {"id": "my-id"} + fake_file_content = b"MY SECRET DATA" + + mock = mock_open() + mock.return_value.read.return_value = fake_file_content + + with patch("os.path.basename", return_value="secret.pem"), patch.object( + builtins, "open", mock + ): + body, content_type = jenkins_credential.embed_file_into_body( + module, file_path, credentials.copy() + ) + + assert "multipart/form-data; boundary=" in content_type + + # Check if file content is embedded in body + assert b"MY SECRET DATA" in body + assert b'filename="secret.pem"' in body + + +def test_embed_file_into_body_fails_when_file_unreadable(): + module = MagicMock() + file_path = "/fake/path/missing.pem" + credentials = {"id": "something"} + + with patch(open_path, side_effect=IOError("can't read file")): + jenkins_credential.embed_file_into_body(module, file_path, credentials) + + module.fail_json.assert_called_once() + assert "Failed to read file" in module.fail_json.call_args[1]["msg"] + + +def test_embed_file_into_body_injects_file_keys_into_credentials(): + module = MagicMock() + file_path = "/fake/path/file.txt" + credentials = {"id": "test"} + + with patch(open_path, mock_open(read_data=b"1234")), patch( + "os.path.basename", return_value="file.txt" + ): + + jenkins_credential.embed_file_into_body(module, file_path, credentials) + + assert credentials["file"] == "file0" + assert credentials["fileName"] == "file.txt" diff --git a/tests/unit/plugins/modules/test_jenkins_node.py b/tests/unit/plugins/modules/test_jenkins_node.py index 7c2634744d..b4a6dffe82 100644 --- a/tests/unit/plugins/modules/test_jenkins_node.py +++ b/tests/unit/plugins/modules/test_jenkins_node.py @@ -6,16 +6,20 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type import jenkins -import json from xml.etree import ElementTree as et import pytest -from ansible.module_utils import basic -from ansible.module_utils.common.text.converters import to_bytes -from ansible_collections.community.general.tests.unit.compat.mock import patch, call +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch, call from ansible_collections.community.general.plugins.modules import jenkins_node +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + AnsibleFailJson, + set_module_args, + exit_json, + fail_json, +) from pytest import fixture, raises, mark, param @@ -62,35 +66,6 @@ def assert_xml_equal(x, y): raise AssertionError("{} != {}".format(x, y)) -def set_module_args(args): - """prepare arguments so that they will be picked up during module creation""" - args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) - basic._ANSIBLE_ARGS = to_bytes(args) - - -class AnsibleExitJson(Exception): - def __init__(self, value): - self.value = value - - def __getitem__(self, item): - return self.value[item] - - -def exit_json(*args, **kwargs): - if 'changed' not in kwargs: - kwargs['changed'] = False 
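# The helpers being deleted here are another per-file copy of
# set_module_args/AnsibleExitJson/AnsibleFailJson/exit_json/fail_json, the same
# boilerplate removed from the jenkins_build tests above; this file now imports the
# shared versions from community.internal_test_tools. One knock-on effect runs through
# the rest of this file: the local AnsibleExitJson defined __getitem__, so results were
# read as result.value["key"], whereas the shared exception carries its payload in
# .args, hence every rewrite to result.value.args[0]["key"]. A minimal sketch of the
# new access pattern:
#
#     with raises(AnsibleExitJson) as result:
#         jenkins_node.main()
#     assert result.value.args[0]["changed"] is True  # was: result.value["changed"]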
- raise AnsibleExitJson(kwargs) - - -class AnsibleFailJson(Exception): - pass - - -def fail_json(*args, **kwargs): - kwargs['failed'] = True - raise AnsibleFailJson(kwargs) - - @fixture(autouse=True) def module(): with patch.multiple( @@ -120,16 +95,16 @@ def get_instance(instance): def test_get_jenkins_instance_with_user_and_token(instance): instance.node_exists.return_value = False - set_module_args({ + with set_module_args({ "name": "my-node", "state": "absent", "url": "https://localhost:8080", "user": "admin", "token": "password", - }) + }): - with pytest.raises(AnsibleExitJson): - jenkins_node.main() + with pytest.raises(AnsibleExitJson): + jenkins_node.main() assert instance.call_args == call("https://localhost:8080", "admin", "password") @@ -137,15 +112,15 @@ def test_get_jenkins_instance_with_user_and_token(instance): def test_get_jenkins_instance_with_user(instance): instance.node_exists.return_value = False - set_module_args({ + with set_module_args({ "name": "my-node", "state": "absent", "url": "https://localhost:8080", "user": "admin", - }) + }): - with pytest.raises(AnsibleExitJson): - jenkins_node.main() + with pytest.raises(AnsibleExitJson): + jenkins_node.main() assert instance.call_args == call("https://localhost:8080", "admin") @@ -153,14 +128,14 @@ def test_get_jenkins_instance_with_user(instance): def test_get_jenkins_instance_with_no_credential(instance): instance.node_exists.return_value = False - set_module_args({ + with set_module_args({ "name": "my-node", "state": "absent", "url": "https://localhost:8080", - }) + }): - with pytest.raises(AnsibleExitJson): - jenkins_node.main() + with pytest.raises(AnsibleExitJson): + jenkins_node.main() assert instance.call_args == call("https://localhost:8080") @@ -173,18 +148,18 @@ def test_state_present_when_absent(get_instance, instance, state): instance.node_exists.return_value = False instance.get_node_config.return_value = "" - set_module_args({ + with set_module_args({ "name": "my-node", "state": state, - }) + }): - with raises(AnsibleExitJson) as result: - jenkins_node.main() + with raises(AnsibleExitJson) as result: + jenkins_node.main() assert instance.create_node.call_args == call("my-node", launcher=jenkins.LAUNCHER_SSH) - assert result.value["created"] is True - assert result.value["changed"] is True + assert result.value.args[0]["created"] is True + assert result.value.args[0]["changed"] is True @mark.parametrize(["state"], [param(state) for state in PRESENT_STATES]) @@ -192,19 +167,19 @@ def test_state_present_when_absent_check_mode(get_instance, instance, state): instance.node_exists.return_value = False instance.get_node_config.return_value = "" - set_module_args({ + with set_module_args({ "name": "my-node", "state": state, "_ansible_check_mode": True, - }) + }): - with raises(AnsibleExitJson) as result: - jenkins_node.main() + with raises(AnsibleExitJson) as result: + jenkins_node.main() assert not instance.create_node.called - assert result.value["created"] is True - assert result.value["changed"] is True + assert result.value.args[0]["created"] is True + assert result.value.args[0]["changed"] is True @mark.parametrize(["state"], [param(state) for state in PRESENT_STATES]) @@ -215,18 +190,18 @@ def test_state_present_when_absent_redirect_auth_error_handled( instance.get_node_config.return_value = "" instance.create_node.side_effect = jenkins.JenkinsException - set_module_args({ + with set_module_args({ "name": "my-node", "state": state, - }) + }): - with raises(AnsibleExitJson) as result: - jenkins_node.main() + 
with raises(AnsibleExitJson) as result: + jenkins_node.main() assert instance.create_node.call_args == call("my-node", launcher=jenkins.LAUNCHER_SSH) - assert result.value["created"] is True - assert result.value["changed"] is True + assert result.value.args[0]["created"] is True + assert result.value.args[0]["changed"] is True @mark.parametrize(["state"], [param(state) for state in PRESENT_STATES]) @@ -235,72 +210,72 @@ def test_state_present_when_absent_other_error_raised(get_instance, instance, st instance.get_node_config.return_value = "" instance.create_node.side_effect = jenkins.JenkinsException - set_module_args({ + with set_module_args({ "name": "my-node", "state": state, - }) + }): - with raises(AnsibleFailJson) as result: - jenkins_node.main() + with raises(AnsibleFailJson) as result: + jenkins_node.main() assert instance.create_node.call_args == call("my-node", launcher=jenkins.LAUNCHER_SSH) - assert "Create node failed" in str(result.value) + assert "Create node failed" in str(result.value.args[0]) def test_state_present_when_present(get_instance, instance): instance.node_exists.return_value = True instance.get_node_config.return_value = "" - set_module_args({ + with set_module_args({ "name": "my-node", "state": "present", - }) + }): - with raises(AnsibleExitJson) as result: - jenkins_node.main() + with raises(AnsibleExitJson) as result: + jenkins_node.main() assert not instance.create_node.called - assert result.value["created"] is False - assert result.value["changed"] is False + assert result.value.args[0]["created"] is False + assert result.value.args[0]["changed"] is False def test_state_absent_when_present(get_instance, instance): instance.node_exists.return_value = True instance.get_node_config.return_value = "" - set_module_args({ + with set_module_args({ "name": "my-node", "state": "absent", - }) + }): - with raises(AnsibleExitJson) as result: - jenkins_node.main() + with raises(AnsibleExitJson) as result: + jenkins_node.main() assert instance.delete_node.call_args == call("my-node") - assert result.value["deleted"] is True - assert result.value["changed"] is True + assert result.value.args[0]["deleted"] is True + assert result.value.args[0]["changed"] is True def test_state_absent_when_present_check_mode(get_instance, instance): instance.node_exists.return_value = True instance.get_node_config.return_value = "" - set_module_args({ + with set_module_args({ "name": "my-node", "state": "absent", "_ansible_check_mode": True, - }) + }): - with raises(AnsibleExitJson) as result: - jenkins_node.main() + with raises(AnsibleExitJson) as result: + jenkins_node.main() assert not instance.delete_node.called - assert result.value["deleted"] is True - assert result.value["changed"] is True + assert result.value.args[0]["deleted"] is True + assert result.value.args[0]["changed"] is True def test_state_absent_when_present_redirect_auth_error_handled(get_instance, instance): @@ -308,18 +283,18 @@ def test_state_absent_when_present_redirect_auth_error_handled(get_instance, ins instance.get_node_config.return_value = "" instance.delete_node.side_effect = jenkins.JenkinsException - set_module_args({ + with set_module_args({ "name": "my-node", "state": "absent", - }) + }): - with raises(AnsibleExitJson) as result: - jenkins_node.main() + with raises(AnsibleExitJson) as result: + jenkins_node.main() assert instance.delete_node.call_args == call("my-node") - assert result.value["deleted"] is True - assert result.value["changed"] is True + assert result.value.args[0]["deleted"] is True + 
assert result.value.args[0]["changed"] is True def test_state_absent_when_present_other_error_raised(get_instance, instance): @@ -327,35 +302,35 @@ def test_state_absent_when_present_other_error_raised(get_instance, instance): instance.get_node_config.return_value = "" instance.delete_node.side_effect = jenkins.JenkinsException - set_module_args({ + with set_module_args({ "name": "my-node", "state": "absent", - }) + }): - with raises(AnsibleFailJson) as result: - jenkins_node.main() + with raises(AnsibleFailJson) as result: + jenkins_node.main() assert instance.delete_node.call_args == call("my-node") - assert "Delete node failed" in str(result.value) + assert "Delete node failed" in str(result.value.args[0]) def test_state_absent_when_absent(get_instance, instance): instance.node_exists.return_value = False instance.get_node_config.return_value = "" - set_module_args({ + with set_module_args({ "name": "my-node", "state": "absent", - }) + }): - with raises(AnsibleExitJson) as result: - jenkins_node.main() + with raises(AnsibleExitJson) as result: + jenkins_node.main() assert not instance.delete_node.called - assert result.value["deleted"] is False - assert result.value["changed"] is False + assert result.value.args[0]["deleted"] is False + assert result.value.args[0]["changed"] is False def test_state_enabled_when_offline(get_instance, instance): @@ -363,18 +338,18 @@ def test_state_enabled_when_offline(get_instance, instance): instance.get_node_config.return_value = "" instance.get_node_info.return_value = {"offline": True} - set_module_args({ + with set_module_args({ "name": "my-node", "state": "enabled", - }) + }): - with raises(AnsibleExitJson) as result: - jenkins_node.main() + with raises(AnsibleExitJson) as result: + jenkins_node.main() assert instance.enable_node.call_args == call("my-node") - assert result.value["enabled"] is True - assert result.value["changed"] is True + assert result.value.args[0]["enabled"] is True + assert result.value.args[0]["changed"] is True def test_state_enabled_when_offline_check_mode(get_instance, instance): @@ -382,19 +357,19 @@ def test_state_enabled_when_offline_check_mode(get_instance, instance): instance.get_node_config.return_value = "" instance.get_node_info.return_value = {"offline": True} - set_module_args({ + with set_module_args({ "name": "my-node", "state": "enabled", "_ansible_check_mode": True, - }) + }): - with raises(AnsibleExitJson) as result: - jenkins_node.main() + with raises(AnsibleExitJson) as result: + jenkins_node.main() assert not instance.enable_node.called - assert result.value["enabled"] is True - assert result.value["changed"] is True + assert result.value.args[0]["enabled"] is True + assert result.value.args[0]["changed"] is True def test_state_enabled_when_offline_redirect_auth_error_handled(get_instance, instance): @@ -403,18 +378,18 @@ def test_state_enabled_when_offline_redirect_auth_error_handled(get_instance, in instance.get_node_info.side_effect = [{"offline": True}, {"offline": False}] instance.enable_node.side_effect = jenkins.JenkinsException - set_module_args({ + with set_module_args({ "name": "my-node", "state": "enabled", - }) + }): - with raises(AnsibleExitJson) as result: - jenkins_node.main() + with raises(AnsibleExitJson) as result: + jenkins_node.main() assert instance.enable_node.call_args == call("my-node") - assert result.value["enabled"] is True - assert result.value["changed"] is True + assert result.value.args[0]["enabled"] is True + assert result.value.args[0]["changed"] is True def 
test_state_enabled_when_offline_other_error_raised(get_instance, instance): @@ -423,17 +398,17 @@ def test_state_enabled_when_offline_other_error_raised(get_instance, instance): instance.get_node_info.side_effect = [{"offline": True}, {"offline": True}] instance.enable_node.side_effect = jenkins.JenkinsException - set_module_args({ + with set_module_args({ "name": "my-node", "state": "enabled", - }) + }): - with raises(AnsibleFailJson) as result: - jenkins_node.main() + with raises(AnsibleFailJson) as result: + jenkins_node.main() assert instance.enable_node.call_args == call("my-node") - assert "Enable node failed" in str(result.value) + assert "Enable node failed" in str(result.value.args[0]) def test_state_enabled_when_not_offline(get_instance, instance): @@ -441,18 +416,18 @@ def test_state_enabled_when_not_offline(get_instance, instance): instance.get_node_config.return_value = "" instance.get_node_info.return_value = {"offline": False} - set_module_args({ + with set_module_args({ "name": "my-node", "state": "enabled", - }) + }): - with raises(AnsibleExitJson) as result: - jenkins_node.main() + with raises(AnsibleExitJson) as result: + jenkins_node.main() assert not instance.enable_node.called - assert result.value["enabled"] is False - assert result.value["changed"] is False + assert result.value.args[0]["enabled"] is False + assert result.value.args[0]["changed"] is False def test_state_disabled_when_not_offline(get_instance, instance): @@ -463,18 +438,18 @@ def test_state_disabled_when_not_offline(get_instance, instance): "offlineCauseReason": "", } - set_module_args({ + with set_module_args({ "name": "my-node", "state": "disabled", - }) + }): - with raises(AnsibleExitJson) as result: - jenkins_node.main() + with raises(AnsibleExitJson) as result: + jenkins_node.main() assert instance.disable_node.call_args == call("my-node", "") - assert result.value["disabled"] is True - assert result.value["changed"] is True + assert result.value.args[0]["disabled"] is True + assert result.value.args[0]["changed"] is True def test_state_disabled_when_not_offline_redirect_auth_error_handled( @@ -494,18 +469,18 @@ def test_state_disabled_when_not_offline_redirect_auth_error_handled( ] instance.disable_node.side_effect = jenkins.JenkinsException - set_module_args({ + with set_module_args({ "name": "my-node", "state": "disabled", - }) + }): - with raises(AnsibleExitJson) as result: - jenkins_node.main() + with raises(AnsibleExitJson) as result: + jenkins_node.main() assert instance.disable_node.call_args == call("my-node", "") - assert result.value["disabled"] is True - assert result.value["changed"] is True + assert result.value.args[0]["disabled"] is True + assert result.value.args[0]["changed"] is True def test_state_disabled_when_not_offline_other_error_raised(get_instance, instance): @@ -523,17 +498,17 @@ def test_state_disabled_when_not_offline_other_error_raised(get_instance, instan ] instance.disable_node.side_effect = jenkins.JenkinsException - set_module_args({ + with set_module_args({ "name": "my-node", "state": "disabled", - }) + }): - with raises(AnsibleFailJson) as result: - jenkins_node.main() + with raises(AnsibleFailJson) as result: + jenkins_node.main() assert instance.disable_node.call_args == call("my-node", "") - assert "Disable node failed" in str(result.value) + assert "Disable node failed" in str(result.value.args[0]) def test_state_disabled_when_not_offline_check_mode(get_instance, instance): @@ -544,19 +519,19 @@ def 
test_state_disabled_when_not_offline_check_mode(get_instance, instance): "offlineCauseReason": "", } - set_module_args({ + with set_module_args({ "name": "my-node", "state": "disabled", "_ansible_check_mode": True, - }) + }): - with raises(AnsibleExitJson) as result: - jenkins_node.main() + with raises(AnsibleExitJson) as result: + jenkins_node.main() assert not instance.disable_node.called - assert result.value["disabled"] is True - assert result.value["changed"] is True + assert result.value.args[0]["disabled"] is True + assert result.value.args[0]["changed"] is True def test_state_disabled_when_offline(get_instance, instance): @@ -567,32 +542,32 @@ def test_state_disabled_when_offline(get_instance, instance): "offlineCauseReason": "", } - set_module_args({ + with set_module_args({ "name": "my-node", "state": "disabled", - }) + }): - with raises(AnsibleExitJson) as result: - jenkins_node.main() + with raises(AnsibleExitJson) as result: + jenkins_node.main() assert not instance.disable_node.called - assert result.value["disabled"] is False - assert result.value["changed"] is False + assert result.value.args[0]["disabled"] is False + assert result.value.args[0]["changed"] is False def test_configure_num_executors_when_not_configured(get_instance, instance): instance.node_exists.return_value = True instance.get_node_config.return_value = "" - set_module_args({ + with set_module_args({ "name": "my-node", "state": "present", "num_executors": 3, - }) + }): - with raises(AnsibleExitJson) as result: - jenkins_node.main() + with raises(AnsibleExitJson) as result: + jenkins_node.main() assert instance.reconfig_node.call_args[0][0] == "my-node" assert_xml_equal(instance.reconfig_node.call_args[0][1], """ @@ -601,8 +576,8 @@ def test_configure_num_executors_when_not_configured(get_instance, instance): """) - assert result.value["configured"] is True - assert result.value["changed"] is True + assert result.value.args[0]["configured"] is True + assert result.value.args[0]["changed"] is True def test_configure_num_executors_when_not_equal(get_instance, instance): @@ -613,14 +588,14 @@ def test_configure_num_executors_when_not_equal(get_instance, instance): """ - set_module_args({ + with set_module_args({ "name": "my-node", "state": "present", "num_executors": 2, - }) + }): - with raises(AnsibleExitJson) as result: - jenkins_node.main() + with raises(AnsibleExitJson) as result: + jenkins_node.main() assert_xml_equal(instance.reconfig_node.call_args[0][1], """ @@ -628,8 +603,8 @@ def test_configure_num_executors_when_not_equal(get_instance, instance): """) - assert result.value["configured"] is True - assert result.value["changed"] is True + assert result.value.args[0]["configured"] is True + assert result.value.args[0]["changed"] is True def test_configure_num_executors_when_equal(get_instance, instance): @@ -640,26 +615,26 @@ def test_configure_num_executors_when_equal(get_instance, instance): """ - set_module_args({ + with set_module_args({ "name": "my-node", "state": "present", "num_executors": 2, - }) + }): - with raises(AnsibleExitJson) as result: - jenkins_node.main() + with raises(AnsibleExitJson) as result: + jenkins_node.main() assert not instance.reconfig_node.called - assert result.value["configured"] is False - assert result.value["changed"] is False + assert result.value.args[0]["configured"] is False + assert result.value.args[0]["changed"] is False def test_configure_labels_when_not_configured(get_instance, instance): instance.node_exists.return_value = True 
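Every check-mode test in these hunks follows the same contract: with the internal _ansible_check_mode flag set in the module arguments, the Jenkins API mock must not be called, while the reported result still claims the change that would have happened. Condensed from the surrounding hunks (set_module_args, raises, AnsibleExitJson, the instance fixture, and jenkins_node all come from this test module's own imports and fixtures):

with set_module_args({
    "name": "my-node",
    "state": "absent",
    "_ansible_check_mode": True,  # run the module in check mode
}):
    with raises(AnsibleExitJson) as result:
        jenkins_node.main()

# Check mode reports the change without performing it.
assert not instance.delete_node.called
assert result.value.args[0]["changed"] is True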
instance.get_node_config.return_value = "" - set_module_args({ + with set_module_args({ "name": "my-node", "state": "present", "labels": [ @@ -667,10 +642,10 @@ def test_configure_labels_when_not_configured(get_instance, instance): "b", "c", ], - }) + }): - with raises(AnsibleExitJson) as result: - jenkins_node.main() + with raises(AnsibleExitJson) as result: + jenkins_node.main() assert instance.reconfig_node.call_args[0][0] == "my-node" assert_xml_equal(instance.reconfig_node.call_args[0][1], """ @@ -679,8 +654,8 @@ def test_configure_labels_when_not_configured(get_instance, instance): """) - assert result.value["configured"] is True - assert result.value["changed"] is True + assert result.value.args[0]["configured"] is True + assert result.value.args[0]["changed"] is True def test_configure_labels_when_not_equal(get_instance, instance): @@ -691,7 +666,7 @@ def test_configure_labels_when_not_equal(get_instance, instance): """ - set_module_args({ + with set_module_args({ "name": "my-node", "state": "present", "labels": [ @@ -699,10 +674,10 @@ def test_configure_labels_when_not_equal(get_instance, instance): "z", "c", ], - }) + }): - with raises(AnsibleExitJson) as result: - jenkins_node.main() + with raises(AnsibleExitJson) as result: + jenkins_node.main() assert instance.reconfig_node.call_args[0][0] == "my-node" assert_xml_equal(instance.reconfig_node.call_args[0][1], """ @@ -711,8 +686,8 @@ def test_configure_labels_when_not_equal(get_instance, instance): """) - assert result.value["configured"] is True - assert result.value["changed"] is True + assert result.value.args[0]["configured"] is True + assert result.value.args[0]["changed"] is True def test_configure_labels_when_equal(get_instance, instance): @@ -723,7 +698,7 @@ def test_configure_labels_when_equal(get_instance, instance): """ - set_module_args({ + with set_module_args({ "name": "my-node", "state": "present", "labels": [ @@ -731,45 +706,45 @@ def test_configure_labels_when_equal(get_instance, instance): "b", "c", ], - }) + }): - with raises(AnsibleExitJson) as result: - jenkins_node.main() + with raises(AnsibleExitJson) as result: + jenkins_node.main() assert not instance.reconfig_node.called - assert result.value["configured"] is False - assert result.value["changed"] is False + assert result.value.args[0]["configured"] is False + assert result.value.args[0]["changed"] is False def test_configure_labels_fail_when_contains_space(get_instance, instance): instance.node_exists.return_value = True instance.get_node_config.return_value = "" - set_module_args({ + with set_module_args({ "name": "my-node", "state": "present", "labels": [ "a error", ], - }) + }): - with raises(AnsibleFailJson): - jenkins_node.main() + with raises(AnsibleFailJson): + jenkins_node.main() assert not instance.reconfig_node.called @mark.parametrize(["state"], [param(state) for state in ["enabled", "present", "absent"]]) def test_raises_error_if_offline_message_when_state_not_disabled(get_instance, instance, state): - set_module_args({ + with set_module_args({ "name": "my-node", "state": state, "offline_message": "This is a message...", - }) + }): - with raises(AnsibleFailJson): - jenkins_node.main() + with raises(AnsibleFailJson): + jenkins_node.main() assert not instance.disable_node.called @@ -782,18 +757,18 @@ def test_set_offline_message_when_equal(get_instance, instance): "offlineCauseReason": "This is an old message...", } - set_module_args({ + with set_module_args({ "name": "my-node", "state": "disabled", "offline_message": "This is an old 
message...", - }) + }): - with raises(AnsibleExitJson) as result: - jenkins_node.main() + with raises(AnsibleExitJson) as result: + jenkins_node.main() assert not instance.disable_node.called - assert result.value["changed"] is False + assert result.value.args[0]["changed"] is False def test_set_offline_message_when_not_equal_not_offline(get_instance, instance): @@ -804,18 +779,18 @@ def test_set_offline_message_when_not_equal_not_offline(get_instance, instance): "offlineCauseReason": "This is an old message...", } - set_module_args({ + with set_module_args({ "name": "my-node", "state": "disabled", "offline_message": "This is a new message...", - }) + }): - with raises(AnsibleExitJson) as result: - jenkins_node.main() + with raises(AnsibleExitJson) as result: + jenkins_node.main() assert instance.disable_node.call_args == call("my-node", "This is a new message...") - assert result.value["changed"] is True + assert result.value.args[0]["changed"] is True # Not calling disable_node when already offline seems like a sensible thing to do. @@ -829,15 +804,15 @@ def test_set_offline_message_when_not_equal_offline(get_instance, instance): "offlineCauseReason": "This is an old message...", } - set_module_args({ + with set_module_args({ "name": "my-node", "state": "disabled", "offline_message": "This is a new message...", - }) + }): - with raises(AnsibleExitJson) as result: - jenkins_node.main() + with raises(AnsibleExitJson) as result: + jenkins_node.main() assert not instance.disable_node.called - assert result.value["changed"] is False + assert result.value.args[0]["changed"] is False diff --git a/tests/unit/plugins/modules/test_keycloak_authentication.py b/tests/unit/plugins/modules/test_keycloak_authentication.py index aaa1fa9b1b..89a44c919e 100644 --- a/tests/unit/plugins/modules/test_keycloak_authentication.py +++ b/tests/unit/plugins/modules/test_keycloak_authentication.py @@ -9,9 +9,9 @@ __metaclass__ = type from contextlib import contextmanager -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args from ansible_collections.community.general.plugins.modules import keycloak_authentication @@ -163,17 +163,16 @@ class TestKeycloakAuthentication(ModuleTestCase): ] changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before, copy_auth_flow=return_value_copied, - get_executions_representation=return_value_executions_after) \ - as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, - mock_get_executions_representation, mock_delete_authentication_flow_by_id): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before, copy_auth_flow=return_value_copied, + 
get_executions_representation=return_value_executions_after) \ + as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, + mock_get_executions_representation, mock_delete_authentication_flow_by_id): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() # Verify number of call on each mock self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1) @@ -256,17 +255,16 @@ class TestKeycloakAuthentication(ModuleTestCase): ] changed = False - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before, - get_executions_representation=return_value_executions_after) \ - as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, - mock_get_executions_representation, mock_delete_authentication_flow_by_id): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before, + get_executions_representation=return_value_executions_after) \ + as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, + mock_get_executions_representation, mock_delete_authentication_flow_by_id): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() # Verify number of call on each mock self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1) @@ -328,17 +326,16 @@ class TestKeycloakAuthentication(ModuleTestCase): ] changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before, - get_executions_representation=return_value_executions_after, create_empty_auth_flow=return_value_created_empty_flow) \ - as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, - mock_get_executions_representation, mock_delete_authentication_flow_by_id): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before, + get_executions_representation=return_value_executions_after, create_empty_auth_flow=return_value_created_empty_flow) \ + as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, + mock_get_executions_representation, mock_delete_authentication_flow_by_id): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() # Verify number of call on each mock self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1) @@ -419,17 +416,16 @@ class TestKeycloakAuthentication(ModuleTestCase): ] changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before, - get_executions_representation=return_value_executions_after) \ - as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, - mock_get_executions_representation, mock_delete_authentication_flow_by_id): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with 
patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before, + get_executions_representation=return_value_executions_after) \ + as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, + mock_get_executions_representation, mock_delete_authentication_flow_by_id): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() # Verify number of call on each mock self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1) @@ -472,16 +468,15 @@ class TestKeycloakAuthentication(ModuleTestCase): }] changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before) \ - as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, - mock_get_executions_representation, mock_delete_authentication_flow_by_id): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before) \ + as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, + mock_get_executions_representation, mock_delete_authentication_flow_by_id): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() # Verify number of call on each mock self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1) @@ -508,16 +503,15 @@ class TestKeycloakAuthentication(ModuleTestCase): return_value_auth_flow_before = [{}] changed = False - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before) \ - as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, - mock_get_executions_representation, mock_delete_authentication_flow_by_id): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before) \ + as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, + mock_get_executions_representation, mock_delete_authentication_flow_by_id): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() # Verify number of call on each mock self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1) @@ -596,17 +590,16 @@ class TestKeycloakAuthentication(ModuleTestCase): ] changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before, - get_executions_representation=return_value_executions_after, create_empty_auth_flow=return_value_created_empty_flow) \ - as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, - mock_get_executions_representation, mock_delete_authentication_flow_by_id): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before, + get_executions_representation=return_value_executions_after, create_empty_auth_flow=return_value_created_empty_flow) \ + as 
(mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, + mock_get_executions_representation, mock_delete_authentication_flow_by_id): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() # Verify number of call on each mock self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1) diff --git a/tests/unit/plugins/modules/test_keycloak_authentication_required_actions.py b/tests/unit/plugins/modules/test_keycloak_authentication_required_actions.py index 2adc3a896b..86fb235357 100644 --- a/tests/unit/plugins/modules/test_keycloak_authentication_required_actions.py +++ b/tests/unit/plugins/modules/test_keycloak_authentication_required_actions.py @@ -9,9 +9,9 @@ __metaclass__ = type from contextlib import contextmanager -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args from ansible_collections.community.general.plugins.modules import keycloak_authentication_required_actions @@ -235,20 +235,19 @@ class TestKeycloakAuthentication(ModuleTestCase): changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api( - get_required_actions=return_value_required_actions, - ) as ( - mock_get_required_actions, - mock_register_required_action, - mock_update_required_action, - mock_delete_required_action, - ): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api( + get_required_actions=return_value_required_actions, + ) as ( + mock_get_required_actions, + mock_register_required_action, + mock_update_required_action, + mock_delete_required_action, + ): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() # Verify number of call on each mock self.assertEqual(len(mock_get_required_actions.mock_calls), 1) @@ -386,20 +385,19 @@ class TestKeycloakAuthentication(ModuleTestCase): changed = False - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api( - get_required_actions=return_value_required_actions, - ) as ( - mock_get_required_actions, - mock_register_required_action, - mock_update_required_action, - mock_delete_required_action, - ): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api( + get_required_actions=return_value_required_actions, + ) as ( + mock_get_required_actions, + mock_register_required_action, + mock_update_required_action, + mock_delete_required_action, + ): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() # Verify number of call on each mock self.assertEqual(len(mock_get_required_actions.mock_calls), 1) @@ -537,20 +535,19 @@ class TestKeycloakAuthentication(ModuleTestCase): changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - 
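In these unittest-style Keycloak tests the conversion is the same idea with one extra indentation level: everything that previously ran after the bare set_module_args() call now nests inside the with block, while the call-count assertions stay outside it, after teardown. Reduced to its skeleton (helper names as defined in this test file; the final assertion mirrors the args[0] payload access used in the pytest-style tests):

with set_module_args(module_args):
    with mock_good_connection():
        with patch_keycloak_api(
            get_required_actions=return_value_required_actions,
        ) as (
            mock_get_required_actions,
            mock_register_required_action,
            mock_update_required_action,
            mock_delete_required_action,
        ):
            with self.assertRaises(AnsibleExitJson) as exec_info:
                self.module.main()

# Assertions run after the context managers have unwound.
self.assertEqual(len(mock_get_required_actions.mock_calls), 1)
self.assertEqual(exec_info.exception.args[0]["changed"], changed)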
with patch_keycloak_api( - get_required_actions=return_value_required_actions, - ) as ( - mock_get_required_actions, - mock_register_required_action, - mock_update_required_action, - mock_delete_required_action, - ): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api( + get_required_actions=return_value_required_actions, + ) as ( + mock_get_required_actions, + mock_register_required_action, + mock_update_required_action, + mock_delete_required_action, + ): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() # Verify number of call on each mock self.assertEqual(len(mock_get_required_actions.mock_calls), 1) @@ -676,20 +673,19 @@ class TestKeycloakAuthentication(ModuleTestCase): changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api( - get_required_actions=return_value_required_actions, - ) as ( - mock_get_required_actions, - mock_register_required_action, - mock_update_required_action, - mock_delete_required_action, - ): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api( + get_required_actions=return_value_required_actions, + ) as ( + mock_get_required_actions, + mock_register_required_action, + mock_update_required_action, + mock_delete_required_action, + ): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() # Verify number of call on each mock self.assertEqual(len(mock_get_required_actions.mock_calls), 1) @@ -806,20 +802,19 @@ class TestKeycloakAuthentication(ModuleTestCase): changed = False - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api( - get_required_actions=return_value_required_actions, - ) as ( - mock_get_required_actions, - mock_register_required_action, - mock_update_required_action, - mock_delete_required_action, - ): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api( + get_required_actions=return_value_required_actions, + ) as ( + mock_get_required_actions, + mock_register_required_action, + mock_update_required_action, + mock_delete_required_action, + ): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() # Verify number of call on each mock self.assertEqual(len(mock_get_required_actions.mock_calls), 1) diff --git a/tests/unit/plugins/modules/test_keycloak_client.py b/tests/unit/plugins/modules/test_keycloak_client.py index b44013af13..4f2572780b 100644 --- a/tests/unit/plugins/modules/test_keycloak_client.py +++ b/tests/unit/plugins/modules/test_keycloak_client.py @@ -10,9 +10,9 @@ __metaclass__ = type from contextlib import contextmanager -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, 
ModuleTestCase, set_module_args from ansible_collections.community.general.plugins.modules import keycloak_client @@ -126,15 +126,14 @@ class TestKeycloakRealm(ModuleTestCase): ] changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_client_by_clientid=return_value_get_client_by_clientid) \ - as (mock_get_client_by_clientid, mock_get_client_by_id, mock_create_client, mock_update_client, mock_delete_client): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_client_by_clientid=return_value_get_client_by_clientid) \ + as (mock_get_client_by_clientid, mock_get_client_by_id, mock_create_client, mock_update_client, mock_delete_client): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(mock_get_client_by_clientid.call_count, 2) self.assertEqual(mock_get_client_by_id.call_count, 0) diff --git a/tests/unit/plugins/modules/test_keycloak_client_rolemapping.py b/tests/unit/plugins/modules/test_keycloak_client_rolemapping.py index 359e6304ef..b5679d22a0 100644 --- a/tests/unit/plugins/modules/test_keycloak_client_rolemapping.py +++ b/tests/unit/plugins/modules/test_keycloak_client_rolemapping.py @@ -9,9 +9,9 @@ __metaclass__ = type from contextlib import contextmanager -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args from ansible_collections.community.general.plugins.modules import keycloak_client_rolemapping @@ -188,20 +188,19 @@ class TestKeycloakRealm(ModuleTestCase): changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id, - get_client_role_id_by_name=return_value_get_client_role_id_by_name, - get_client_group_available_rolemappings=return_value_get_client_group_available_rolemappings, - get_client_group_composite_rolemappings=return_value_get_client_group_composite_rolemappings) \ - as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_id_by_name, mock_add_group_rolemapping, - mock_get_client_group_rolemapping_by_id, mock_get_client_group_available_rolemappings, mock_get_client_group_composite_rolemappings, - mock_delete_group_rolemapping): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id, + get_client_role_id_by_name=return_value_get_client_role_id_by_name, + get_client_group_available_rolemappings=return_value_get_client_group_available_rolemappings, + get_client_group_composite_rolemappings=return_value_get_client_group_composite_rolemappings) \ + as (mock_get_group_by_name, mock_get_client_id, 
mock_get_client_role_id_by_name, mock_add_group_rolemapping, + mock_get_client_group_rolemapping_by_id, mock_get_client_group_available_rolemappings, mock_get_client_group_composite_rolemappings, + mock_delete_group_rolemapping): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(mock_get_group_by_name.call_count, 1) self.assertEqual(mock_get_client_id.call_count, 1) @@ -272,20 +271,19 @@ class TestKeycloakRealm(ModuleTestCase): changed = False - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id, - get_client_role_id_by_name=return_value_get_client_role_id_by_name, - get_client_group_available_rolemappings=return_value_get_client_group_available_rolemappings, - get_client_group_composite_rolemappings=return_value_get_client_group_composite_rolemappings) \ - as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_id_by_name, mock_add_group_rolemapping, - mock_get_client_group_rolemapping_by_id, mock_get_client_group_available_rolemappings, mock_get_client_group_composite_rolemappings, - mock_delete_group_rolemapping): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id, + get_client_role_id_by_name=return_value_get_client_role_id_by_name, + get_client_group_available_rolemappings=return_value_get_client_group_available_rolemappings, + get_client_group_composite_rolemappings=return_value_get_client_group_composite_rolemappings) \ + as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_id_by_name, mock_add_group_rolemapping, + mock_get_client_group_rolemapping_by_id, mock_get_client_group_available_rolemappings, mock_get_client_group_composite_rolemappings, + mock_delete_group_rolemapping): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(mock_get_group_by_name.call_count, 1) self.assertEqual(mock_get_client_id.call_count, 1) @@ -374,20 +372,19 @@ class TestKeycloakRealm(ModuleTestCase): changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id, - get_client_role_id_by_name=return_value_get_client_role_id_by_name, - get_client_group_available_rolemappings=return_value_get_client_group_available_rolemappings, - get_client_group_composite_rolemappings=return_value_get_client_group_composite_rolemappings) \ - as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_id_by_name, mock_add_group_rolemapping, - mock_get_client_group_rolemapping_by_id, mock_get_client_group_available_rolemappings, mock_get_client_group_composite_rolemappings, - mock_delete_group_rolemapping): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id, + get_client_role_id_by_name=return_value_get_client_role_id_by_name, + get_client_group_available_rolemappings=return_value_get_client_group_available_rolemappings, + 
get_client_group_composite_rolemappings=return_value_get_client_group_composite_rolemappings) \ + as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_id_by_name, mock_add_group_rolemapping, + mock_get_client_group_rolemapping_by_id, mock_get_client_group_available_rolemappings, mock_get_client_group_composite_rolemappings, + mock_delete_group_rolemapping): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(mock_get_group_by_name.call_count, 0) self.assertEqual(mock_get_client_id.call_count, 0) @@ -461,20 +458,19 @@ class TestKeycloakRealm(ModuleTestCase): changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id, - get_client_role_id_by_name=return_value_get_client_role_id_by_name, - get_client_group_available_rolemappings=return_value_get_client_group_available_rolemappings, - get_client_group_composite_rolemappings=return_value_get_client_group_composite_rolemappings) \ - as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_id_by_name, mock_add_group_rolemapping, - mock_get_client_group_rolemapping_by_id, mock_get_client_group_available_rolemappings, mock_get_client_group_composite_rolemappings, - mock_delete_group_rolemapping): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id, + get_client_role_id_by_name=return_value_get_client_role_id_by_name, + get_client_group_available_rolemappings=return_value_get_client_group_available_rolemappings, + get_client_group_composite_rolemappings=return_value_get_client_group_composite_rolemappings) \ + as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_id_by_name, mock_add_group_rolemapping, + mock_get_client_group_rolemapping_by_id, mock_get_client_group_available_rolemappings, mock_get_client_group_composite_rolemappings, + mock_delete_group_rolemapping): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(mock_get_group_by_name.call_count, 1) self.assertEqual(mock_get_client_id.call_count, 1) @@ -547,20 +543,19 @@ class TestKeycloakRealm(ModuleTestCase): changed = False - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id, - get_client_role_id_by_name=return_value_get_client_role_id_by_name, - get_client_group_available_rolemappings=return_value_get_client_group_available_rolemappings, - get_client_group_composite_rolemappings=return_value_get_client_group_composite_rolemappings) \ - as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_id_by_name, mock_add_group_rolemapping, - mock_get_client_group_rolemapping_by_id, mock_get_client_group_available_rolemappings, mock_get_client_group_composite_rolemappings, - mock_delete_group_rolemapping): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id, + get_client_role_id_by_name=return_value_get_client_role_id_by_name, + 
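patch_keycloak_api, unpacked at every call site in these rolemapping hunks, is a per-file contextmanager that patches a fixed set of KeycloakAPI methods and yields all of the mocks as one tuple, which is why each test unpacks the same long tuple even when it configures only a few return values. A reduced, hypothetical sketch for just two methods (the real helper patches many more and may differ in detail):

from contextlib import contextmanager

from unittest.mock import patch

from ansible_collections.community.general.plugins.module_utils import keycloak


@contextmanager
def patch_keycloak_api(get_group_by_name=None, get_client_id=None):
    """Patch selected KeycloakAPI methods and yield the mock objects."""
    with patch.object(
        keycloak.KeycloakAPI, "get_group_by_name", side_effect=get_group_by_name
    ) as mock_get_group_by_name, patch.object(
        keycloak.KeycloakAPI, "get_client_id", side_effect=get_client_id
    ) as mock_get_client_id:
        yield mock_get_group_by_name, mock_get_client_id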
get_client_group_available_rolemappings=return_value_get_client_group_available_rolemappings, + get_client_group_composite_rolemappings=return_value_get_client_group_composite_rolemappings) \ + as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_id_by_name, mock_add_group_rolemapping, + mock_get_client_group_rolemapping_by_id, mock_get_client_group_available_rolemappings, mock_get_client_group_composite_rolemappings, + mock_delete_group_rolemapping): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(mock_get_group_by_name.call_count, 1) self.assertEqual(mock_get_client_id.call_count, 1) diff --git a/tests/unit/plugins/modules/test_keycloak_clientscope.py b/tests/unit/plugins/modules/test_keycloak_clientscope.py index ea015b05bf..261315c7fa 100644 --- a/tests/unit/plugins/modules/test_keycloak_clientscope.py +++ b/tests/unit/plugins/modules/test_keycloak_clientscope.py @@ -10,9 +10,9 @@ __metaclass__ = type from contextlib import contextmanager -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args from ansible_collections.community.general.plugins.modules import keycloak_clientscope @@ -142,18 +142,17 @@ class TestKeycloakAuthentication(ModuleTestCase): changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) \ - as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, - mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, - mock_update_clientscope_protocolmappers, - mock_create_clientscope_protocolmapper, mock_delete_clientscope): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) \ + as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, + mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, + mock_update_clientscope_protocolmappers, + mock_create_clientscope_protocolmapper, mock_delete_clientscope): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() # Verify number of call on each mock self.assertEqual(mock_get_clientscope_by_name.call_count, 2) @@ -188,18 +187,17 @@ class TestKeycloakAuthentication(ModuleTestCase): changed = False - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) \ - as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, - mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, - mock_update_clientscope_protocolmappers, - mock_create_clientscope_protocolmapper, mock_delete_clientscope): - with self.assertRaises(AnsibleExitJson) as 
exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) \ + as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, + mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, + mock_update_clientscope_protocolmappers, + mock_create_clientscope_protocolmapper, mock_delete_clientscope): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() # Verify number of call on each mock self.assertEqual(mock_get_clientscope_by_name.call_count, 1) @@ -234,18 +232,17 @@ class TestKeycloakAuthentication(ModuleTestCase): changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) \ - as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, - mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, - mock_update_clientscope_protocolmappers, - mock_create_clientscope_protocolmapper, mock_delete_clientscope): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) \ + as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, + mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, + mock_update_clientscope_protocolmappers, + mock_create_clientscope_protocolmapper, mock_delete_clientscope): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() # Verify number of call on each mock self.assertEqual(mock_get_clientscope_by_name.call_count, 1) @@ -276,18 +273,17 @@ class TestKeycloakAuthentication(ModuleTestCase): changed = False - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) \ - as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, - mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, - mock_update_clientscope_protocolmappers, - mock_create_clientscope_protocolmapper, mock_delete_clientscope): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) \ + as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, + mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, + mock_update_clientscope_protocolmappers, + mock_create_clientscope_protocolmapper, mock_delete_clientscope): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() # Verify number of call on each mock self.assertEqual(mock_get_clientscope_by_name.call_count, 1) @@ -405,18 +401,17 @@ class TestKeycloakAuthentication(ModuleTestCase): changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) \ - as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, - mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, - 
mock_update_clientscope_protocolmappers, - mock_create_clientscope_protocolmapper, mock_delete_clientscope): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) \ + as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, + mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, + mock_update_clientscope_protocolmappers, + mock_create_clientscope_protocolmapper, mock_delete_clientscope): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() # Verify number of call on each mock self.assertEqual(mock_get_clientscope_by_name.call_count, 2) @@ -582,19 +577,18 @@ class TestKeycloakAuthentication(ModuleTestCase): changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name, - get_clientscope_by_clientscopeid=return_value_get_clientscope_by_clientscopeid) \ - as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, - mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, - mock_update_clientscope_protocolmappers, - mock_create_clientscope_protocolmapper, mock_delete_clientscope): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name, + get_clientscope_by_clientscopeid=return_value_get_clientscope_by_clientscopeid) \ + as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, + mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, + mock_update_clientscope_protocolmappers, + mock_create_clientscope_protocolmapper, mock_delete_clientscope): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() # Verify number of call on each mock self.assertEqual(mock_get_clientscope_by_name.call_count, 1) diff --git a/tests/unit/plugins/modules/test_keycloak_component.py b/tests/unit/plugins/modules/test_keycloak_component.py index e491bf431c..22436838fb 100644 --- a/tests/unit/plugins/modules/test_keycloak_component.py +++ b/tests/unit/plugins/modules/test_keycloak_component.py @@ -13,9 +13,9 @@ from itertools import count from ansible.module_utils.six import StringIO from ansible_collections.community.general.plugins.modules import keycloak_realm_key -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args from ansible_collections.community.general.plugins.modules import keycloak_component @@ -126,15 +126,14 @@ class TestKeycloakComponent(ModuleTestCase): ] changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with 
patch_keycloak_api(get_components=return_value_components_get, create_component=return_value_component_create) \ - as (mock_get_components, mock_create_component, mock_update_component, mock_delete_component): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_components_get, create_component=return_value_component_create) \ + as (mock_get_components, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_components.mock_calls), 1) self.assertEqual(len(mock_create_component.mock_calls), 1) @@ -199,16 +198,15 @@ class TestKeycloakComponent(ModuleTestCase): ] changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_components=return_value_components_get, - update_component=return_value_component_update) \ - as (mock_get_components, mock_create_component, mock_update_component, mock_delete_component): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_components_get, + update_component=return_value_component_update) \ + as (mock_get_components, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_components.mock_calls), 1) self.assertEqual(len(mock_create_component.mock_calls), 0) @@ -241,15 +239,14 @@ class TestKeycloakComponent(ModuleTestCase): ] changed = False - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_components=return_value_components_get) \ - as (mock_get_components, mock_create_component, mock_update_component, mock_delete_component): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_components_get) \ + as (mock_get_components, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_components.mock_calls), 1) self.assertEqual(len(mock_create_component.mock_calls), 0) @@ -304,15 +301,14 @@ class TestKeycloakComponent(ModuleTestCase): ] changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_components=return_value_components_get, delete_component=return_value_component_delete) \ - as (mock_get_components, mock_create_component, mock_update_component, mock_delete_component): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_components_get, delete_component=return_value_component_delete) \ + as (mock_get_components, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_components.mock_calls), 1) self.assertEqual(len(mock_create_component.mock_calls), 0) diff --git 
a/tests/unit/plugins/modules/test_keycloak_identity_provider.py b/tests/unit/plugins/modules/test_keycloak_identity_provider.py index a893a130a5..2f6cf738db 100644 --- a/tests/unit/plugins/modules/test_keycloak_identity_provider.py +++ b/tests/unit/plugins/modules/test_keycloak_identity_provider.py @@ -9,9 +9,9 @@ __metaclass__ = type from contextlib import contextmanager -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args from ansible_collections.community.general.plugins.modules import keycloak_identity_provider @@ -238,19 +238,18 @@ class TestKeycloakIdentityProvider(ModuleTestCase): return_value_mapper_created = [None, None] changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_identity_provider=return_value_idp_get, get_identity_provider_mappers=return_value_mappers_get, - create_identity_provider=return_value_idp_created, create_identity_provider_mapper=return_value_mapper_created, - get_realm_by_id=return_value_realm_get) \ - as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider, - mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper, - mock_delete_identity_provider_mapper, mock_get_realm_by_id): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_identity_provider=return_value_idp_get, get_identity_provider_mappers=return_value_mappers_get, + create_identity_provider=return_value_idp_created, create_identity_provider_mapper=return_value_mapper_created, + get_realm_by_id=return_value_realm_get) \ + as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider, + mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper, + mock_delete_identity_provider_mapper, mock_get_realm_by_id): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_identity_provider.mock_calls), 2) self.assertEqual(len(mock_get_identity_provider_mappers.mock_calls), 1) @@ -547,19 +546,18 @@ class TestKeycloakIdentityProvider(ModuleTestCase): return_value_mapper_created = [None] changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_identity_provider=return_value_idp_get, get_identity_provider_mappers=return_value_mappers_get, - update_identity_provider=return_value_idp_updated, update_identity_provider_mapper=return_value_mapper_updated, - create_identity_provider_mapper=return_value_mapper_created, get_realm_by_id=return_value_realm_get) \ - as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider, - mock_get_identity_provider_mappers, 
mock_create_identity_provider_mapper, mock_update_identity_provider_mapper, - mock_delete_identity_provider_mapper, mock_get_realm_by_id): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_identity_provider=return_value_idp_get, get_identity_provider_mappers=return_value_mappers_get, + update_identity_provider=return_value_idp_updated, update_identity_provider_mapper=return_value_mapper_updated, + create_identity_provider_mapper=return_value_mapper_created, get_realm_by_id=return_value_realm_get) \ + as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider, + mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper, + mock_delete_identity_provider_mapper, mock_get_realm_by_id): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_identity_provider.mock_calls), 2) self.assertEqual(len(mock_get_identity_provider_mappers.mock_calls), 5) @@ -697,19 +695,18 @@ class TestKeycloakIdentityProvider(ModuleTestCase): return_value_mapper_created = [None] changed = False - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_identity_provider=return_value_idp_get, get_identity_provider_mappers=return_value_mappers_get, - update_identity_provider=return_value_idp_updated, update_identity_provider_mapper=return_value_mapper_updated, - create_identity_provider_mapper=return_value_mapper_created, get_realm_by_id=return_value_realm_get) \ - as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider, - mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper, - mock_delete_identity_provider_mapper, mock_get_realm_by_id): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_identity_provider=return_value_idp_get, get_identity_provider_mappers=return_value_mappers_get, + update_identity_provider=return_value_idp_updated, update_identity_provider_mapper=return_value_mapper_updated, + create_identity_provider_mapper=return_value_mapper_created, get_realm_by_id=return_value_realm_get) \ + as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider, + mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper, + mock_delete_identity_provider_mapper, mock_get_realm_by_id): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_identity_provider.mock_calls), 1) self.assertEqual(len(mock_get_identity_provider_mappers.mock_calls), 2) @@ -738,17 +735,16 @@ class TestKeycloakIdentityProvider(ModuleTestCase): return_value_idp_get = [None] changed = False - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_identity_provider=return_value_idp_get) \ - as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider, - mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper, - 
mock_delete_identity_provider_mapper, mock_get_realm_by_id): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_identity_provider=return_value_idp_get) \ + as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider, + mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper, + mock_delete_identity_provider_mapper, mock_get_realm_by_id): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_identity_provider.mock_calls), 1) self.assertEqual(len(mock_delete_identity_provider.mock_calls), 0) @@ -844,18 +840,17 @@ class TestKeycloakIdentityProvider(ModuleTestCase): return_value_idp_deleted = [None] changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_identity_provider=return_value_idp_get, get_identity_provider_mappers=return_value_mappers_get, - delete_identity_provider=return_value_idp_deleted, get_realm_by_id=return_value_realm_get) \ - as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider, - mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper, - mock_delete_identity_provider_mapper, mock_get_realm_by_id): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_identity_provider=return_value_idp_get, get_identity_provider_mappers=return_value_mappers_get, + delete_identity_provider=return_value_idp_deleted, get_realm_by_id=return_value_realm_get) \ + as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider, + mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper, + mock_delete_identity_provider_mapper, mock_get_realm_by_id): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_identity_provider.mock_calls), 1) self.assertEqual(len(mock_get_identity_provider_mappers.mock_calls), 1) diff --git a/tests/unit/plugins/modules/test_keycloak_realm.py b/tests/unit/plugins/modules/test_keycloak_realm.py index 72993cbdfe..e1d1759c21 100644 --- a/tests/unit/plugins/modules/test_keycloak_realm.py +++ b/tests/unit/plugins/modules/test_keycloak_realm.py @@ -9,9 +9,9 @@ __metaclass__ = type from contextlib import contextmanager -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args from ansible_collections.community.general.plugins.modules import keycloak_realm @@ -113,15 +113,14 @@ class TestKeycloakRealm(ModuleTestCase): }] changed = True - set_module_args(module_args) - # 
Run the module - with mock_good_connection(): - with patch_keycloak_api(get_realm_by_id=return_value_absent, create_realm=return_value_created) \ - as (mock_get_realm_by_id, mock_create_realm, mock_update_realm, mock_delete_realm): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_realm_by_id=return_value_absent, create_realm=return_value_created) \ + as (mock_get_realm_by_id, mock_create_realm, mock_update_realm, mock_delete_realm): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_realm_by_id.mock_calls), 2) self.assertEqual(len(mock_create_realm.mock_calls), 1) @@ -164,15 +163,14 @@ class TestKeycloakRealm(ModuleTestCase): }] changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_realm_by_id=return_value_absent, update_realm=return_value_updated) \ - as (mock_get_realm_by_id, mock_create_realm, mock_update_realm, mock_delete_realm): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_realm_by_id=return_value_absent, update_realm=return_value_updated) \ + as (mock_get_realm_by_id, mock_create_realm, mock_update_realm, mock_delete_realm): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_realm_by_id.mock_calls), 2) self.assertEqual(len(mock_create_realm.mock_calls), 0) @@ -215,15 +213,14 @@ class TestKeycloakRealm(ModuleTestCase): }] changed = False - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_realm_by_id=return_value_absent, update_realm=return_value_updated) \ - as (mock_get_realm_by_id, mock_create_realm, mock_update_realm, mock_delete_realm): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_realm_by_id=return_value_absent, update_realm=return_value_updated) \ + as (mock_get_realm_by_id, mock_create_realm, mock_update_realm, mock_delete_realm): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_realm_by_id.mock_calls), 2) self.assertEqual(len(mock_create_realm.mock_calls), 0) @@ -251,15 +248,14 @@ class TestKeycloakRealm(ModuleTestCase): return_value_deleted = [None] changed = False - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_realm_by_id=return_value_absent, delete_realm=return_value_deleted) \ - as (mock_get_realm_by_id, mock_create_realm, mock_update_realm, mock_delete_realm): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_realm_by_id=return_value_absent, delete_realm=return_value_deleted) \ + as (mock_get_realm_by_id, mock_create_realm, mock_update_realm, mock_delete_realm): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_realm_by_id.mock_calls), 1) self.assertEqual(len(mock_delete_realm.mock_calls), 0) @@ -290,15 +286,14 @@ class TestKeycloakRealm(ModuleTestCase): return_value_deleted = [None] changed = True - set_module_args(module_args) - # Run the module - with 
mock_good_connection(): - with patch_keycloak_api(get_realm_by_id=return_value_absent, delete_realm=return_value_deleted) \ - as (mock_get_realm_by_id, mock_create_realm, mock_update_realm, mock_delete_realm): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_realm_by_id=return_value_absent, delete_realm=return_value_deleted) \ + as (mock_get_realm_by_id, mock_create_realm, mock_update_realm, mock_delete_realm): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_realm_by_id.mock_calls), 1) self.assertEqual(len(mock_delete_realm.mock_calls), 1) diff --git a/tests/unit/plugins/modules/test_keycloak_realm_info.py b/tests/unit/plugins/modules/test_keycloak_realm_info.py index 41095a8784..7e9469b040 100644 --- a/tests/unit/plugins/modules/test_keycloak_realm_info.py +++ b/tests/unit/plugins/modules/test_keycloak_realm_info.py @@ -9,9 +9,9 @@ __metaclass__ = type from contextlib import contextmanager -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args from ansible_collections.community.general.plugins.modules import keycloak_realm_info @@ -105,15 +105,14 @@ class TestKeycloakRealmRole(ModuleTestCase): } ] - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_realm_info_by_id=return_value) \ - as (mock_get_realm_info_by_id): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_realm_info_by_id=return_value) \ + as (mock_get_realm_info_by_id): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_realm_info_by_id.mock_calls), 1) diff --git a/tests/unit/plugins/modules/test_keycloak_realm_keys.py b/tests/unit/plugins/modules/test_keycloak_realm_keys.py index 628fa54f31..f6b7ae4432 100644 --- a/tests/unit/plugins/modules/test_keycloak_realm_keys.py +++ b/tests/unit/plugins/modules/test_keycloak_realm_keys.py @@ -9,9 +9,9 @@ __metaclass__ = type from contextlib import contextmanager -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args from ansible_collections.community.general.plugins.modules import keycloak_realm_key @@ -139,15 +139,14 @@ class TestKeycloakRealmKeys(ModuleTestCase): ] 
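The `patch_keycloak_api` helpers used throughout these files are per-test-file context managers that patch a batch of `KeycloakAPI` methods at once and yield the resulting mocks as a tuple. A minimal sketch of that pattern, assuming `KeycloakAPI` from `plugins/module_utils/identity/keycloak/keycloak.py` as the patch target (the real helpers cover more methods and differ from file to file):

```python
from contextlib import contextmanager
from unittest.mock import patch

# Assumed patch target; the class the tested modules talk to.
from ansible_collections.community.general.plugins.module_utils.identity.keycloak import keycloak


@contextmanager
def patch_keycloak_api(get_components=None, create_component=None):
    """Patch selected KeycloakAPI methods and yield the mocks for call assertions."""
    with patch.object(keycloak.KeycloakAPI, 'get_components',
                      side_effect=get_components) as mock_get_components, \
            patch.object(keycloak.KeycloakAPI, 'create_component',
                         side_effect=create_component) as mock_create_component:
        yield mock_get_components, mock_create_component
```

Passing a list such as `return_value_components_get` as `side_effect` makes each successive call return the next element, which is why the tests prepare their return values as lists.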
changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_components=return_value_components_get, create_component=return_value_component_create) \ - as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_components_get, create_component=return_value_component_create) \ + as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_components.mock_calls), 1) self.assertEqual(len(mock_get_component.mock_calls), 0) @@ -232,16 +231,15 @@ class TestKeycloakRealmKeys(ModuleTestCase): ] changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_components=return_value_components_get, - update_component=return_value_component_update) \ - as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_components_get, + update_component=return_value_component_update) \ + as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_components.mock_calls), 1) self.assertEqual(len(mock_get_component.mock_calls), 0) @@ -277,15 +275,14 @@ class TestKeycloakRealmKeys(ModuleTestCase): ] changed = False - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_components=return_value_components_get) \ - as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_components_get) \ + as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_components.mock_calls), 1) self.assertEqual(len(mock_get_component.mock_calls), 0) @@ -356,15 +353,14 @@ class TestKeycloakRealmKeys(ModuleTestCase): ] changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_components=return_value_components_get, delete_component=return_value_component_delete) \ - as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_components_get, delete_component=return_value_component_delete) \ + as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with 
self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_components.mock_calls), 1) self.assertEqual(len(mock_get_component.mock_calls), 0) diff --git a/tests/unit/plugins/modules/test_keycloak_realm_keys_metadata_info.py b/tests/unit/plugins/modules/test_keycloak_realm_keys_metadata_info.py index 14d36f6aab..201f665d12 100644 --- a/tests/unit/plugins/modules/test_keycloak_realm_keys_metadata_info.py +++ b/tests/unit/plugins/modules/test_keycloak_realm_keys_metadata_info.py @@ -14,9 +14,9 @@ from itertools import count from ansible.module_utils.six import StringIO from ansible_collections.community.general.plugins.modules import \ keycloak_realm_keys_metadata_info -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible_collections.community.general.tests.unit.plugins.modules.utils import ( +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( AnsibleExitJson, ModuleTestCase, set_module_args) @@ -158,23 +158,22 @@ class TestKeycloakRealmRole(ModuleTestCase): } ] - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(side_effect=return_value) as ( - mock_get_realm_keys_metadata_by_id - ): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(side_effect=return_value) as ( + mock_get_realm_keys_metadata_by_id + ): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() - result = exec_info.exception.args[0] - self.assertIs(result["changed"], False) - self.assertEqual( - result["msg"], "Get realm keys metadata successful for ID my-realm" - ) - self.assertEqual(result["keys_metadata"], return_value[0]) + result = exec_info.exception.args[0] + self.assertIs(result["changed"], False) + self.assertEqual( + result["msg"], "Get realm keys metadata successful for ID my-realm" + ) + self.assertEqual(result["keys_metadata"], return_value[0]) self.assertEqual(len(mock_get_realm_keys_metadata_by_id.mock_calls), 1) diff --git a/tests/unit/plugins/modules/test_keycloak_role.py b/tests/unit/plugins/modules/test_keycloak_role.py index cc2f6e716b..eebb6ca09c 100644 --- a/tests/unit/plugins/modules/test_keycloak_role.py +++ b/tests/unit/plugins/modules/test_keycloak_role.py @@ -9,9 +9,9 @@ __metaclass__ = type from contextlib import contextmanager -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args from ansible_collections.community.general.plugins.modules import keycloak_role @@ -129,17 +129,16 @@ class TestKeycloakRealmRole(ModuleTestCase): return_value_created = [None] changed = True - 
set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_realm_role=return_value_absent, create_realm_role=return_value_created) \ - as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role, - mock_get_client_role, mock_create_client_role, mock_update_client_role, mock_delete_client_role, - mock_get_client_by_client_id, mock_get_role_composites): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_realm_role=return_value_absent, create_realm_role=return_value_created) \ + as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role, + mock_get_client_role, mock_create_client_role, mock_update_client_role, mock_delete_client_role, + mock_get_client_by_client_id, mock_get_role_composites): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_realm_role.mock_calls), 2) self.assertEqual(len(mock_create_realm_role.mock_calls), 1) @@ -185,17 +184,16 @@ class TestKeycloakRealmRole(ModuleTestCase): return_value_updated = [None] changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_realm_role=return_value_present, update_realm_role=return_value_updated) \ - as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role, - mock_get_client_role, mock_create_client_role, mock_update_client_role, mock_delete_client_role, - mock_get_client_by_client_id, mock_get_role_composites): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_realm_role=return_value_present, update_realm_role=return_value_updated) \ + as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role, + mock_get_client_role, mock_create_client_role, mock_update_client_role, mock_delete_client_role, + mock_get_client_by_client_id, mock_get_role_composites): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_realm_role.mock_calls), 2) self.assertEqual(len(mock_create_realm_role.mock_calls), 0) @@ -241,17 +239,16 @@ class TestKeycloakRealmRole(ModuleTestCase): return_value_updated = [None] changed = False - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_realm_role=return_value_present, update_realm_role=return_value_updated) \ - as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role, - mock_get_client_role, mock_create_client_role, mock_update_client_role, mock_delete_client_role, - mock_get_client_by_client_id, mock_get_role_composites): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_realm_role=return_value_present, update_realm_role=return_value_updated) \ + as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role, + mock_get_client_role, mock_create_client_role, mock_update_client_role, mock_delete_client_role, + mock_get_client_by_client_id, mock_get_role_composites): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() 
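Every hunk in these test files follows the same mechanical conversion: `set_module_args()` is no longer a plain call that leaves global state behind, but a context manager that scopes the injected arguments to the `with` block. A minimal sketch of how such a helper can be written (the actual implementation ships with community.internal_test_tools and may differ):

```python
# Sketch only: illustrates why set_module_args() works as a context manager.
import contextlib
import json

from ansible.module_utils import basic
from ansible.module_utils.common.text.converters import to_bytes


@contextlib.contextmanager
def set_module_args(args):
    """Temporarily install serialized module args, restoring the old value on exit."""
    serialized = to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': args}))
    old_args = basic._ANSIBLE_ARGS
    basic._ANSIBLE_ARGS = serialized
    try:
        yield
    finally:
        basic._ANSIBLE_ARGS = old_args
```

The `finally` clause is what makes the new nesting safe: the arguments are restored even though every test leaves `main()` via a raised `AnsibleExitJson`.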
self.assertEqual(len(mock_get_realm_role.mock_calls), 1) self.assertEqual(len(mock_create_realm_role.mock_calls), 0) @@ -371,19 +368,18 @@ class TestKeycloakRealmRole(ModuleTestCase): changed = False - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_realm_role=return_value_present, update_realm_role=return_value_updated, - get_client_by_id=return_get_client_by_client_id, - get_role_composites=return_get_role_composites) \ - as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role, - mock_get_client_role, mock_create_client_role, mock_update_client_role, mock_delete_client_role, - mock_get_client_by_client_id, mock_get_role_composites): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_realm_role=return_value_present, update_realm_role=return_value_updated, + get_client_by_id=return_get_client_by_client_id, + get_role_composites=return_get_role_composites) \ + as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role, + mock_get_client_role, mock_create_client_role, mock_update_client_role, mock_delete_client_role, + mock_get_client_by_client_id, mock_get_role_composites): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_realm_role.mock_calls), 1) self.assertEqual(len(mock_create_realm_role.mock_calls), 0) @@ -412,17 +408,16 @@ class TestKeycloakRealmRole(ModuleTestCase): return_value_deleted = [None] changed = False - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_realm_role=return_value_absent, delete_realm_role=return_value_deleted) \ - as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role, - mock_get_client_role, mock_create_client_role, mock_update_client_role, mock_delete_client_role, - mock_get_client_by_client_id, mock_get_role_composites): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_realm_role=return_value_absent, delete_realm_role=return_value_deleted) \ + as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role, + mock_get_client_role, mock_create_client_role, mock_update_client_role, mock_delete_client_role, + mock_get_client_by_client_id, mock_get_role_composites): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_realm_role.mock_calls), 1) self.assertEqual(len(mock_delete_realm_role.mock_calls), 0) @@ -458,17 +453,16 @@ class TestKeycloakRealmRole(ModuleTestCase): return_value_deleted = [None] changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_realm_role=return_value_absent, delete_realm_role=return_value_deleted) \ - as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role, - mock_get_client_role, mock_create_client_role, mock_update_client_role, mock_delete_client_role, - mock_get_client_by_client_id, mock_get_role_composites): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with 
patch_keycloak_api(get_realm_role=return_value_absent, delete_realm_role=return_value_deleted) \ + as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role, + mock_get_client_role, mock_create_client_role, mock_update_client_role, mock_delete_client_role, + mock_get_client_by_client_id, mock_get_role_composites): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_realm_role.mock_calls), 1) self.assertEqual(len(mock_delete_realm_role.mock_calls), 1) @@ -531,17 +525,16 @@ class TestKeycloakClientRole(ModuleTestCase): ] changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_client_role=return_get_client_role) \ - as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role, - mock_get_client_role, mock_create_client_role, mock_update_client_role, mock_delete_client_role, - mock_get_client_by_client_id, mock_get_role_composites): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_client_role=return_get_client_role) \ + as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role, + mock_get_client_role, mock_create_client_role, mock_update_client_role, mock_delete_client_role, + mock_get_client_by_client_id, mock_get_role_composites): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_realm_role.mock_calls), 0) self.assertEqual(len(mock_create_realm_role.mock_calls), 0) @@ -653,18 +646,17 @@ class TestKeycloakClientRole(ModuleTestCase): ] changed = False - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_client_role=return_get_client_role, get_client_by_id=return_get_client_by_client_id, - get_role_composites=return_get_role_composites) \ - as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role, - mock_get_client_role, mock_create_client_role, mock_update_client_role, mock_delete_client_role, - mock_get_client_by_client_id, mock_get_role_composites): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_client_role=return_get_client_role, get_client_by_id=return_get_client_by_client_id, + get_role_composites=return_get_role_composites) \ + as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role, + mock_get_client_role, mock_create_client_role, mock_update_client_role, mock_delete_client_role, + mock_get_client_by_client_id, mock_get_role_composites): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_realm_role.mock_calls), 0) self.assertEqual(len(mock_create_realm_role.mock_calls), 0) diff --git a/tests/unit/plugins/modules/test_keycloak_user.py b/tests/unit/plugins/modules/test_keycloak_user.py index 26bc33d826..729bb89e1f 100644 --- a/tests/unit/plugins/modules/test_keycloak_user.py +++ b/tests/unit/plugins/modules/test_keycloak_user.py @@ -10,9 +10,9 @@ __metaclass__ = type from contextlib import contextmanager -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.tests.unit.compat.mock import patch 
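The import hunks are the other half of the migration: the `unittest` compat shims, `patch`, and the module-test utilities now come from community.internal_test_tools rather than community.general's private copies. The mechanical before/after for a typical test file:

```python
# Before: helpers bundled in community.general's own test tree
from ansible_collections.community.general.tests.unit.plugins.modules.utils import (
    AnsibleExitJson, ModuleTestCase, set_module_args)

# After: the shared helpers from community.internal_test_tools
from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import (
    AnsibleExitJson, ModuleTestCase, set_module_args)
```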
-from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args from ansible_collections.community.general.plugins.modules import keycloak_user @@ -114,25 +114,24 @@ class TestKeycloakUser(ModuleTestCase): return_update_user = None changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_user_by_username=return_value_get_user_by_username, - create_user=return_create_user, - update_user_groups_membership=return_value_update_user_groups_membership, - get_user_groups=return_get_user_groups, - update_user=return_update_user, - delete_user=return_delete_user) \ - as (mock_get_user_by_username, - mock_create_user, - mock_update_user_groups_membership, - mock_get_user_groups, - mock_delete_user, - mock_update_user): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_user_by_username=return_value_get_user_by_username, + create_user=return_create_user, + update_user_groups_membership=return_value_update_user_groups_membership, + get_user_groups=return_get_user_groups, + update_user=return_update_user, + delete_user=return_delete_user) \ + as (mock_get_user_by_username, + mock_create_user, + mock_update_user_groups_membership, + mock_get_user_groups, + mock_delete_user, + mock_update_user): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(mock_get_user_by_username.call_count, 1) self.assertEqual(mock_create_user.call_count, 1) @@ -176,25 +175,24 @@ class TestKeycloakUser(ModuleTestCase): return_update_user = None changed = False - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_user_by_username=return_value_get_user_by_username, - create_user=return_create_user, - update_user_groups_membership=return_value_update_user_groups_membership, - get_user_groups=return_get_user_groups, - update_user=return_update_user, - delete_user=return_delete_user) \ - as (mock_get_user_by_username, - mock_create_user, - mock_update_user_groups_membership, - mock_get_user_groups, - mock_delete_user, - mock_update_user): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_user_by_username=return_value_get_user_by_username, + create_user=return_create_user, + update_user_groups_membership=return_value_update_user_groups_membership, + get_user_groups=return_get_user_groups, + update_user=return_update_user, + delete_user=return_delete_user) \ + as (mock_get_user_by_username, + mock_create_user, + mock_update_user_groups_membership, + mock_get_user_groups, + mock_delete_user, + mock_update_user): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(mock_get_user_by_username.call_count, 1) self.assertEqual(mock_create_user.call_count, 0) @@ -257,25 +255,24 @@ class TestKeycloakUser(ModuleTestCase): ] changed = True - set_module_args(module_args) - # Run the module - with 
mock_good_connection(): - with patch_keycloak_api(get_user_by_username=return_value_get_user_by_username, - create_user=return_create_user, - update_user_groups_membership=return_value_update_user_groups_membership, - get_user_groups=return_get_user_groups, - update_user=return_update_user, - delete_user=return_delete_user) \ - as (mock_get_user_by_username, - mock_create_user, - mock_update_user_groups_membership, - mock_get_user_groups, - mock_delete_user, - mock_update_user): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_user_by_username=return_value_get_user_by_username, + create_user=return_create_user, + update_user_groups_membership=return_value_update_user_groups_membership, + get_user_groups=return_get_user_groups, + update_user=return_update_user, + delete_user=return_delete_user) \ + as (mock_get_user_by_username, + mock_create_user, + mock_update_user_groups_membership, + mock_get_user_groups, + mock_delete_user, + mock_update_user): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(mock_get_user_by_username.call_count, 1) self.assertEqual(mock_create_user.call_count, 0) @@ -319,25 +316,24 @@ class TestKeycloakUser(ModuleTestCase): return_update_user = None changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_user_by_username=return_value_get_user_by_username, - create_user=return_create_user, - update_user_groups_membership=return_value_update_user_groups_membership, - get_user_groups=return_get_user_groups, - update_user=return_update_user, - delete_user=return_delete_user) \ - as (mock_get_user_by_username, - mock_create_user, - mock_update_user_groups_membership, - mock_get_user_groups, - mock_delete_user, - mock_update_user): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_user_by_username=return_value_get_user_by_username, + create_user=return_create_user, + update_user_groups_membership=return_value_update_user_groups_membership, + get_user_groups=return_get_user_groups, + update_user=return_update_user, + delete_user=return_delete_user) \ + as (mock_get_user_by_username, + mock_create_user, + mock_update_user_groups_membership, + mock_get_user_groups, + mock_delete_user, + mock_update_user): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(mock_get_user_by_username.call_count, 1) self.assertEqual(mock_create_user.call_count, 0) diff --git a/tests/unit/plugins/modules/test_keycloak_user_federation.py b/tests/unit/plugins/modules/test_keycloak_user_federation.py index 81fd65e108..7ffddb7268 100644 --- a/tests/unit/plugins/modules/test_keycloak_user_federation.py +++ b/tests/unit/plugins/modules/test_keycloak_user_federation.py @@ -9,9 +9,9 @@ __metaclass__ = type from contextlib import contextmanager -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import 
patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args from ansible_collections.community.general.plugins.modules import keycloak_user_federation @@ -150,15 +150,14 @@ class TestKeycloakUserFederation(ModuleTestCase): ] changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_components=return_value_components_get, create_component=return_value_component_create) \ - as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_components_get, create_component=return_value_component_create) \ + as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_components.mock_calls), 3) self.assertEqual(len(mock_get_component.mock_calls), 0) @@ -272,16 +271,15 @@ class TestKeycloakUserFederation(ModuleTestCase): ] changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_components=return_value_components_get, get_component=return_value_component_get, - update_component=return_value_component_update) \ - as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_components_get, get_component=return_value_component_get, + update_component=return_value_component_update) \ + as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_components.mock_calls), 3) self.assertEqual(len(mock_get_component.mock_calls), 1) @@ -494,15 +492,14 @@ class TestKeycloakUserFederation(ModuleTestCase): ] changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_components=return_value_components_get, create_component=return_value_component_create) \ - as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_components_get, create_component=return_value_component_create) \ + as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_components.mock_calls), 3) self.assertEqual(len(mock_get_component.mock_calls), 0) @@ -530,15 +527,14 @@ class TestKeycloakUserFederation(ModuleTestCase): ] changed = False - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_components=return_value_components_get) \ - as (mock_get_components, 
mock_get_component, mock_create_component, mock_update_component, mock_delete_component): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_components_get) \ + as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_components.mock_calls), 1) self.assertEqual(len(mock_get_component.mock_calls), 0) @@ -604,15 +600,14 @@ class TestKeycloakUserFederation(ModuleTestCase): ] changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_components=return_value_components_get, delete_component=return_value_component_delete) \ - as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_components_get, delete_component=return_value_component_delete) \ + as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_components.mock_calls), 2) self.assertEqual(len(mock_get_component.mock_calls), 0) diff --git a/tests/unit/plugins/modules/test_keycloak_userprofile.py b/tests/unit/plugins/modules/test_keycloak_userprofile.py index 3ae01bbb8b..b414949585 100644 --- a/tests/unit/plugins/modules/test_keycloak_userprofile.py +++ b/tests/unit/plugins/modules/test_keycloak_userprofile.py @@ -9,9 +9,9 @@ __metaclass__ = type from contextlib import contextmanager -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args from ansible_collections.community.general.plugins.modules import keycloak_userprofile @@ -350,15 +350,14 @@ class TestKeycloakUserprofile(ModuleTestCase): ] changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_components=return_value_get_components_get, create_component=return_value_component_create) as ( - mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_get_components_get, create_component=return_value_component_create) as ( + mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() 
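The `assertRaises(AnsibleExitJson)` idiom used throughout works because `ModuleTestCase` patches `AnsibleModule.exit_json` to raise instead of calling `sys.exit()`. A hedged sketch of that interception (the names mirror the imported utils; the real code may differ in detail):

```python
class AnsibleExitJson(Exception):
    """Raised by the patched exit_json() so tests regain control."""


def exit_json(*args, **kwargs):
    # Mirror AnsibleModule.exit_json(): a result dict always carries "changed".
    kwargs.setdefault('changed', False)
    raise AnsibleExitJson(kwargs)
```

This is why `exec_info.exception.args[0]` in the keys-metadata test above yields the module's result dict, whose `changed` and `msg` keys are asserted directly.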
self.assertEqual(len(mock_get_components.mock_calls), 1) self.assertEqual(len(mock_get_component.mock_calls), 0) @@ -639,16 +638,15 @@ class TestKeycloakUserprofile(ModuleTestCase): ] changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_components=return_value_get_components_get, - update_component=return_value_component_update) as ( - mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_get_components_get, + update_component=return_value_component_update) as ( + mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_components.mock_calls), 1) self.assertEqual(len(mock_get_component.mock_calls), 0) @@ -676,15 +674,14 @@ class TestKeycloakUserprofile(ModuleTestCase): ] changed = False - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_components=return_value_get_components_get) as ( - mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_get_components_get) as ( + mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_components.mock_calls), 1) self.assertEqual(len(mock_get_component.mock_calls), 0) @@ -844,15 +841,14 @@ class TestKeycloakUserprofile(ModuleTestCase): ] changed = True - set_module_args(module_args) - # Run the module - with mock_good_connection(): - with patch_keycloak_api(get_components=return_value_get_components_get, delete_component=return_value_component_delete) as ( - mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): - with self.assertRaises(AnsibleExitJson) as exec_info: - self.module.main() + with set_module_args(module_args): + with mock_good_connection(): + with patch_keycloak_api(get_components=return_value_get_components_get, delete_component=return_value_component_delete) as ( + mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() self.assertEqual(len(mock_get_components.mock_calls), 1) self.assertEqual(len(mock_get_component.mock_calls), 0) diff --git a/tests/unit/plugins/modules/test_krb_ticket.py b/tests/unit/plugins/modules/test_krb_ticket.py index 8c17e2e43b..99c97a4f03 100644 --- a/tests/unit/plugins/modules/test_krb_ticket.py +++ b/tests/unit/plugins/modules/test_krb_ticket.py @@ -8,7 +8,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import krb_ticket -from .helper import Helper, RunCommandMock # pylint: disable=unused-import +from .uthelper import UTHelper, RunCommandMock -Helper.from_module(krb_ticket, __name__) +UTHelper.from_module(krb_ticket, __name__, 
mocks=[RunCommandMock]) diff --git a/tests/unit/plugins/modules/test_krb_ticket.yaml b/tests/unit/plugins/modules/test_krb_ticket.yaml index 9882bf137d..d1b6d67f57 100644 --- a/tests/unit/plugins/modules/test_krb_ticket.yaml +++ b/tests/unit/plugins/modules/test_krb_ticket.yaml @@ -4,106 +4,111 @@ # SPDX-License-Identifier: GPL-3.0-or-later --- -- id: test_kinit_default - input: - state: present - password: cool_password - output: - changed: true - mocks: - run_command: - - command: [/testbin/klist] - environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: false} - rc: 1 - out: "" - err: "" - - command: [/testbin/kinit] - environ: &env-data {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true, data: cool_password} - rc: 0 - out: "" - err: "" -- id: test_kinit_principal - input: - state: present - password: cool_password - principal: admin@IPA.TEST - output: - changed: true - mocks: - run_command: - - command: [/testbin/klist, -l] - environ: *env-def - rc: 0 - out: "" - err: "" - - command: [/testbin/kinit, admin@IPA.TEST] - environ: *env-data - rc: 0 - out: "" - err: "" -- id: test_kdestroy_default - input: - state: absent - output: - changed: true - mocks: - run_command: - - command: [/testbin/klist] - environ: *env-def - rc: 0 - out: "" - err: "" - - command: [/testbin/kdestroy] - environ: &env-norc {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} - rc: 0 - out: "" - err: "" -- id: test_kdestroy_principal - input: - state: absent - principal: admin@IPA.TEST - output: - changed: true - mocks: - run_command: - - command: [/testbin/klist, -l] - environ: *env-def - rc: 0 - out: "admin@IPA.TEST" - err: "" - - command: [/testbin/kdestroy, -p, admin@IPA.TEST] - environ: *env-norc - rc: 0 - out: "" - err: "" -- id: test_kdestroy_cache_name - input: - state: absent - cache_name: KEYRING:persistent:0:0 - output: - changed: true - mocks: - run_command: - - command: [/testbin/klist, -l] - environ: *env-def - rc: 0 - out: "KEYRING:persistent:0:0" - err: "" - - command: [/testbin/kdestroy, -c, KEYRING:persistent:0:0] - environ: *env-norc - rc: 0 - out: "" - err: "" -- id: test_kdestroy_all - input: - state: absent - kdestroy_all: true - output: - changed: true - mocks: - run_command: - - command: [/testbin/kdestroy, -A] - environ: *env-norc - rc: 0 - out: "" - err: "" +anchors: + environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: false} + environ_data: &env-data {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true, data: cool_password} + environ_norc: &env-norc {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} +test_cases: + - id: test_kinit_default + input: + state: present + password: cool_password + output: + changed: true + mocks: + run_command: + - command: [/testbin/klist] + environ: *env-def + rc: 1 + out: '' + err: '' + - command: [/testbin/kinit] + environ: *env-data + rc: 0 + out: '' + err: '' + - id: test_kinit_principal + input: + state: present + password: cool_password + principal: admin@IPA.TEST + output: + changed: true + mocks: + run_command: + - command: [/testbin/klist, -l] + environ: *env-def + rc: 0 + out: '' + err: '' + - command: [/testbin/kinit, admin@IPA.TEST] + environ: *env-data + rc: 0 + out: '' + err: '' + - id: test_kdestroy_default + input: + state: absent + output: + changed: true + mocks: + run_command: + - command: [/testbin/klist] + environ: *env-def + rc: 0 + out: '' + err: '' + - command: [/testbin/kdestroy] + environ: *env-norc + rc: 0 + out: '' + err: '' + - id: test_kdestroy_principal + input: + 
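The YAML rewrite for `test_krb_ticket.yaml` moves the shared `environ` mappings into a dedicated top-level `anchors:` section and nests the cases under `test_cases:`, so aliases like `*env-def` no longer depend on whichever test case happens to define the anchor first. A small Python illustration of how such anchors resolve on load (assumes PyYAML is available; the actual loader is part of UTHelper):

```python
import yaml  # PyYAML, assumed available in the test environment

DOC = """
anchors:
  environ: &env-def {check_rc: false}
test_cases:
  - id: demo
    mocks:
      run_command:
        - command: [/testbin/klist]
          environ: *env-def
"""

data = yaml.safe_load(DOC)
# The alias resolves to the mapping defined once under "anchors":
assert data["test_cases"][0]["mocks"]["run_command"][0]["environ"] == {"check_rc": False}
```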
state: absent + principal: admin@IPA.TEST + output: + changed: true + mocks: + run_command: + - command: [/testbin/klist, -l] + environ: *env-def + rc: 0 + out: admin@IPA.TEST + err: '' + - command: [/testbin/kdestroy, -p, admin@IPA.TEST] + environ: *env-norc + rc: 0 + out: '' + err: '' + - id: test_kdestroy_cache_name + input: + state: absent + cache_name: KEYRING:persistent:0:0 + output: + changed: true + mocks: + run_command: + - command: [/testbin/klist, -l] + environ: *env-def + rc: 0 + out: KEYRING:persistent:0:0 + err: '' + - command: [/testbin/kdestroy, -c, KEYRING:persistent:0:0] + environ: *env-norc + rc: 0 + out: '' + err: '' + - id: test_kdestroy_all + input: + state: absent + kdestroy_all: true + output: + changed: true + mocks: + run_command: + - command: [/testbin/kdestroy, -A] + environ: *env-norc + rc: 0 + out: '' + err: '' diff --git a/tests/unit/plugins/modules/test_linode.py b/tests/unit/plugins/modules/test_linode.py index 9e7b158d8e..e54c656816 100644 --- a/tests/unit/plugins/modules/test_linode.py +++ b/tests/unit/plugins/modules/test_linode.py @@ -8,7 +8,7 @@ __metaclass__ = type import pytest from ansible_collections.community.general.plugins.modules import linode -from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import set_module_args from .linode_conftest import api_key, auth # noqa: F401, pylint: disable=unused-import @@ -18,5 +18,5 @@ if not linode.HAS_LINODE: def test_name_is_a_required_parameter(api_key, auth): with pytest.raises(SystemExit): - set_module_args({}) - linode.main() + with set_module_args({}): + linode.main() diff --git a/tests/unit/plugins/modules/test_linode_v4.py b/tests/unit/plugins/modules/test_linode_v4.py index 915a82f087..47e77a52e8 100644 --- a/tests/unit/plugins/modules/test_linode_v4.py +++ b/tests/unit/plugins/modules/test_linode_v4.py @@ -7,31 +7,26 @@ __metaclass__ = type import json import os -import sys import pytest linode_apiv4 = pytest.importorskip('linode_api4') -mandatory_py_version = pytest.mark.skipif( - sys.version_info < (2, 7), - reason='The linode_api4 dependency requires python2.7 or higher' -) from linode_api4.errors import ApiError as LinodeApiError from linode_api4 import LinodeClient from ansible_collections.community.general.plugins.modules import linode_v4 from ansible_collections.community.general.plugins.module_utils.linode import get_user_agent -from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args -from ansible_collections.community.general.tests.unit.compat import mock +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.compat import mock from .linode_conftest import access_token, no_access_token_in_env, default_args, mock_linode # noqa: F401, pylint: disable=unused-import def test_mandatory_state_is_validated(capfd): with pytest.raises(SystemExit): - set_module_args({'label': 'foo'}) - linode_v4.initialise_module() + with set_module_args({'label': 'foo'}): + linode_v4.initialise_module() out, err = capfd.readouterr() results = json.loads(out) @@ -42,8 +37,8 @@ def test_mandatory_state_is_validated(capfd): def test_mandatory_label_is_validated(capfd): with pytest.raises(SystemExit): - set_module_args({'state': 'present'}) - linode_v4.initialise_module() + with set_module_args({'state': 'present'}): + 
linode_v4.initialise_module() out, err = capfd.readouterr() results = json.loads(out) @@ -56,8 +51,8 @@ def test_mandatory_access_token_is_validated(default_args, no_access_token_in_env, capfd): with pytest.raises(SystemExit): - set_module_args(default_args) - linode_v4.initialise_module() + with set_module_args(default_args): + linode_v4.initialise_module() out, err = capfd.readouterr() results = json.loads(out) @@ -72,12 +67,12 @@ def test_mandatory_access_token_is_validated(default_args, def test_mandatory_access_token_passed_in_env(default_args, access_token): - set_module_args(default_args) + with set_module_args(default_args): - try: - module = linode_v4.initialise_module() - except SystemExit: - pytest.fail("'access_token' is passed in environment") + try: + module = linode_v4.initialise_module() + except SystemExit: + pytest.fail("'access_token' is passed in environment") now_set_token = module.params['access_token'] assert now_set_token == os.environ['LINODE_ACCESS_TOKEN'] @@ -86,26 +81,26 @@ def test_mandatory_access_token_passed_in_env(default_args, def test_mandatory_access_token_passed_in_as_parameter(default_args, no_access_token_in_env): default_args.update({'access_token': 'foo'}) - set_module_args(default_args) + with set_module_args(default_args): - try: - module = linode_v4.initialise_module() - except SystemExit: - pytest.fail("'access_token' is passed in as parameter") + try: + module = linode_v4.initialise_module() + except SystemExit: + pytest.fail("'access_token' is passed in as parameter") assert module.params['access_token'] == 'foo' def test_instance_by_label_cannot_authenticate(capfd, access_token, default_args): - set_module_args(default_args) - module = linode_v4.initialise_module() - client = LinodeClient(module.params['access_token']) + with set_module_args(default_args): + module = linode_v4.initialise_module() + client = LinodeClient(module.params['access_token']) - target = 'linode_api4.linode_client.LinodeGroup.instances' - with mock.patch(target, side_effect=LinodeApiError('foo')): - with pytest.raises(SystemExit): - linode_v4.maybe_instance_from_label(module, client) + target = 'linode_api4.linode_client.LinodeGroup.instances' + with mock.patch(target, side_effect=LinodeApiError('foo')): + with pytest.raises(SystemExit): + linode_v4.maybe_instance_from_label(module, client) out, err = capfd.readouterr() results = json.loads(out) @@ -116,23 +111,23 @@ def test_instance_by_label_cannot_authenticate(capfd, access_token, def test_no_instances_found_with_label_gives_none(default_args, access_token): - set_module_args(default_args) - module = linode_v4.initialise_module() - client = LinodeClient(module.params['access_token']) + with set_module_args(default_args): + module = linode_v4.initialise_module() + client = LinodeClient(module.params['access_token']) - target = 'linode_api4.linode_client.LinodeGroup.instances' - with mock.patch(target, return_value=[]): - result = linode_v4.maybe_instance_from_label(module, client) + target = 'linode_api4.linode_client.LinodeGroup.instances' + with mock.patch(target, return_value=[]): + result = linode_v4.maybe_instance_from_label(module, client) assert result is None def test_optional_region_is_validated(default_args, capfd, access_token): default_args.update({'type': 'foo', 'image': 'bar'}) - set_module_args(default_args) + with set_module_args(default_args): - with pytest.raises(SystemExit): - linode_v4.initialise_module() + with pytest.raises(SystemExit): + linode_v4.initialise_module() out, err = 
capfd.readouterr() results = json.loads(out) @@ -147,10 +142,10 @@ def test_optional_region_is_validated(default_args, capfd, access_token): def test_optional_type_is_validated(default_args, capfd, access_token): default_args.update({'region': 'foo', 'image': 'bar'}) - set_module_args(default_args) + with set_module_args(default_args): - with pytest.raises(SystemExit): - linode_v4.initialise_module() + with pytest.raises(SystemExit): + linode_v4.initialise_module() out, err = capfd.readouterr() results = json.loads(out) @@ -165,10 +160,10 @@ def test_optional_type_is_validated(default_args, capfd, access_token): def test_optional_image_is_validated(default_args, capfd, access_token): default_args.update({'type': 'foo', 'region': 'bar'}) - set_module_args(default_args) + with set_module_args(default_args): - with pytest.raises(SystemExit): - linode_v4.initialise_module() + with pytest.raises(SystemExit): + linode_v4.initialise_module() out, err = capfd.readouterr() results = json.loads(out) @@ -184,9 +179,9 @@ def test_optional_image_is_validated(default_args, capfd, access_token): @pytest.mark.parametrize('value', [True, False]) def test_private_ip_valid_values(default_args, access_token, value): default_args.update({'private_ip': value}) - set_module_args(default_args) + with set_module_args(default_args): - module = linode_v4.initialise_module() + module = linode_v4.initialise_module() assert module.params['private_ip'] is value @@ -194,10 +189,10 @@ def test_private_ip_valid_values(default_args, access_token, value): @pytest.mark.parametrize('value', ['not-a-bool', 42]) def test_private_ip_invalid_values(default_args, capfd, access_token, value): default_args.update({'private_ip': value}) - set_module_args(default_args) + with set_module_args(default_args): - with pytest.raises(SystemExit): - linode_v4.initialise_module() + with pytest.raises(SystemExit): + linode_v4.initialise_module() out, err = capfd.readouterr() results = json.loads(out) @@ -208,23 +203,23 @@ def test_private_ip_invalid_values(default_args, capfd, access_token, value): def test_private_ip_default_value(default_args, access_token): default_args.pop('private_ip', None) - set_module_args(default_args) + with set_module_args(default_args): - module = linode_v4.initialise_module() + module = linode_v4.initialise_module() assert module.params['private_ip'] is False def test_private_ip_is_forwarded_to_linode(default_args, mock_linode, access_token): default_args.update({'private_ip': True}) - set_module_args(default_args) + with set_module_args(default_args): - target = 'linode_api4.linode_client.LinodeGroup.instances' - with mock.patch(target, return_value=[]): - with pytest.raises(SystemExit): - target = 'linode_api4.linode_client.LinodeGroup.instance_create' - with mock.patch(target, return_value=(mock_linode, 'passw0rd')) as instance_create_mock: - linode_v4.main() + target = 'linode_api4.linode_client.LinodeGroup.instances' + with mock.patch(target, return_value=[]): + with pytest.raises(SystemExit): + target = 'linode_api4.linode_client.LinodeGroup.instance_create' + with mock.patch(target, return_value=(mock_linode, 'passw0rd')) as instance_create_mock: + linode_v4.main() args, kwargs = instance_create_mock.call_args assert kwargs['private_ip'] is True @@ -239,12 +234,12 @@ def test_instance_already_created(default_args, 'region': 'bar', 'image': 'baz' }) - set_module_args(default_args) + with set_module_args(default_args): - target = 'linode_api4.linode_client.LinodeGroup.instances' - with mock.patch(target, 
return_value=[mock_linode]): - with pytest.raises(SystemExit) as sys_exit_exc: - linode_v4.main() + target = 'linode_api4.linode_client.LinodeGroup.instances' + with mock.patch(target, return_value=[mock_linode]): + with pytest.raises(SystemExit) as sys_exit_exc: + linode_v4.main() assert sys_exit_exc.value.code == 0 @@ -268,14 +263,14 @@ def test_instance_to_be_created_without_root_pass(default_args, 'region': 'bar', 'image': 'baz' }) - set_module_args(default_args) + with set_module_args(default_args): - target = 'linode_api4.linode_client.LinodeGroup.instances' - with mock.patch(target, return_value=[]): - with pytest.raises(SystemExit) as sys_exit_exc: - target = 'linode_api4.linode_client.LinodeGroup.instance_create' - with mock.patch(target, return_value=(mock_linode, 'passw0rd')): - linode_v4.main() + target = 'linode_api4.linode_client.LinodeGroup.instances' + with mock.patch(target, return_value=[]): + with pytest.raises(SystemExit) as sys_exit_exc: + target = 'linode_api4.linode_client.LinodeGroup.instance_create' + with mock.patch(target, return_value=(mock_linode, 'passw0rd')): + linode_v4.main() assert sys_exit_exc.value.code == 0 @@ -300,14 +295,14 @@ def test_instance_to_be_created_with_root_pass(default_args, 'image': 'baz', 'root_pass': 'passw0rd', }) - set_module_args(default_args) + with set_module_args(default_args): - target = 'linode_api4.linode_client.LinodeGroup.instances' - with mock.patch(target, return_value=[]): - with pytest.raises(SystemExit) as sys_exit_exc: - target = 'linode_api4.linode_client.LinodeGroup.instance_create' - with mock.patch(target, return_value=mock_linode): - linode_v4.main() + target = 'linode_api4.linode_client.LinodeGroup.instances' + with mock.patch(target, return_value=[]): + with pytest.raises(SystemExit) as sys_exit_exc: + target = 'linode_api4.linode_client.LinodeGroup.instance_create' + with mock.patch(target, return_value=mock_linode): + linode_v4.main() assert sys_exit_exc.value.code == 0 @@ -327,12 +322,12 @@ def test_instance_to_be_deleted(default_args, capfd, access_token): default_args.update({'state': 'absent'}) - set_module_args(default_args) + with set_module_args(default_args): - target = 'linode_api4.linode_client.LinodeGroup.instances' - with mock.patch(target, return_value=[mock_linode]): - with pytest.raises(SystemExit) as sys_exit_exc: - linode_v4.main() + target = 'linode_api4.linode_client.LinodeGroup.instances' + with mock.patch(target, return_value=[mock_linode]): + with pytest.raises(SystemExit) as sys_exit_exc: + linode_v4.main() assert sys_exit_exc.value.code == 0 @@ -351,12 +346,12 @@ def test_instance_already_deleted_no_change(default_args, capfd, access_token): default_args.update({'state': 'absent'}) - set_module_args(default_args) + with set_module_args(default_args): - target = 'linode_api4.linode_client.LinodeGroup.instances' - with mock.patch(target, return_value=[]): - with pytest.raises(SystemExit) as sys_exit_exc: - linode_v4.main() + target = 'linode_api4.linode_client.LinodeGroup.instances' + with mock.patch(target, return_value=[]): + with pytest.raises(SystemExit) as sys_exit_exc: + linode_v4.main() assert sys_exit_exc.value.code == 0 diff --git a/tests/unit/plugins/modules/test_lvg_rename.py b/tests/unit/plugins/modules/test_lvg_rename.py index 0f2fcb7fa7..9cbaa2664a 100644 --- a/tests/unit/plugins/modules/test_lvg_rename.py +++ b/tests/unit/plugins/modules/test_lvg_rename.py @@ -7,8 +7,8 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type from 
ansible_collections.community.general.plugins.modules import lvg_rename -from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible_collections.community.general.tests.unit.plugins.modules.utils import ( +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( AnsibleFailJson, AnsibleExitJson, ModuleTestCase, set_module_args) @@ -48,10 +48,10 @@ class TestLvgRename(ModuleTestCase): 'vg': 'vg_missing', 'vg_new': 'vg_data_testhost2', } - set_module_args(args=module_args) + with set_module_args(args=module_args): - with self.assertRaises(AnsibleFailJson) as result: - self.module.main() + with self.assertRaises(AnsibleFailJson) as result: + self.module.main() self.assertEqual(len(self.mock_module_run_command.mock_calls), 1) self.assertIs(result.exception.args[0]['failed'], failed) @@ -67,10 +67,10 @@ class TestLvgRename(ModuleTestCase): 'vg': 'Yfj4YG-c8nI-z7w5-B7Fw-i2eM-HqlF-ApFVp0', 'vg_new': 'vg_data_testhost2', } - set_module_args(args=module_args) + with set_module_args(args=module_args): - with self.assertRaises(AnsibleFailJson) as result: - self.module.main() + with self.assertRaises(AnsibleFailJson) as result: + self.module.main() self.assertEqual(len(self.mock_module_run_command.mock_calls), 1) self.assertIs(result.exception.args[0]['failed'], failed) @@ -86,10 +86,10 @@ class TestLvgRename(ModuleTestCase): 'vg': 'vg_data_testhost1', 'vg_new': 'vg_sys_testhost2', } - set_module_args(args=module_args) + with set_module_args(args=module_args): - with self.assertRaises(AnsibleFailJson) as result: - self.module.main() + with self.assertRaises(AnsibleFailJson) as result: + self.module.main() self.assertEqual(len(self.mock_module_run_command.mock_calls), 1) self.assertIs(result.exception.args[0]['failed'], failed) @@ -109,10 +109,10 @@ class TestLvgRename(ModuleTestCase): 'vg': '/dev/vg_data_testhost1', 'vg_new': 'vg_data_testhost2', } - set_module_args(args=module_args) + with set_module_args(args=module_args): - with self.assertRaises(AnsibleExitJson) as result: - self.module.main() + with self.assertRaises(AnsibleExitJson) as result: + self.module.main() self.assertEqual(len(self.mock_module_run_command.mock_calls), 2) self.assertIs(result.exception.args[0]['changed'], changed) @@ -130,10 +130,10 @@ class TestLvgRename(ModuleTestCase): 'vg_new': 'vg_data_testhost2', '_ansible_check_mode': True, } - set_module_args(args=module_args) + with set_module_args(args=module_args): - with self.assertRaises(AnsibleExitJson) as result: - self.module.main() + with self.assertRaises(AnsibleExitJson) as result: + self.module.main() self.assertEqual(len(self.mock_module_run_command.mock_calls), 1) self.assertIs(result.exception.args[0]['changed'], changed) @@ -150,10 +150,10 @@ class TestLvgRename(ModuleTestCase): 'vg': 'vg_data_testhostX', 'vg_new': 'vg_data_testhost1', } - set_module_args(args=module_args) + with set_module_args(args=module_args): - with self.assertRaises(AnsibleExitJson) as result: - self.module.main() + with self.assertRaises(AnsibleExitJson) as result: + self.module.main() self.assertEqual(len(self.mock_module_run_command.mock_calls), 1) self.assertIs(result.exception.args[0]['changed'], changed) diff --git a/tests/unit/plugins/modules/test_lxca_cmms.py b/tests/unit/plugins/modules/test_lxca_cmms.py index efbdad0620..3c79148022 100644 --- a/tests/unit/plugins/modules/test_lxca_cmms.py +++ 
b/tests/unit/plugins/modules/test_lxca_cmms.py @@ -8,7 +8,7 @@ __metaclass__ = type import json import pytest -from ansible_collections.community.general.tests.unit.compat import mock +from ansible_collections.community.internal_test_tools.tests.unit.compat import mock from ansible_collections.community.general.plugins.modules import lxca_cmms @@ -61,8 +61,8 @@ class TestMyModule(): command_options=dict(default='cmms', choices=['cmms', 'cmms_by_uuid', 'cmms_by_chassis_uuid']), auth_url=dict(required=True), - uuid=dict(default=None), - chassis=dict(default=None), + uuid=dict(), + chassis=dict(), ) _setup_conn.return_value = "Fake connection" _execute_module.return_value = [] diff --git a/tests/unit/plugins/modules/test_lxca_nodes.py b/tests/unit/plugins/modules/test_lxca_nodes.py index 87effa0c01..1f8ab84368 100644 --- a/tests/unit/plugins/modules/test_lxca_nodes.py +++ b/tests/unit/plugins/modules/test_lxca_nodes.py @@ -8,7 +8,7 @@ __metaclass__ = type import json import pytest -from ansible_collections.community.general.tests.unit.compat import mock +from ansible_collections.community.internal_test_tools.tests.unit.compat import mock from ansible_collections.community.general.plugins.modules import lxca_nodes @@ -63,8 +63,8 @@ class TestMyModule(): 'nodes_status_managed', 'nodes_status_unmanaged']), auth_url=dict(required=True), - uuid=dict(default=None), - chassis=dict(default=None), + uuid=dict(), + chassis=dict(), ) _setup_conn.return_value = "Fake connection" _execute_module.return_value = [] diff --git a/tests/unit/plugins/modules/test_modprobe.py b/tests/unit/plugins/modules/test_modprobe.py index 2ad0831511..bada481cfc 100644 --- a/tests/unit/plugins/modules/test_modprobe.py +++ b/tests/unit/plugins/modules/test_modprobe.py @@ -7,10 +7,10 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type import sys -from ansible_collections.community.general.tests.unit.plugins.modules.utils import ModuleTestCase, set_module_args -from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible_collections.community.general.tests.unit.compat.mock import Mock -from ansible_collections.community.general.tests.unit.compat.mock import mock_open +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import Mock +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import mock_open from ansible_collections.community.general.plugins.modules.modprobe import Modprobe, build_module @@ -36,19 +36,18 @@ class TestLoadModule(ModuleTestCase): self.mock_get_bin_path.stop() def test_load_module_success(self): - set_module_args(dict( + with set_module_args(dict( name='test', state='present', - )) + )): + module = build_module() - module = build_module() + self.get_bin_path.side_effect = ['modprobe'] + self.module_loaded.side_effect = [True] + self.run_command.side_effect = [(0, '', '')] - self.get_bin_path.side_effect = ['modprobe'] - self.module_loaded.side_effect = [True] - self.run_command.side_effect = [(0, '', '')] - - modprobe = Modprobe(module) - modprobe.load_module() + modprobe = Modprobe(module) + modprobe.load_module() assert modprobe.result == { 'changed': True, @@ -58,21 +57,20 @@ class TestLoadModule(ModuleTestCase): } def test_load_module_unchanged(self): - 
set_module_args(dict( + with set_module_args(dict( name='test', state='present', - )) + )): + module = build_module() - module = build_module() + module.warn = Mock() - module.warn = Mock() + self.get_bin_path.side_effect = ['modprobe'] + self.module_loaded.side_effect = [False] + self.run_command.side_effect = [(0, '', ''), (1, '', '')] - self.get_bin_path.side_effect = ['modprobe'] - self.module_loaded.side_effect = [False] - self.run_command.side_effect = [(0, '', ''), (1, '', '')] - - modprobe = Modprobe(module) - modprobe.load_module() + modprobe = Modprobe(module) + modprobe.load_module() module.warn.assert_called_once_with('') @@ -99,19 +97,18 @@ class TestUnloadModule(ModuleTestCase): self.mock_get_bin_path.stop() def test_unload_module_success(self): - set_module_args(dict( + with set_module_args(dict( name='test', state='absent', - )) + )): + module = build_module() - module = build_module() + self.get_bin_path.side_effect = ['modprobe'] + self.module_loaded.side_effect = [False] + self.run_command.side_effect = [(0, '', '')] - self.get_bin_path.side_effect = ['modprobe'] - self.module_loaded.side_effect = [False] - self.run_command.side_effect = [(0, '', '')] - - modprobe = Modprobe(module) - modprobe.unload_module() + modprobe = Modprobe(module) + modprobe.unload_module() assert modprobe.result == { 'changed': True, @@ -121,21 +118,20 @@ class TestUnloadModule(ModuleTestCase): } def test_unload_module_failure(self): - set_module_args(dict( + with set_module_args(dict( name='test', state='absent', - )) + )): + module = build_module() - module = build_module() + module.fail_json = Mock() - module.fail_json = Mock() + self.get_bin_path.side_effect = ['modprobe'] + self.module_loaded.side_effect = [True] + self.run_command.side_effect = [(1, '', '')] - self.get_bin_path.side_effect = ['modprobe'] - self.module_loaded.side_effect = [True] - self.run_command.side_effect = [(1, '', '')] - - modprobe = Modprobe(module) - modprobe.unload_module() + modprobe = Modprobe(module) + modprobe.unload_module() dummy_result = { 'changed': False, @@ -151,7 +147,7 @@ class TestUnloadModule(ModuleTestCase): class TestModuleIsLoadedPersistently(ModuleTestCase): def setUp(self): - if (sys.version_info[0] == 3 and sys.version_info[1] < 7) or (sys.version_info[0] == 2 and sys.version_info[1] < 7): + if sys.version_info[0] == 3 and sys.version_info[1] < 7: self.skipTest("open_mock doesn't support readline in earlier python versions") super(TestModuleIsLoadedPersistently, self).setUp() @@ -168,68 +164,65 @@ class TestModuleIsLoadedPersistently(ModuleTestCase): def test_module_is_loaded(self): - set_module_args(dict( + with set_module_args(dict( name='dummy', state='present', persistent='present' - )) + )): + module = build_module() - module = build_module() + self.get_bin_path.side_effect = ['modprobe'] - self.get_bin_path.side_effect = ['modprobe'] + modprobe = Modprobe(module) + with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open(read_data='dummy')) as mocked_file: + with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modules_files'): + modprobe.modules_files = ['/etc/modules-load.d/dummy.conf'] - modprobe = Modprobe(module) - with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open(read_data='dummy')) as mocked_file: - with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modules_files'): - modprobe.modules_files = ['/etc/modules-load.d/dummy.conf'] - - assert 
modprobe.module_is_loaded_persistently + assert modprobe.module_is_loaded_persistently mocked_file.assert_called_once_with('/etc/modules-load.d/dummy.conf') def test_module_is_not_loaded_empty_file(self): - set_module_args(dict( + with set_module_args(dict( name='dummy', state='present', persistent='present' - )) + )): + module = build_module() - module = build_module() + self.get_bin_path.side_effect = ['modprobe'] - self.get_bin_path.side_effect = ['modprobe'] + modprobe = Modprobe(module) + with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open(read_data='')) as mocked_file: + with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modules_files'): + modprobe.modules_files = ['/etc/modules-load.d/dummy.conf'] - modprobe = Modprobe(module) - with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open(read_data='')) as mocked_file: - with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modules_files'): - modprobe.modules_files = ['/etc/modules-load.d/dummy.conf'] - - assert not modprobe.module_is_loaded_persistently + assert not modprobe.module_is_loaded_persistently mocked_file.assert_called_once_with('/etc/modules-load.d/dummy.conf') def test_module_is_not_loaded_no_files(self): - set_module_args(dict( + with set_module_args(dict( name='dummy', state='present', persistent='present' - )) + )): + module = build_module() - module = build_module() + self.get_bin_path.side_effect = ['modprobe'] - self.get_bin_path.side_effect = ['modprobe'] + modprobe = Modprobe(module) + with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modules_files'): + modprobe.modules_files = [] - modprobe = Modprobe(module) - with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modules_files'): - modprobe.modules_files = [] - - assert not modprobe.module_is_loaded_persistently + assert not modprobe.module_is_loaded_persistently class TestPermanentParams(ModuleTestCase): def setUp(self): - if (sys.version_info[0] == 3 and sys.version_info[1] < 7) or (sys.version_info[0] == 2 and sys.version_info[1] < 7): + if sys.version_info[0] == 3 and sys.version_info[1] < 7: self.skipTest("open_mock doesn't support readline in earlier python versions") super(TestPermanentParams, self).setUp() @@ -251,24 +244,23 @@ class TestPermanentParams(ModuleTestCase): ] mock_files_content = [mock_open(read_data=content).return_value for content in files_content] - set_module_args(dict( + with set_module_args(dict( name='dummy', state='present', persistent='present' - )) + )): + module = build_module() - module = build_module() + self.get_bin_path.side_effect = ['modprobe'] - self.get_bin_path.side_effect = ['modprobe'] + modprobe = Modprobe(module) - modprobe = Modprobe(module) + with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open()) as mocked_file: + mocked_file.side_effect = mock_files_content + with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modprobe_files'): + modprobe.modprobe_files = ['/etc/modprobe.d/dummy1.conf', '/etc/modprobe.d/dummy2.conf'] - with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open()) as mocked_file: - mocked_file.side_effect = mock_files_content - with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modprobe_files'): - modprobe.modprobe_files = ['/etc/modprobe.d/dummy1.conf', 
'/etc/modprobe.d/dummy2.conf'] - - assert modprobe.permanent_params == set(['numdummies=4', 'dummy_parameter1=6', 'dummy_parameter2=5']) + assert modprobe.permanent_params == set(['numdummies=4', 'dummy_parameter1=6', 'dummy_parameter2=5']) def test_module_permanent_params_empty(self): @@ -278,24 +270,23 @@ class TestPermanentParams(ModuleTestCase): ] mock_files_content = [mock_open(read_data=content).return_value for content in files_content] - set_module_args(dict( + with set_module_args(dict( name='dummy', state='present', persistent='present' - )) + )): + module = build_module() - module = build_module() + self.get_bin_path.side_effect = ['modprobe'] - self.get_bin_path.side_effect = ['modprobe'] + modprobe = Modprobe(module) - modprobe = Modprobe(module) + with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open(read_data='')) as mocked_file: + mocked_file.side_effect = mock_files_content + with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modprobe_files'): + modprobe.modprobe_files = ['/etc/modprobe.d/dummy1.conf', '/etc/modprobe.d/dummy2.conf'] - with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open(read_data='')) as mocked_file: - mocked_file.side_effect = mock_files_content - with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modprobe_files'): - modprobe.modprobe_files = ['/etc/modprobe.d/dummy1.conf', '/etc/modprobe.d/dummy2.conf'] - - assert modprobe.permanent_params == set() + assert modprobe.permanent_params == set() class TestCreateModuleFIle(ModuleTestCase): @@ -314,22 +305,21 @@ class TestCreateModuleFIle(ModuleTestCase): def test_create_file(self): - set_module_args(dict( + with set_module_args(dict( name='dummy', state='present', persistent='present' - )) + )): + module = build_module() - module = build_module() + self.get_bin_path.side_effect = ['modprobe'] - self.get_bin_path.side_effect = ['modprobe'] + modprobe = Modprobe(module) - modprobe = Modprobe(module) - - with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open()) as mocked_file: - modprobe.create_module_file() - mocked_file.assert_called_once_with('/etc/modules-load.d/dummy.conf', 'w') - mocked_file().write.assert_called_once_with('dummy\n') + with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open()) as mocked_file: + modprobe.create_module_file() + mocked_file.assert_called_once_with('/etc/modules-load.d/dummy.conf', 'w') + mocked_file().write.assert_called_once_with('dummy\n') class TestCreateModuleOptionsFIle(ModuleTestCase): @@ -348,23 +338,22 @@ class TestCreateModuleOptionsFIle(ModuleTestCase): def test_create_file(self): - set_module_args(dict( + with set_module_args(dict( name='dummy', state='present', params='numdummies=4', persistent='present' - )) + )): + module = build_module() - module = build_module() + self.get_bin_path.side_effect = ['modprobe'] - self.get_bin_path.side_effect = ['modprobe'] + modprobe = Modprobe(module) - modprobe = Modprobe(module) - - with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open()) as mocked_file: - modprobe.create_module_options_file() - mocked_file.assert_called_once_with('/etc/modprobe.d/dummy.conf', 'w') - mocked_file().write.assert_called_once_with('options dummy numdummies=4\n') + with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open()) as mocked_file: + 
modprobe.create_module_options_file() + mocked_file.assert_called_once_with('/etc/modprobe.d/dummy.conf', 'w') + mocked_file().write.assert_called_once_with('options dummy numdummies=4\n') class TestDisableOldParams(ModuleTestCase): @@ -384,47 +373,45 @@ class TestDisableOldParams(ModuleTestCase): def test_disable_old_params_file_changed(self): mock_data = 'options dummy numdummies=4' - set_module_args(dict( + with set_module_args(dict( name='dummy', state='present', params='numdummies=4', persistent='present' - )) + )): + module = build_module() - module = build_module() + self.get_bin_path.side_effect = ['modprobe'] - self.get_bin_path.side_effect = ['modprobe'] + modprobe = Modprobe(module) - modprobe = Modprobe(module) - - with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open(read_data=mock_data)) as mocked_file: - with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modprobe_files'): - modprobe.modprobe_files = ['/etc/modprobe.d/dummy1.conf'] - modprobe.disable_old_params() - mocked_file.assert_called_with('/etc/modprobe.d/dummy1.conf', 'w') - mocked_file().write.assert_called_once_with('#options dummy numdummies=4') + with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open(read_data=mock_data)) as mocked_file: + with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modprobe_files'): + modprobe.modprobe_files = ['/etc/modprobe.d/dummy1.conf'] + modprobe.disable_old_params() + mocked_file.assert_called_with('/etc/modprobe.d/dummy1.conf', 'w') + mocked_file().write.assert_called_once_with('#options dummy numdummies=4') def test_disable_old_params_file_unchanged(self): mock_data = 'options notdummy numdummies=4' - set_module_args(dict( + with set_module_args(dict( name='dummy', state='present', params='numdummies=4', persistent='present' - )) + )): + module = build_module() - module = build_module() + self.get_bin_path.side_effect = ['modprobe'] - self.get_bin_path.side_effect = ['modprobe'] + modprobe = Modprobe(module) - modprobe = Modprobe(module) - - with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open(read_data=mock_data)) as mocked_file: - with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modprobe_files'): - modprobe.modprobe_files = ['/etc/modprobe.d/dummy1.conf'] - modprobe.disable_old_params() - mocked_file.assert_called_once_with('/etc/modprobe.d/dummy1.conf') + with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open(read_data=mock_data)) as mocked_file: + with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modprobe_files'): + modprobe.modprobe_files = ['/etc/modprobe.d/dummy1.conf'] + modprobe.disable_old_params() + mocked_file.assert_called_once_with('/etc/modprobe.d/dummy1.conf') class TestDisableModulePermanent(ModuleTestCase): @@ -443,43 +430,41 @@ class TestDisableModulePermanent(ModuleTestCase): def test_disable_module_permanent_file_changed(self): - set_module_args(dict( + with set_module_args(dict( name='dummy', state='present', params='numdummies=4', persistent='present' - )) + )): + module = build_module() - module = build_module() + self.get_bin_path.side_effect = ['modprobe'] - self.get_bin_path.side_effect = ['modprobe'] + modprobe = Modprobe(module) - modprobe = Modprobe(module) - - with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open(read_data='dummy')) 
as mocked_file: - with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modules_files'): - modprobe.modules_files = ['/etc/modules-load.d/dummy.conf'] - modprobe.disable_module_permanent() - mocked_file.assert_called_with('/etc/modules-load.d/dummy.conf', 'w') - mocked_file().write.assert_called_once_with('#dummy') + with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open(read_data='dummy')) as mocked_file: + with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modules_files'): + modprobe.modules_files = ['/etc/modules-load.d/dummy.conf'] + modprobe.disable_module_permanent() + mocked_file.assert_called_with('/etc/modules-load.d/dummy.conf', 'w') + mocked_file().write.assert_called_once_with('#dummy') def test_disable_module_permanent_file_unchanged(self): - set_module_args(dict( + with set_module_args(dict( name='dummy', state='present', params='numdummies=4', persistent='present' - )) + )): + module = build_module() - module = build_module() + self.get_bin_path.side_effect = ['modprobe'] - self.get_bin_path.side_effect = ['modprobe'] + modprobe = Modprobe(module) - modprobe = Modprobe(module) - - with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open(read_data='notdummy')) as mocked_file: - with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modules_files'): - modprobe.modules_files = ['/etc/modules-load.d/dummy.conf'] - modprobe.disable_module_permanent() - mocked_file.assert_called_once_with('/etc/modules-load.d/dummy.conf') + with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open(read_data='notdummy')) as mocked_file: + with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modules_files'): + modprobe.modules_files = ['/etc/modules-load.d/dummy.conf'] + modprobe.disable_module_permanent() + mocked_file.assert_called_once_with('/etc/modules-load.d/dummy.conf') diff --git a/tests/unit/plugins/modules/test_monit.py b/tests/unit/plugins/modules/test_monit.py index 7f8f15dd9e..e50c8e387f 100644 --- a/tests/unit/plugins/modules/test_monit.py +++ b/tests/unit/plugins/modules/test_monit.py @@ -8,9 +8,9 @@ __metaclass__ = type import mock import pytest -from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest from ansible_collections.community.general.plugins.modules import monit -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson TEST_OUTPUT = """ diff --git a/tests/unit/plugins/modules/test_nmcli.py b/tests/unit/plugins/modules/test_nmcli.py index 89e8de6d64..79f2f2ea1a 100644 --- a/tests/unit/plugins/modules/test_nmcli.py +++ b/tests/unit/plugins/modules/test_nmcli.py @@ -1570,6 +1570,37 @@ macvlan.promiscuous: yes macvlan.tap: no """ +TESTCASE_VRF = [ + { + 'type': 'vrf', + 'conn_name': 'non_existent_nw_device', + 'ifname': 'vrf_not_exists', + 'ip4': '10.10.10.10/24', + 'gw4': '10.10.10.1', + 'table': 10, + 'state': 'present', + '_ansible_check_mode': False, + } +] + +TESTCASE_VRF_SHOW_OUTPUT = """\ +connection.id: non_existent_nw_device +connection.interface-name: vrf_not_exists +connection.autoconnect: yes +ipv4.method: manual +ipv4.addresses: 10.10.10.10/24 
+ipv4.gateway: 10.10.10.1 +ipv4.ignore-auto-dns: no +ipv4.ignore-auto-routes: no +ipv4.never-default: no +ipv4.may-fail: yes +ipv6.method: auto +ipv6.ignore-auto-dns: no +ipv6.ignore-auto-routes: no +table: 10 +802-3-ethernet.mtu: auto +""" + def mocker_set(mocker, connection_exists=False, @@ -2035,6 +2066,13 @@ def mocked_loopback_connection_modify(mocker): )) +@pytest.fixture +def mocked_vrf_connection_unchanged(mocker): + mocker_set(mocker, + connection_exists=True, + execute_return=(0, TESTCASE_VRF_SHOW_OUTPUT, "")) + + @pytest.mark.parametrize('patch_ansible_module', TESTCASE_BOND, indirect=['patch_ansible_module']) def test_bond_connection_create(mocked_generic_connection_create, capfd): """ @@ -4312,6 +4350,8 @@ def test_bond_connection_unchanged(mocked_generic_connection_diff_check, capfd): argument_spec=dict( ignore_unsupported_suboptions=dict(type='bool', default=False), autoconnect=dict(type='bool', default=True), + autoconnect_priority=dict(type='int'), + autoconnect_retries=dict(type='int'), state=dict(type='str', required=True, choices=['absent', 'present']), conn_name=dict(type='str', required=True), conn_reload=dict(type='bool', required=False, default=False), @@ -4397,6 +4437,7 @@ def test_bond_connection_unchanged(mocked_generic_connection_diff_check, capfd): downdelay=dict(type='int'), updelay=dict(type='int'), xmit_hash_policy=dict(type='str'), + fail_over_mac=dict(type='str', choices=['none', 'active', 'follow']), arp_interval=dict(type='int'), arp_ip_target=dict(type='str'), primary=dict(type='str'), @@ -4446,8 +4487,10 @@ def test_bond_connection_unchanged(mocked_generic_connection_diff_check, capfd): macvlan=dict(type='dict'), wireguard=dict(type='dict'), vpn=dict(type='dict'), - transport_mode=dict(type='str', choices=['datagram', 'connected']), sriov=dict(type='dict'), + # infiniband specific vars + transport_mode=dict(type='str', choices=['datagram', 'connected']), + infiniband_mac=dict(type='str'), ), mutually_exclusive=[['never_default4', 'gw4'], ['routes4_extended', 'routes4'], @@ -4910,3 +4953,76 @@ def test_add_second_ip4_address_to_loopback_connection(mocked_loopback_connectio results = json.loads(out) assert not results.get('failed') assert results['changed'] + + +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VRF, indirect=['patch_ansible_module']) +def test_create_vrf_con(mocked_generic_connection_create, capfd): + """ + Test if VRF created + """ + + with pytest.raises(SystemExit): + nmcli.main() + + assert nmcli.Nmcli.execute_command.call_count == 1 + arg_list = nmcli.Nmcli.execute_command.call_args_list + args, kwargs = arg_list[0] + + assert args[0][0] == '/usr/bin/nmcli' + assert args[0][1] == 'con' + assert args[0][2] == 'add' + assert args[0][3] == 'type' + assert args[0][4] == 'vrf' + assert args[0][5] == 'con-name' + assert args[0][6] == 'non_existent_nw_device' + + args_text = list(map(to_text, args[0])) + for param in ['ipv4.addresses', '10.10.10.10/24', 'ipv4.gateway', '10.10.10.1', 'table', '10']: + assert param in args_text + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert results['changed'] + + +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VRF, indirect=['patch_ansible_module']) +def test_mod_vrf_conn(mocked_generic_connection_modify, capfd): + """ + Test if VRF modified + """ + + with pytest.raises(SystemExit): + nmcli.main() + + assert nmcli.Nmcli.execute_command.call_count == 1 + arg_list = nmcli.Nmcli.execute_command.call_args_list + args, kwargs = 
arg_list[0] + + assert args[0][0] == '/usr/bin/nmcli' + assert args[0][1] == 'con' + assert args[0][2] == 'modify' + assert args[0][3] == 'non_existent_nw_device' + + args_text = list(map(to_text, args[0])) + for param in ['ipv4.addresses', '10.10.10.10/24', 'ipv4.gateway', '10.10.10.1', 'table', '10']: + assert param in args_text + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert results['changed'] + + +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VRF, indirect=['patch_ansible_module']) +def test_vrf_connection_unchanged(mocked_vrf_connection_unchanged, capfd): + """ + Test : VRF connection unchanged + """ + with pytest.raises(SystemExit): + nmcli.main() + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert not results['changed'] diff --git a/tests/unit/plugins/modules/test_nomad_token.py b/tests/unit/plugins/modules/test_nomad_token.py index 48f060f8be..43c1f82cf1 100644 --- a/tests/unit/plugins/modules/test_nomad_token.py +++ b/tests/unit/plugins/modules/test_nomad_token.py @@ -10,10 +10,8 @@ __metaclass__ = type import nomad from ansible_collections.community.general.plugins.modules import nomad_token -from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, \ - ModuleTestCase, \ - set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args def mock_acl_get_tokens(empty_list=False): @@ -102,8 +100,8 @@ class TestNomadTokenModule(ModuleTestCase): def test_should_fail_without_parameters(self): with self.assertRaises(AnsibleFailJson): - set_module_args({}) - self.module.main() + with set_module_args({}): + self.module.main() def test_should_create_token_type_client(self): module_args = { @@ -113,12 +111,12 @@ class TestNomadTokenModule(ModuleTestCase): 'state': 'present' } - set_module_args(module_args) - with patch.object(nomad.api.acl.Acl, 'get_tokens', return_value=mock_acl_get_tokens()) as mock_get_tokens: - with patch.object(nomad.api.acl.Acl, 'create_token', return_value=mock_acl_create_update_token()) as \ - mock_create_update_token: - with self.assertRaises(AnsibleExitJson): - self.module.main() + with set_module_args(module_args): + with patch.object(nomad.api.acl.Acl, 'get_tokens', return_value=mock_acl_get_tokens()) as mock_get_tokens: + with patch.object(nomad.api.acl.Acl, 'create_token', return_value=mock_acl_create_update_token()) as \ + mock_create_update_token: + with self.assertRaises(AnsibleExitJson): + self.module.main() self.assertIs(mock_get_tokens.call_count, 1) self.assertIs(mock_create_update_token.call_count, 1) @@ -130,15 +128,15 @@ class TestNomadTokenModule(ModuleTestCase): 'state': 'present' } - set_module_args(module_args) + with set_module_args(module_args): - with patch.object(nomad.api.acl.Acl, 'get_tokens') as mock_get_tokens: - with patch.object(nomad.api.Acl, 'generate_bootstrap') as mock_generate_bootstrap: - mock_get_tokens.return_value = mock_acl_get_tokens(empty_list=True) - mock_generate_bootstrap.return_value = mock_acl_generate_bootstrap() + with patch.object(nomad.api.acl.Acl, 'get_tokens') as mock_get_tokens: + with patch.object(nomad.api.Acl, 'generate_bootstrap') as 
mock_generate_bootstrap: + mock_get_tokens.return_value = mock_acl_get_tokens(empty_list=True) + mock_generate_bootstrap.return_value = mock_acl_generate_bootstrap() - with self.assertRaises(AnsibleExitJson): - self.module.main() + with self.assertRaises(AnsibleExitJson): + self.module.main() self.assertIs(mock_get_tokens.call_count, 1) self.assertIs(mock_generate_bootstrap.call_count, 1) @@ -149,14 +147,14 @@ class TestNomadTokenModule(ModuleTestCase): 'state': 'absent' } - set_module_args(module_args) - with patch.object(nomad.api.acl.Acl, 'get_tokens') as mock_get_tokens: - with patch.object(nomad.api.acl.Acl, 'delete_token') as mock_delete_token: - mock_get_tokens.return_value = mock_acl_get_tokens() - mock_delete_token.return_value = mock_acl_delete_token() + with set_module_args(module_args): + with patch.object(nomad.api.acl.Acl, 'get_tokens') as mock_get_tokens: + with patch.object(nomad.api.acl.Acl, 'delete_token') as mock_delete_token: + mock_get_tokens.return_value = mock_acl_get_tokens() + mock_delete_token.return_value = mock_acl_delete_token() - with self.assertRaises(AnsibleFailJson): - self.module.main() + with self.assertRaises(AnsibleFailJson): + self.module.main() def test_should_fail_delete_bootstrap_token(self): module_args = { @@ -165,10 +163,10 @@ class TestNomadTokenModule(ModuleTestCase): 'state': 'absent' } - set_module_args(module_args) + with set_module_args(module_args): - with self.assertRaises(AnsibleFailJson): - self.module.main() + with self.assertRaises(AnsibleFailJson): + self.module.main() def test_should_fail_delete_boostrap_token_by_name(self): module_args = { @@ -177,10 +175,10 @@ class TestNomadTokenModule(ModuleTestCase): 'state': 'absent' } - set_module_args(module_args) + with set_module_args(module_args): - with self.assertRaises(AnsibleFailJson): - self.module.main() + with self.assertRaises(AnsibleFailJson): + self.module.main() def test_should_delete_client_token(self): module_args = { @@ -189,15 +187,15 @@ class TestNomadTokenModule(ModuleTestCase): 'state': 'absent' } - set_module_args(module_args) + with set_module_args(module_args): - with patch.object(nomad.api.acl.Acl, 'get_tokens') as mock_get_tokens: - with patch.object(nomad.api.acl.Acl, 'delete_token') as mock_delete_token: - mock_get_tokens.return_value = mock_acl_get_tokens() - mock_delete_token.return_value = mock_acl_delete_token() + with patch.object(nomad.api.acl.Acl, 'get_tokens') as mock_get_tokens: + with patch.object(nomad.api.acl.Acl, 'delete_token') as mock_delete_token: + mock_get_tokens.return_value = mock_acl_get_tokens() + mock_delete_token.return_value = mock_acl_delete_token() - with self.assertRaises(AnsibleExitJson): - self.module.main() + with self.assertRaises(AnsibleExitJson): + self.module.main() self.assertIs(mock_delete_token.call_count, 1) @@ -209,14 +207,14 @@ class TestNomadTokenModule(ModuleTestCase): 'state': 'present' } - set_module_args(module_args) + with set_module_args(module_args): - with patch.object(nomad.api.acl.Acl, 'get_tokens') as mock_get_tokens: - with patch.object(nomad.api.acl.Acl, 'update_token') as mock_create_update_token: - mock_get_tokens.return_value = mock_acl_get_tokens() - mock_create_update_token.return_value = mock_acl_create_update_token() + with patch.object(nomad.api.acl.Acl, 'get_tokens') as mock_get_tokens: + with patch.object(nomad.api.acl.Acl, 'update_token') as mock_create_update_token: + mock_get_tokens.return_value = mock_acl_get_tokens() + mock_create_update_token.return_value = mock_acl_create_update_token() - with 
self.assertRaises(AnsibleExitJson): - self.module.main() + with self.assertRaises(AnsibleExitJson): + self.module.main() self.assertIs(mock_get_tokens.call_count, 1) self.assertIs(mock_create_update_token.call_count, 1) diff --git a/tests/unit/plugins/modules/test_npm.py b/tests/unit/plugins/modules/test_npm.py index cc4d651726..63c854f563 100644 --- a/tests/unit/plugins/modules/test_npm.py +++ b/tests/unit/plugins/modules/test_npm.py @@ -6,9 +6,9 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible_collections.community.general.tests.unit.compat.mock import call, patch +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import call, patch from ansible_collections.community.general.plugins.modules import npm -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args class NPMModuleTestCase(ModuleTestCase): @@ -34,17 +34,17 @@ class NPMModuleTestCase(ModuleTestCase): return exc.exception.args[0] def test_present(self): - set_module_args({ + with set_module_args({ 'name': 'coffee-script', 'global': 'true', 'state': 'present' - }) - self.module_main_command.side_effect = [ - (0, '{}', ''), - (0, '{}', ''), - ] + }): + self.module_main_command.side_effect = [ + (0, '{}', ''), + (0, '{}', ''), + ] - result = self.module_main(AnsibleExitJson) + result = self.module_main(AnsibleExitJson) self.assertTrue(result['changed']) self.module_main_command.assert_has_calls([ @@ -53,17 +53,17 @@ class NPMModuleTestCase(ModuleTestCase): ]) def test_present_missing(self): - set_module_args({ + with set_module_args({ 'name': 'coffee-script', 'global': 'true', 'state': 'present', - }) - self.module_main_command.side_effect = [ - (0, '{"dependencies": {"coffee-script": {"missing" : true}}}', ''), - (0, '{}', ''), - ] + }): + self.module_main_command.side_effect = [ + (0, '{"dependencies": {"coffee-script": {"missing" : true}}}', ''), + (0, '{}', ''), + ] - result = self.module_main(AnsibleExitJson) + result = self.module_main(AnsibleExitJson) self.assertTrue(result['changed']) self.module_main_command.assert_has_calls([ @@ -72,18 +72,18 @@ class NPMModuleTestCase(ModuleTestCase): ]) def test_present_version(self): - set_module_args({ + with set_module_args({ 'name': 'coffee-script', 'global': 'true', 'state': 'present', 'version': '2.5.1' - }) - self.module_main_command.side_effect = [ - (0, '{}', ''), - (0, '{}', ''), - ] + }): + self.module_main_command.side_effect = [ + (0, '{}', ''), + (0, '{}', ''), + ] - result = self.module_main(AnsibleExitJson) + result = self.module_main(AnsibleExitJson) self.assertTrue(result['changed']) self.module_main_command.assert_has_calls([ @@ -92,18 +92,18 @@ class NPMModuleTestCase(ModuleTestCase): ]) def test_present_version_update(self): - set_module_args({ + with set_module_args({ 'name': 'coffee-script', 'global': 'true', 'state': 'present', 'version': '2.5.1' - }) - self.module_main_command.side_effect = [ - (0, '{"dependencies": {"coffee-script": {"version" : "2.5.0"}}}', ''), - (0, '{}', ''), - ] + }): + self.module_main_command.side_effect = [ + (0, '{"dependencies": {"coffee-script": {"version" : "2.5.0"}}}', ''), + (0, '{}', ''), + ] - result = self.module_main(AnsibleExitJson) + result = self.module_main(AnsibleExitJson) self.assertTrue(result['changed']) 
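
The change repeated across all of these files is mechanical: the old call-style set_module_args mutated global state that the next test could silently inherit, while the new helper from community.internal_test_tools is a context manager that scopes the module arguments to the with block and restores the previous state on exit. Below is a minimal sketch of how such a context manager can be built, assuming it patches basic._ANSIBLE_ARGS the way the legacy in-repo helper did; the actual internal_test_tools implementation may differ.

    # Hypothetical sketch, not the helper imported above: scope module args to a block.
    import contextlib
    import json

    from unittest import mock

    from ansible.module_utils import basic
    from ansible.module_utils.common.text.converters import to_bytes


    @contextlib.contextmanager
    def set_module_args(args):
        # AnsibleModule reads its parameters from basic._ANSIBLE_ARGS when stdin
        # is not used; patching it here and undoing the patch on exit keeps one
        # test's arguments from leaking into the next.
        args = dict(args)
        args.setdefault('_ansible_remote_tmp', '/tmp')
        args.setdefault('_ansible_keep_remote_files', False)
        serialized = to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': args}))
        with mock.patch.object(basic, '_ANSIBLE_ARGS', serialized):
            yield

Because mock.patch.object restores the attribute even when the body raises, the pattern composes cleanly with pytest.raises(SystemExit) and assertRaises, which is exactly how the rewritten tests nest it.
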
self.module_main_command.assert_has_calls([ @@ -112,18 +112,18 @@ class NPMModuleTestCase(ModuleTestCase): ]) def test_present_version_exists(self): - set_module_args({ + with set_module_args({ 'name': 'coffee-script', 'global': 'true', 'state': 'present', 'version': '2.5.1' - }) - self.module_main_command.side_effect = [ - (0, '{"dependencies": {"coffee-script": {"version" : "2.5.1"}}}', ''), - (0, '{}', ''), - ] + }): + self.module_main_command.side_effect = [ + (0, '{"dependencies": {"coffee-script": {"version" : "2.5.1"}}}', ''), + (0, '{}', ''), + ] - result = self.module_main(AnsibleExitJson) + result = self.module_main(AnsibleExitJson) self.assertFalse(result['changed']) self.module_main_command.assert_has_calls([ @@ -131,17 +131,17 @@ class NPMModuleTestCase(ModuleTestCase): ]) def test_absent(self): - set_module_args({ + with set_module_args({ 'name': 'coffee-script', 'global': 'true', 'state': 'absent' - }) - self.module_main_command.side_effect = [ - (0, '{"dependencies": {"coffee-script": {"version" : "2.5.1"}}}', ''), - (0, '{}', ''), - ] + }): + self.module_main_command.side_effect = [ + (0, '{"dependencies": {"coffee-script": {"version" : "2.5.1"}}}', ''), + (0, '{}', ''), + ] - result = self.module_main(AnsibleExitJson) + result = self.module_main(AnsibleExitJson) self.assertTrue(result['changed']) self.module_main_command.assert_has_calls([ @@ -150,18 +150,18 @@ class NPMModuleTestCase(ModuleTestCase): ]) def test_absent_version(self): - set_module_args({ + with set_module_args({ 'name': 'coffee-script', 'global': 'true', 'state': 'absent', 'version': '2.5.1' - }) - self.module_main_command.side_effect = [ - (0, '{"dependencies": {"coffee-script": {"version" : "2.5.1"}}}', ''), - (0, '{}', ''), - ] + }): + self.module_main_command.side_effect = [ + (0, '{"dependencies": {"coffee-script": {"version" : "2.5.1"}}}', ''), + (0, '{}', ''), + ] - result = self.module_main(AnsibleExitJson) + result = self.module_main(AnsibleExitJson) self.assertTrue(result['changed']) self.module_main_command.assert_has_calls([ @@ -170,18 +170,18 @@ class NPMModuleTestCase(ModuleTestCase): ]) def test_absent_version_different(self): - set_module_args({ + with set_module_args({ 'name': 'coffee-script', 'global': 'true', 'state': 'absent', 'version': '2.5.1' - }) - self.module_main_command.side_effect = [ - (0, '{"dependencies": {"coffee-script": {"version" : "2.5.0"}}}', ''), - (0, '{}', ''), - ] + }): + self.module_main_command.side_effect = [ + (0, '{"dependencies": {"coffee-script": {"version" : "2.5.0"}}}', ''), + (0, '{}', ''), + ] - result = self.module_main(AnsibleExitJson) + result = self.module_main(AnsibleExitJson) self.assertTrue(result['changed']) self.module_main_command.assert_has_calls([ @@ -190,16 +190,16 @@ class NPMModuleTestCase(ModuleTestCase): ]) def test_present_package_json(self): - set_module_args({ + with set_module_args({ 'global': 'true', 'state': 'present' - }) - self.module_main_command.side_effect = [ - (0, '{}', ''), - (0, '{}', ''), - ] + }): + self.module_main_command.side_effect = [ + (0, '{}', ''), + (0, '{}', ''), + ] - result = self.module_main(AnsibleExitJson) + result = self.module_main(AnsibleExitJson) self.assertTrue(result['changed']) self.module_main_command.assert_has_calls([ @@ -207,17 +207,17 @@ class NPMModuleTestCase(ModuleTestCase): ]) def test_present_package_json_production(self): - set_module_args({ + with set_module_args({ 'production': 'true', 'global': 'true', 'state': 'present', - }) - self.module_main_command.side_effect = [ - (0, '{}', ''), 
- (0, '{}', ''), - ] + }): + self.module_main_command.side_effect = [ + (0, '{}', ''), + (0, '{}', ''), + ] - result = self.module_main(AnsibleExitJson) + result = self.module_main(AnsibleExitJson) self.assertTrue(result['changed']) self.module_main_command.assert_has_calls([ @@ -225,17 +225,17 @@ class NPMModuleTestCase(ModuleTestCase): ]) def test_present_package_json_ci(self): - set_module_args({ + with set_module_args({ 'ci': 'true', 'global': 'true', 'state': 'present' - }) - self.module_main_command.side_effect = [ - (0, '{}', ''), - (0, '{}', ''), - ] + }): + self.module_main_command.side_effect = [ + (0, '{}', ''), + (0, '{}', ''), + ] - result = self.module_main(AnsibleExitJson) + result = self.module_main(AnsibleExitJson) self.assertTrue(result['changed']) self.module_main_command.assert_has_calls([ @@ -243,18 +243,18 @@ class NPMModuleTestCase(ModuleTestCase): ]) def test_present_package_json_ci_production(self): - set_module_args({ + with set_module_args({ 'ci': 'true', 'production': 'true', 'global': 'true', 'state': 'present' - }) - self.module_main_command.side_effect = [ - (0, '{}', ''), - (0, '{}', ''), - ] + }): + self.module_main_command.side_effect = [ + (0, '{}', ''), + (0, '{}', ''), + ] - result = self.module_main(AnsibleExitJson) + result = self.module_main(AnsibleExitJson) self.assertTrue(result['changed']) self.module_main_command.assert_has_calls([ diff --git a/tests/unit/plugins/modules/test_ocapi_command.py b/tests/unit/plugins/modules/test_ocapi_command.py index 3ce267c4e9..5f8c3c85f8 100644 --- a/tests/unit/plugins/modules/test_ocapi_command.py +++ b/tests/unit/plugins/modules/test_ocapi_command.py @@ -10,12 +10,12 @@ import os import shutil import tempfile -from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest from ansible.module_utils import basic import ansible_collections.community.general.plugins.modules.ocapi_command as module -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson -from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args, exit_json, fail_json +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import set_module_args, exit_json, fail_json from ansible.module_utils.six.moves.urllib.parse import urljoin @@ -188,20 +188,20 @@ class TestOcapiCommand(unittest.TestCase): def test_module_fail_when_required_args_missing(self): with self.assertRaises(AnsibleFailJson) as ansible_fail_json: - set_module_args({}) - module.main() + with set_module_args({}): + module.main() self.assertIn("missing required arguments:", get_exception_message(ansible_fail_json)) def test_module_fail_when_unknown_category(self): with self.assertRaises(AnsibleFailJson) as ansible_fail_json: - set_module_args({ + with set_module_args({ 'category': 'unknown', 'command': 'IndicatorLedOn', 'username': 'USERID', 'password': 'PASSW0RD=21', 'baseuri': MOCK_BASE_URI - }) - module.main() + }): + module.main() self.assertIn("Invalid Category 'unknown", get_exception_message(ansible_fail_json)) def test_set_power_mode(self): @@ -210,14 
+210,14 @@ class TestOcapiCommand(unittest.TestCase):
                             get_request=mock_get_request,
                             put_request=mock_put_request):
             with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
-                set_module_args({
+                with set_module_args({
                     'category': 'Chassis',
                     'command': 'PowerModeLow',
                     'baseuri': MOCK_BASE_URI,
                     'username': 'USERID',
                     'password': 'PASSWORD=21'
-                })
-                module.main()
+                }):
+                    module.main()
             self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json))
             self.assertTrue(is_changed(ansible_exit_json))
@@ -227,14 +227,14 @@ class TestOcapiCommand(unittest.TestCase):
                             get_request=mock_get_request,
                             put_request=mock_put_request):
             with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
-                set_module_args({
+                with set_module_args({
                     'category': 'Chassis',
                     'command': 'IndicatorLedOn',
                     'baseuri': MOCK_BASE_URI,
                     'username': 'USERID',
                     'password': 'PASSWORD=21'
-                })
-                module.main()
+                }):
+                    module.main()
             self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json))
             self.assertTrue(is_changed(ansible_exit_json))
@@ -244,14 +244,14 @@ class TestOcapiCommand(unittest.TestCase):
                             get_request=mock_get_request,
                             put_request=mock_invalid_http_request):
             with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
-                set_module_args({
+                with set_module_args({
                     'category': 'Chassis',
                     'command': 'PowerModeNormal',
                     'baseuri': MOCK_BASE_URI,
                     'username': 'USERID',
                     'password': 'PASSWORD=21'
-                })
-                module.main()
+                }):
+                    module.main()
             self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json))
             self.assertFalse(is_changed(ansible_exit_json))
@@ -261,15 +261,15 @@ class TestOcapiCommand(unittest.TestCase):
                             get_request=mock_get_request,
                             put_request=mock_invalid_http_request):
             with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
-                set_module_args({
+                with set_module_args({
                     'category': 'Chassis',
                     'command': 'IndicatorLedOn',
                     'baseuri': MOCK_BASE_URI,
                     'username': 'USERID',
                     'password': 'PASSWORD=21',
                     '_ansible_check_mode': True
-                })
-                module.main()
+                }):
+                    module.main()
             self.assertEqual(UPDATE_NOT_PERFORMED_IN_CHECK_MODE, get_exception_message(ansible_exit_json))
             self.assertTrue(is_changed(ansible_exit_json))
@@ -279,15 +279,15 @@ class TestOcapiCommand(unittest.TestCase):
                             get_request=mock_get_request,
                             put_request=mock_invalid_http_request):
             with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
-                set_module_args({
+                with set_module_args({
                     'category': 'Chassis',
                     'command': 'IndicatorLedOn',
                     'baseuri': MOCK_BASE_URI,
                     'username': 'USERID',
                     'password': 'PASSWORD=21',
                     '_ansible_check_mode': True
-                })
-                module.main()
+                }):
+                    module.main()
             self.assertEqual(UPDATE_NOT_PERFORMED_IN_CHECK_MODE, get_exception_message(ansible_exit_json))
             self.assertTrue(is_changed(ansible_exit_json))
@@ -297,14 +297,14 @@ class TestOcapiCommand(unittest.TestCase):
                             get_request=mock_get_request,
                             put_request=mock_invalid_http_request):
             with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
-                set_module_args({
+                with set_module_args({
                     'category': 'Chassis',
                     'command': 'IndicatorLedOff',
                     'baseuri': MOCK_BASE_URI,
                     'username': 'USERID',
                     'password': 'PASSWORD=21'
-                })
-                module.main()
+                }):
+                    module.main()
             self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json))
             self.assertFalse(is_changed(ansible_exit_json))
@@ -314,15 +314,15 @@ class TestOcapiCommand(unittest.TestCase):
                             get_request=mock_get_request,
                             put_request=mock_invalid_http_request):
             with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
-                set_module_args({
+                with set_module_args({
                     'category': 'Chassis',
                     'command': 'IndicatorLedOff',
                     'baseuri': MOCK_BASE_URI,
                     'username': 'USERID',
                     'password': 'PASSWORD=21',
                     "_ansible_check_mode": True
-                })
-                module.main()
+                }):
+                    module.main()
             self.assertEqual(NO_ACTION_PERFORMED_IN_CHECK_MODE, get_exception_message(ansible_exit_json))
             self.assertFalse(is_changed(ansible_exit_json))
@@ -331,14 +331,14 @@ class TestOcapiCommand(unittest.TestCase):
                             get_request=mock_get_request,
                             put_request=mock_put_request):
             with self.assertRaises(AnsibleFailJson) as ansible_fail_json:
-                set_module_args({
+                with set_module_args({
                     'category': 'Chassis',
                     'command': 'IndicatorLedBright',
                     'baseuri': MOCK_BASE_URI,
                     'username': 'USERID',
                     'password': 'PASSWORD=21'
-                })
-                module.main()
+                }):
+                    module.main()
             self.assertIn("Invalid Command", get_exception_message(ansible_fail_json))
 
     def test_reset_enclosure(self):
@@ -346,14 +346,14 @@ class TestOcapiCommand(unittest.TestCase):
                             get_request=mock_get_request,
                             put_request=mock_put_request):
             with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
-                set_module_args({
+                with set_module_args({
                     'category': 'Systems',
                     'command': 'PowerGracefulRestart',
                     'baseuri': MOCK_BASE_URI,
                     'username': 'USERID',
                     'password': 'PASSWORD=21'
-                })
-                module.main()
+                }):
+                    module.main()
             self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json))
             self.assertTrue(is_changed(ansible_exit_json))
@@ -362,15 +362,15 @@ class TestOcapiCommand(unittest.TestCase):
                             get_request=mock_get_request,
                             put_request=mock_invalid_http_request):
             with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
-                set_module_args({
+                with set_module_args({
                     'category': 'Systems',
                     'command': 'PowerGracefulRestart',
                     'baseuri': MOCK_BASE_URI,
                     'username': 'USERID',
                     'password': 'PASSWORD=21',
                     "_ansible_check_mode": True
-                })
-                module.main()
+                }):
+                    module.main()
             self.assertEqual(UPDATE_NOT_PERFORMED_IN_CHECK_MODE, get_exception_message(ansible_exit_json))
             self.assertTrue(is_changed(ansible_exit_json))
@@ -379,14 +379,14 @@ class TestOcapiCommand(unittest.TestCase):
                             get_request=mock_get_request,
                             put_request=mock_put_request):
             with self.assertRaises(AnsibleFailJson) as ansible_fail_json:
-                set_module_args({
+                with set_module_args({
                     'category': 'Update',
                     'command': 'FWUpload',
                     'baseuri': MOCK_BASE_URI,
                     'username': 'USERID',
                     'password': 'PASSWORD=21'
-                })
-                module.main()
+                }):
+                    module.main()
             self.assertEqual("Missing update_image_path.", get_exception_message(ansible_fail_json))
 
     def test_firmware_upload_file_not_found(self):
@@ -394,15 +394,15 @@ class TestOcapiCommand(unittest.TestCase):
                             get_request=mock_get_request,
                             put_request=mock_invalid_http_request):
             with self.assertRaises(AnsibleFailJson) as ansible_fail_json:
-                set_module_args({
+                with set_module_args({
                     'category': 'Update',
                     'command': 'FWUpload',
                     'update_image_path': 'nonexistentfile.bin',
                     'baseuri': MOCK_BASE_URI,
                     'username': 'USERID',
                     'password': 'PASSWORD=21'
-                })
-                module.main()
+                }):
+                    module.main()
             self.assertEqual("File does not exist.", get_exception_message(ansible_fail_json))
 
     def test_firmware_upload(self):
@@ -417,15 +417,15 @@ class TestOcapiCommand(unittest.TestCase):
                             put_request=mock_put_request,
                             post_request=mock_post_request):
             with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
-                set_module_args({
+                with set_module_args({
                     'category': 'Update',
                     'command': 'FWUpload',
                     'update_image_path': filepath,
                     'baseuri': MOCK_BASE_URI,
                     'username': 'USERID',
                     'password': 'PASSWORD=21'
-                })
-                module.main()
+                }):
+                    module.main()
             self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json))
             self.assertTrue(is_changed(ansible_exit_json))
@@ -441,7 +441,7 @@ class TestOcapiCommand(unittest.TestCase):
                             put_request=mock_put_request,
                             post_request=mock_invalid_http_request):
             with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
-                set_module_args({
+                with set_module_args({
                     'category': 'Update',
                     'command': 'FWUpload',
                     'update_image_path': filepath,
@@ -449,8 +449,8 @@ class TestOcapiCommand(unittest.TestCase):
                     'username': 'USERID',
                     'password': 'PASSWORD=21',
                     "_ansible_check_mode": True
-                })
-                module.main()
+                }):
+                    module.main()
             self.assertEqual(UPDATE_NOT_PERFORMED_IN_CHECK_MODE, get_exception_message(ansible_exit_json))
             self.assertTrue(is_changed(ansible_exit_json))
@@ -460,14 +460,14 @@ class TestOcapiCommand(unittest.TestCase):
                             put_request=mock_put_request,
                             post_request=mock_invalid_http_request):
             with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
-                set_module_args({
+                with set_module_args({
                     'category': 'Update',
                     'command': 'FWUpdate',
                     'baseuri': MOCK_BASE_URI,
                     'username': 'USERID',
                     'password': 'PASSWORD=21'
-                })
-                module.main()
+                }):
+                    module.main()
             self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json))
             self.assertTrue(is_changed(ansible_exit_json))
@@ -477,15 +477,15 @@ class TestOcapiCommand(unittest.TestCase):
                             put_request=mock_invalid_http_request,
                             post_request=mock_invalid_http_request):
             with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
-                set_module_args({
+                with set_module_args({
                     'category': 'Update',
                     'command': 'FWUpdate',
                     'baseuri': MOCK_BASE_URI,
                     'username': 'USERID',
                     'password': 'PASSWORD=21',
                     "_ansible_check_mode": True
-                })
-                module.main()
+                }):
+                    module.main()
             self.assertEqual(UPDATE_NOT_PERFORMED_IN_CHECK_MODE, get_exception_message(ansible_exit_json))
             self.assertTrue(is_changed(ansible_exit_json))
@@ -495,14 +495,14 @@ class TestOcapiCommand(unittest.TestCase):
                             put_request=mock_put_request,
                             post_request=mock_invalid_http_request):
             with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
-                set_module_args({
+                with set_module_args({
                     'category': 'Update',
                     'command': 'FWActivate',
                     'baseuri': MOCK_BASE_URI,
                     'username': 'USERID',
                     'password': 'PASSWORD=21'
-                })
-                module.main()
+                }):
+                    module.main()
             self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json))
             self.assertTrue(is_changed(ansible_exit_json))
@@ -512,15 +512,15 @@ class TestOcapiCommand(unittest.TestCase):
                             put_request=mock_invalid_http_request,
                             post_request=mock_invalid_http_request):
             with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
-                set_module_args({
+                with set_module_args({
                     'category': 'Update',
                     'command': 'FWActivate',
                     'baseuri': MOCK_BASE_URI,
                     'username': 'USERID',
                     'password': 'PASSWORD=21',
                     "_ansible_check_mode": True
-                })
-                module.main()
+                }):
+                    module.main()
             self.assertEqual(UPDATE_NOT_PERFORMED_IN_CHECK_MODE, get_exception_message(ansible_exit_json))
             self.assertTrue(is_changed(ansible_exit_json))
@@ -531,15 +531,15 @@ class TestOcapiCommand(unittest.TestCase):
                             put_request=mock_invalid_http_request,
                             post_request=mock_invalid_http_request):
             with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
-                set_module_args({
+                with set_module_args({
                     'category': 'Jobs',
                     'command': 'DeleteJob',
                     'baseuri': MOCK_BASE_URI,
                     'job_name': MOCK_JOB_NAME,
                     'username': 'USERID',
                     'password': 'PASSWORD=21'
-                })
-                module.main()
+                }):
+                    module.main()
             self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json))
             self.assertTrue(is_changed(ansible_exit_json))
@@ -550,15 +550,15 @@ class TestOcapiCommand(unittest.TestCase):
                             put_request=mock_invalid_http_request,
                             post_request=mock_invalid_http_request):
             with self.assertRaises(AnsibleFailJson) as ansible_fail_json:
-                set_module_args({
+                with set_module_args({
                     'category': 'Jobs',
                     'command': 'DeleteJob',
                     'baseuri': MOCK_BASE_URI,
                     'job_name': MOCK_JOB_NAME,
                     'username': 'USERID',
                     'password': 'PASSWORD=21'
-                })
-                module.main()
+                }):
+                    module.main()
             self.assertEqual("Cannot delete job because it is in progress.", get_exception_message(ansible_fail_json))
 
     def test_delete_job_in_progress_only_on_delete(self):
@@ -568,15 +568,15 @@ class TestOcapiCommand(unittest.TestCase):
                             put_request=mock_invalid_http_request,
                             post_request=mock_invalid_http_request):
             with self.assertRaises(AnsibleFailJson) as ansible_fail_json:
-                set_module_args({
+                with set_module_args({
                     'category': 'Jobs',
                     'command': 'DeleteJob',
                     'baseuri': MOCK_BASE_URI,
                     'job_name': MOCK_JOB_NAME,
                     'username': 'USERID',
                     'password': 'PASSWORD=21'
-                })
-                module.main()
+                }):
+                    module.main()
             self.assertEqual("Cannot delete job because it is in progress.", get_exception_message(ansible_fail_json))
 
     def test_delete_job_check_mode(self):
@@ -586,7 +586,7 @@ class TestOcapiCommand(unittest.TestCase):
                             put_request=mock_invalid_http_request,
                             post_request=mock_invalid_http_request):
             with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
-                set_module_args({
+                with set_module_args({
                     'category': 'Jobs',
                     'command': 'DeleteJob',
                     'baseuri': MOCK_BASE_URI,
@@ -594,8 +594,8 @@ class TestOcapiCommand(unittest.TestCase):
                     'username': 'USERID',
                     'password': 'PASSWORD=21',
                     '_ansible_check_mode': True
-                })
-                module.main()
+                }):
+                    module.main()
             self.assertEqual(UPDATE_NOT_PERFORMED_IN_CHECK_MODE, get_exception_message(ansible_exit_json))
             self.assertTrue(is_changed(ansible_exit_json))
@@ -606,7 +606,7 @@ class TestOcapiCommand(unittest.TestCase):
                             put_request=mock_invalid_http_request,
                             post_request=mock_invalid_http_request):
             with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
-                set_module_args({
+                with set_module_args({
                     'category': 'Jobs',
                     'command': 'DeleteJob',
                     'baseuri': MOCK_BASE_URI,
@@ -614,8 +614,8 @@ class TestOcapiCommand(unittest.TestCase):
                     'username': 'USERID',
                     'password': 'PASSWORD=21',
                     '_ansible_check_mode': True
-                })
-                module.main()
+                }):
+                    module.main()
             self.assertEqual("Job already deleted.", get_exception_message(ansible_exit_json))
             self.assertFalse(is_changed(ansible_exit_json))
@@ -626,7 +626,7 @@ class TestOcapiCommand(unittest.TestCase):
                             put_request=mock_invalid_http_request,
                             post_request=mock_invalid_http_request):
             with self.assertRaises(AnsibleFailJson) as ansible_fail_json:
-                set_module_args({
+                with set_module_args({
                     'category': 'Jobs',
                     'command': 'DeleteJob',
                     'baseuri': MOCK_BASE_URI,
@@ -634,6 +634,6 @@ class TestOcapiCommand(unittest.TestCase):
                     'username': 'USERID',
                     'password': 'PASSWORD=21',
                     '_ansible_check_mode': True
-                })
-                module.main()
+                }):
+                    module.main()
             self.assertEqual("Cannot delete job because it is in progress.", get_exception_message(ansible_fail_json))
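Note: the rewrite applied throughout test_ocapi_command.py above (and repeated in the files that follow) is mechanical: set_module_args from community.internal_test_tools is now used as a context manager, so the module arguments it patches in are only visible inside the with block and are restored on exit, and module.main() moves one indentation level in. A minimal sketch of the resulting test shape, with names taken from the tests above (the helper's internals are assumed, not shown in this diff):

    # Sketch only: the new calling convention for set_module_args.
    with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
        with set_module_args({'category': 'Chassis', 'command': 'IndicatorLedOn'}):
            module.main()  # sees the patched arguments only inside the block
    self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json))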
diff --git a/tests/unit/plugins/modules/test_ocapi_info.py b/tests/unit/plugins/modules/test_ocapi_info.py
index 5010b328f8..a5213f64e1 100644
--- a/tests/unit/plugins/modules/test_ocapi_info.py
+++ b/tests/unit/plugins/modules/test_ocapi_info.py
@@ -6,12 +6,12 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
-from ansible_collections.community.general.tests.unit.compat.mock import patch
-from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch
+from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest
 from ansible.module_utils import basic
 import ansible_collections.community.general.plugins.modules.ocapi_info as module
-from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson
-from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args, exit_json, fail_json
+from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson
+from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import set_module_args, exit_json, fail_json
 
 MOCK_BASE_URI = "mockBaseUri"
 MOCK_JOB_NAME_IN_PROGRESS = "MockJobInProgress"
@@ -127,32 +127,32 @@ class TestOcapiInfo(unittest.TestCase):
 
     def test_module_fail_when_required_args_missing(self):
         with self.assertRaises(AnsibleFailJson) as ansible_fail_json:
-            set_module_args({})
-            module.main()
+            with set_module_args({}):
+                module.main()
         self.assertIn("missing required arguments:", get_exception_message(ansible_fail_json))
 
     def test_module_fail_when_unknown_category(self):
         with self.assertRaises(AnsibleFailJson) as ansible_fail_json:
-            set_module_args({
+            with set_module_args({
                 'category': 'unknown',
                 'command': 'JobStatus',
                 'username': 'USERID',
                 'password': 'PASSW0RD=21',
                 'baseuri': MOCK_BASE_URI
-            })
-            module.main()
+            }):
+                module.main()
         self.assertIn("Invalid Category 'unknown", get_exception_message(ansible_fail_json))
 
     def test_module_fail_when_unknown_command(self):
         with self.assertRaises(AnsibleFailJson) as ansible_fail_json:
-            set_module_args({
+            with set_module_args({
                 'category': 'Jobs',
                 'command': 'unknown',
                 'username': 'USERID',
                 'password': 'PASSW0RD=21',
                 'baseuri': MOCK_BASE_URI
-            })
-            module.main()
+            }):
+                module.main()
         self.assertIn("Invalid Command 'unknown", get_exception_message(ansible_fail_json))
 
     def test_job_status_in_progress(self):
@@ -162,15 +162,15 @@ class TestOcapiInfo(unittest.TestCase):
                             delete_request=mock_delete_request,
                             post_request=mock_post_request):
             with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
-                set_module_args({
+                with set_module_args({
                     'category': 'Jobs',
                     'command': 'JobStatus',
                     'job_name': MOCK_JOB_NAME_IN_PROGRESS,
                     'baseuri': MOCK_BASE_URI,
                     'username': 'USERID',
                     'password': 'PASSWORD=21'
-                })
-                module.main()
+                }):
+                    module.main()
             self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json))
             response_data = ansible_exit_json.exception.args[0]
             self.assertEqual(MOCK_HTTP_RESPONSE_JOB_IN_PROGRESS["data"]["PercentComplete"], response_data["percentComplete"])
@@ -190,15 +190,15 @@ class TestOcapiInfo(unittest.TestCase):
                             delete_request=mock_delete_request,
                             post_request=mock_post_request):
             with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
-                set_module_args({
+                with set_module_args({
                     'category': 'Jobs',
                     'command': 'JobStatus',
                     'job_name': MOCK_JOB_NAME_COMPLETE,
                     'baseuri': MOCK_BASE_URI,
                     'username': 'USERID',
                     'password': 'PASSWORD=21'
-                })
-                module.main()
+                }):
+                    module.main()
             self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json))
             response_data = ansible_exit_json.exception.args[0]
             self.assertEqual(MOCK_HTTP_RESPONSE_JOB_COMPLETE["data"]["PercentComplete"], response_data["percentComplete"])
@@ -218,15 +218,15 @@ class TestOcapiInfo(unittest.TestCase):
                             delete_request=mock_delete_request,
                             post_request=mock_post_request):
             with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
-                set_module_args({
+                with set_module_args({
                     'category': 'Jobs',
                     'command': 'JobStatus',
                     'job_name': MOCK_JOB_NAME_DOES_NOT_EXIST,
                     'baseuri': MOCK_BASE_URI,
                     'username': 'USERID',
                     'password': 'PASSWORD=21'
-                })
-                module.main()
+                }):
+                    module.main()
             self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json))
             response_data = ansible_exit_json.exception.args[0]
             self.assertFalse(response_data["jobExists"])
diff --git a/tests/unit/plugins/modules/test_oneview_enclosure_info.py b/tests/unit/plugins/modules/test_oneview_enclosure_info.py
index e8ef3449fb..aeb62f4a0f 100644
--- a/tests/unit/plugins/modules/test_oneview_enclosure_info.py
+++ b/tests/unit/plugins/modules/test_oneview_enclosure_info.py
@@ -7,7 +7,7 @@ __metaclass__ = type
 
 from .hpe_test_utils import FactsParamsTestCase
-from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest
 
 from ansible_collections.community.general.plugins.modules.oneview_enclosure_info import EnclosureInfoModule
diff --git a/tests/unit/plugins/modules/test_oneview_ethernet_network.py b/tests/unit/plugins/modules/test_oneview_ethernet_network.py
index f1398740ee..8854ccec44 100644
--- a/tests/unit/plugins/modules/test_oneview_ethernet_network.py
+++ b/tests/unit/plugins/modules/test_oneview_ethernet_network.py
@@ -9,7 +9,7 @@ __metaclass__ = type
 
 import yaml
 
-from ansible_collections.community.general.tests.unit.compat import unittest, mock
+from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest, mock
 from .oneview_module_loader import EthernetNetworkModule
 from .hpe_test_utils import OneViewBaseTestCase
diff --git a/tests/unit/plugins/modules/test_oneview_ethernet_network_info.py b/tests/unit/plugins/modules/test_oneview_ethernet_network_info.py
index 4a2813e2f8..f010b7d46e 100644
--- a/tests/unit/plugins/modules/test_oneview_ethernet_network_info.py
+++ b/tests/unit/plugins/modules/test_oneview_ethernet_network_info.py
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
-from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest
 from .oneview_module_loader import EthernetNetworkInfoModule
 from .hpe_test_utils import FactsParamsTestCase
diff --git a/tests/unit/plugins/modules/test_oneview_fc_network.py b/tests/unit/plugins/modules/test_oneview_fc_network.py
index 6def80fc43..625ee6a948 100644
--- a/tests/unit/plugins/modules/test_oneview_fc_network.py
+++ b/tests/unit/plugins/modules/test_oneview_fc_network.py
@@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
-from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest
 from .oneview_module_loader import FcNetworkModule
 from .hpe_test_utils import OneViewBaseTestCase
diff --git a/tests/unit/plugins/modules/test_oneview_fc_network_info.py b/tests/unit/plugins/modules/test_oneview_fc_network_info.py
index 236ce136ad..22f5a073e5 100644
--- a/tests/unit/plugins/modules/test_oneview_fc_network_info.py
+++ b/tests/unit/plugins/modules/test_oneview_fc_network_info.py
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
-from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest
 from .oneview_module_loader import FcNetworkInfoModule
 from .hpe_test_utils import FactsParamsTestCase
diff --git a/tests/unit/plugins/modules/test_oneview_fcoe_network.py b/tests/unit/plugins/modules/test_oneview_fcoe_network.py
index 224e5471e9..d5c3f840cc 100644
--- a/tests/unit/plugins/modules/test_oneview_fcoe_network.py
+++ b/tests/unit/plugins/modules/test_oneview_fcoe_network.py
@@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
-from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest
 from .oneview_module_loader import FcoeNetworkModule
 from .hpe_test_utils import OneViewBaseTestCase
diff --git a/tests/unit/plugins/modules/test_oneview_fcoe_network_info.py b/tests/unit/plugins/modules/test_oneview_fcoe_network_info.py
index 387c1da3c1..d64ec8f58b 100644
--- a/tests/unit/plugins/modules/test_oneview_fcoe_network_info.py
+++ b/tests/unit/plugins/modules/test_oneview_fcoe_network_info.py
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
-from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest
 from .oneview_module_loader import FcoeNetworkInfoModule
 from .hpe_test_utils import FactsParamsTestCase
diff --git a/tests/unit/plugins/modules/test_oneview_logical_interconnect_group.py b/tests/unit/plugins/modules/test_oneview_logical_interconnect_group.py
index 1f941fb50f..a9ab0fc73d 100644
--- a/tests/unit/plugins/modules/test_oneview_logical_interconnect_group.py
+++ b/tests/unit/plugins/modules/test_oneview_logical_interconnect_group.py
@@ -7,7 +7,7 @@ __metaclass__ = type
 
 from copy import deepcopy
 
-from ansible_collections.community.general.tests.unit.compat import unittest, mock
+from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest, mock
 from .hpe_test_utils import OneViewBaseTestCase
 from ansible_collections.community.general.plugins.modules.oneview_logical_interconnect_group import LogicalInterconnectGroupModule
diff --git a/tests/unit/plugins/modules/test_oneview_logical_interconnect_group_info.py b/tests/unit/plugins/modules/test_oneview_logical_interconnect_group_info.py
index 9fa602a8cf..b259a7d7e4 100644
--- a/tests/unit/plugins/modules/test_oneview_logical_interconnect_group_info.py
+++ b/tests/unit/plugins/modules/test_oneview_logical_interconnect_group_info.py
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
-from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest
 from .hpe_test_utils import FactsParamsTestCase
 from ansible_collections.community.general.plugins.modules.oneview_logical_interconnect_group_info import (
     LogicalInterconnectGroupInfoModule
diff --git a/tests/unit/plugins/modules/test_oneview_network_set.py b/tests/unit/plugins/modules/test_oneview_network_set.py
index f801cd102a..43ec3c759c 100644
--- a/tests/unit/plugins/modules/test_oneview_network_set.py
+++ b/tests/unit/plugins/modules/test_oneview_network_set.py
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
-from ansible_collections.community.general.tests.unit.compat import unittest, mock
+from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest, mock
 from .hpe_test_utils import OneViewBaseTestCase
 from .oneview_module_loader import NetworkSetModule
diff --git a/tests/unit/plugins/modules/test_oneview_network_set_info.py b/tests/unit/plugins/modules/test_oneview_network_set_info.py
index 13cd0400a4..512ca0ff7e 100644
--- a/tests/unit/plugins/modules/test_oneview_network_set_info.py
+++ b/tests/unit/plugins/modules/test_oneview_network_set_info.py
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
-from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest
 from .oneview_module_loader import NetworkSetInfoModule
 from .hpe_test_utils import FactsParamsTestCase
diff --git a/tests/unit/plugins/modules/test_oneview_san_manager.py b/tests/unit/plugins/modules/test_oneview_san_manager.py
index d675c3b353..6ff3598248 100644
--- a/tests/unit/plugins/modules/test_oneview_san_manager.py
+++ b/tests/unit/plugins/modules/test_oneview_san_manager.py
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
-from ansible_collections.community.general.tests.unit.compat import unittest, mock
+from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest, mock
 from .oneview_module_loader import SanManagerModule
 from .hpe_test_utils import OneViewBaseTestCase
 from copy import deepcopy
diff --git a/tests/unit/plugins/modules/test_oneview_san_manager_info.py b/tests/unit/plugins/modules/test_oneview_san_manager_info.py
index be1f243161..3ba18a4a07 100644
--- a/tests/unit/plugins/modules/test_oneview_san_manager_info.py
+++ b/tests/unit/plugins/modules/test_oneview_san_manager_info.py
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
-from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest
 from .oneview_module_loader import SanManagerInfoModule
 from .hpe_test_utils import FactsParamsTestCase
diff --git a/tests/unit/plugins/modules/test_opkg.py b/tests/unit/plugins/modules/test_opkg.py
index cfee3e1115..dad3a8d3f1 100644
--- a/tests/unit/plugins/modules/test_opkg.py
+++ b/tests/unit/plugins/modules/test_opkg.py
@@ -8,7 +8,7 @@ __metaclass__ = type
 
 from ansible_collections.community.general.plugins.modules import opkg
 
-from .helper import Helper, RunCommandMock  # pylint: disable=unused-import
+from .uthelper import UTHelper, RunCommandMock
 
-Helper.from_module(opkg, __name__)
+UTHelper.from_module(opkg, __name__, mocks=[RunCommandMock])
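Note: test_opkg.py above drops the collection-local Helper in favour of UTHelper with an explicit mock list. Judging by the YAML fixtures in this patch, UTHelper.from_module() loads the test_<module>.yaml file sitting next to the test, generates one test case per entry under test_cases, and replays the mocked run_command calls in order via RunCommandMock. A minimal sketch of the wiring for a hypothetical module named example (the YAML schema is inferred from the fixtures, not from UTHelper's documentation):

    # test_example.py -- 'example' is a placeholder, not a real module.
    from ansible_collections.community.general.plugins.modules import example
    from .uthelper import UTHelper, RunCommandMock

    # Reads test_example.yaml and emits one unittest case per test_cases id.
    UTHelper.from_module(example, __name__, mocks=[RunCommandMock])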
diff --git a/tests/unit/plugins/modules/test_opkg.yaml b/tests/unit/plugins/modules/test_opkg.yaml
index 090a72c20c..a437e54499 100644
--- a/tests/unit/plugins/modules/test_opkg.yaml
+++ b/tests/unit/plugins/modules/test_opkg.yaml
@@ -4,169 +4,172 @@
 # SPDX-License-Identifier: GPL-3.0-or-later
 
 ---
-- id: install_zlibdev
-  input:
-    name: zlib-dev
-    state: present
-  output:
-    msg: installed 1 package(s)
-  mocks:
-    run_command:
-      - command: [/testbin/opkg, --version]
-        environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: false}
-        rc: 0
-        out: ""
-        err: ""
-      - command: [/testbin/opkg, list-installed, zlib-dev]
-        environ: *env-def
-        rc: 0
-        out: ""
-        err: ""
-      - command: [/testbin/opkg, install, zlib-dev]
-        environ: *env-def
-        rc: 0
-        out: |
-          Installing zlib-dev (1.2.11-6) to root...
-          Downloading https://downloads.openwrt.org/releases/22.03.0/packages/mips_24kc/base/zlib-dev_1.2.11-6_mips_24kc.ipk
-          Installing zlib (1.2.11-6) to root...
-          Downloading https://downloads.openwrt.org/releases/22.03.0/packages/mips_24kc/base/zlib_1.2.11-6_mips_24kc.ipk
-          Configuring zlib.
-          Configuring zlib-dev.
-        err: ""
-      - command: [/testbin/opkg, list-installed, zlib-dev]
-        environ: *env-def
-        rc: 0
-        out: |
-          zlib-dev - 1.2.11-6
-        err: ""
-- id: install_zlibdev_present
-  input:
-    name: zlib-dev
-    state: present
-  output:
-    msg: package(s) already present
-  mocks:
-    run_command:
-      - command: [/testbin/opkg, --version]
-        environ: *env-def
-        rc: 0
-        out: ""
-        err: ""
-      - command: [/testbin/opkg, list-installed, zlib-dev]
-        environ: *env-def
-        rc: 0
-        out: |
-          zlib-dev - 1.2.11-6
-        err: ""
-- id: install_zlibdev_force_reinstall
-  input:
-    name: zlib-dev
-    state: present
-    force: reinstall
-  output:
-    msg: installed 1 package(s)
-  mocks:
-    run_command:
-      - command: [/testbin/opkg, --version]
-        environ: *env-def
-        rc: 0
-        out: ""
-        err: ""
-      - command: [/testbin/opkg, list-installed, zlib-dev]
-        environ: *env-def
-        rc: 0
-        out: |
-          zlib-dev - 1.2.11-6
-        err: ""
-      - command: [/testbin/opkg, install, --force-reinstall, zlib-dev]
-        environ: *env-def
-        rc: 0
-        out: |
-          Installing zlib-dev (1.2.11-6) to root...
-          Downloading https://downloads.openwrt.org/releases/22.03.0/packages/mips_24kc/base/zlib-dev_1.2.11-6_mips_24kc.ipk
-          Configuring zlib-dev.
-        err: ""
-      - command: [/testbin/opkg, list-installed, zlib-dev]
-        environ: *env-def
-        rc: 0
-        out: |
-          zlib-dev - 1.2.11-6
-        err: ""
-- id: install_zlibdev_with_version
-  input:
-    name: zlib-dev=1.2.11-6
-    state: present
-  output:
-    msg: installed 1 package(s)
-  mocks:
-    run_command:
-      - command: [/testbin/opkg, --version]
-        environ: *env-def
-        rc: 0
-        out: ""
-        err: ""
-      - command: [/testbin/opkg, list-installed, zlib-dev]
-        environ: *env-def
-        rc: 0
-        out: ""
-        err: ""
-      - command: [/testbin/opkg, install, zlib-dev=1.2.11-6]
-        environ: *env-def
-        rc: 0
-        out: |
-          Installing zlib-dev (1.2.11-6) to root...
-          Downloading https://downloads.openwrt.org/releases/22.03.0/packages/mips_24kc/base/zlib-dev_1.2.11-6_mips_24kc.ipk
-          Installing zlib (1.2.11-6) to root...
-          Downloading https://downloads.openwrt.org/releases/22.03.0/packages/mips_24kc/base/zlib_1.2.11-6_mips_24kc.ipk
-          Configuring zlib.
-          Configuring zlib-dev.
-        err: ""
-      - command: [/testbin/opkg, list-installed, zlib-dev]
-        environ: *env-def
-        rc: 0
-        out: "zlib-dev - 1.2.11-6 \n" # This output has the extra space at the end, to satisfy the behaviour of Yocto/OpenEmbedded's opkg
-        err: ""
-- id: install_vim_updatecache
-  input:
-    name: vim-fuller
-    state: present
-    update_cache: true
-  output:
-    msg: installed 1 package(s)
-  mocks:
-    run_command:
-      - command: [/testbin/opkg, --version]
-        environ: *env-def
-        rc: 0
-        out: ""
-        err: ""
-      - command: [/testbin/opkg, update]
-        environ: *env-def
-        rc: 0
-        out: ""
-        err: ""
-      - command: [/testbin/opkg, list-installed, vim-fuller]
-        environ: *env-def
-        rc: 0
-        out: ""
-        err: ""
-      - command: [/testbin/opkg, install, vim-fuller]
-        environ: *env-def
-        rc: 0
-        out: |
-          Multiple packages (libgcc1 and libgcc1) providing same name marked HOLD or PREFER.
Using latest. - Installing vim-fuller (9.0-1) to root... - Downloading https://downloads.openwrt.org/snapshots/packages/x86_64/packages/vim-fuller_9.0-1_x86_64.ipk - Installing terminfo (6.4-2) to root... - Downloading https://downloads.openwrt.org/snapshots/packages/x86_64/base/terminfo_6.4-2_x86_64.ipk - Installing libncurses6 (6.4-2) to root... - Downloading https://downloads.openwrt.org/snapshots/packages/x86_64/base/libncurses6_6.4-2_x86_64.ipk - Configuring terminfo. - Configuring libncurses6. - Configuring vim-fuller. - err: "" - - command: [/testbin/opkg, list-installed, vim-fuller] - environ: *env-def - rc: 0 - out: "vim-fuller - 9.0-1 \n" # This output has the extra space at the end, to satisfy the behaviour of Yocto/OpenEmbedded's opkg - err: "" +anchors: + environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: false} +test_cases: + - id: install_zlibdev + input: + name: zlib-dev + state: present + output: + msg: installed 1 package(s) + mocks: + run_command: + - command: [/testbin/opkg, --version] + environ: *env-def + rc: 0 + out: '' + err: '' + - command: [/testbin/opkg, list-installed, zlib-dev] + environ: *env-def + rc: 0 + out: '' + err: '' + - command: [/testbin/opkg, install, zlib-dev] + environ: *env-def + rc: 0 + out: | + Installing zlib-dev (1.2.11-6) to root... + Downloading https://downloads.openwrt.org/releases/22.03.0/packages/mips_24kc/base/zlib-dev_1.2.11-6_mips_24kc.ipk + Installing zlib (1.2.11-6) to root... + Downloading https://downloads.openwrt.org/releases/22.03.0/packages/mips_24kc/base/zlib_1.2.11-6_mips_24kc.ipk + Configuring zlib. + Configuring zlib-dev. + err: '' + - command: [/testbin/opkg, list-installed, zlib-dev] + environ: *env-def + rc: 0 + out: | + zlib-dev - 1.2.11-6 + err: '' + - id: install_zlibdev_present + input: + name: zlib-dev + state: present + output: + msg: package(s) already present + mocks: + run_command: + - command: [/testbin/opkg, --version] + environ: *env-def + rc: 0 + out: '' + err: '' + - command: [/testbin/opkg, list-installed, zlib-dev] + environ: *env-def + rc: 0 + out: | + zlib-dev - 1.2.11-6 + err: '' + - id: install_zlibdev_force_reinstall + input: + name: zlib-dev + state: present + force: reinstall + output: + msg: installed 1 package(s) + mocks: + run_command: + - command: [/testbin/opkg, --version] + environ: *env-def + rc: 0 + out: '' + err: '' + - command: [/testbin/opkg, list-installed, zlib-dev] + environ: *env-def + rc: 0 + out: | + zlib-dev - 1.2.11-6 + err: '' + - command: [/testbin/opkg, install, --force-reinstall, zlib-dev] + environ: *env-def + rc: 0 + out: | + Installing zlib-dev (1.2.11-6) to root... + Downloading https://downloads.openwrt.org/releases/22.03.0/packages/mips_24kc/base/zlib-dev_1.2.11-6_mips_24kc.ipk + Configuring zlib-dev. + err: '' + - command: [/testbin/opkg, list-installed, zlib-dev] + environ: *env-def + rc: 0 + out: | + zlib-dev - 1.2.11-6 + err: '' + - id: install_zlibdev_with_version + input: + name: zlib-dev=1.2.11-6 + state: present + output: + msg: installed 1 package(s) + mocks: + run_command: + - command: [/testbin/opkg, --version] + environ: *env-def + rc: 0 + out: '' + err: '' + - command: [/testbin/opkg, list-installed, zlib-dev] + environ: *env-def + rc: 0 + out: '' + err: '' + - command: [/testbin/opkg, install, zlib-dev=1.2.11-6] + environ: *env-def + rc: 0 + out: | + Installing zlib-dev (1.2.11-6) to root... 
+ Downloading https://downloads.openwrt.org/releases/22.03.0/packages/mips_24kc/base/zlib-dev_1.2.11-6_mips_24kc.ipk + Installing zlib (1.2.11-6) to root... + Downloading https://downloads.openwrt.org/releases/22.03.0/packages/mips_24kc/base/zlib_1.2.11-6_mips_24kc.ipk + Configuring zlib. + Configuring zlib-dev. + err: '' + - command: [/testbin/opkg, list-installed, zlib-dev] + environ: *env-def + rc: 0 + out: "zlib-dev - 1.2.11-6 \n" # This output has the extra space at the end, to satisfy the behaviour of Yocto/OpenEmbedded's opkg + err: '' + - id: install_vim_updatecache + input: + name: vim-fuller + state: present + update_cache: true + output: + msg: installed 1 package(s) + mocks: + run_command: + - command: [/testbin/opkg, --version] + environ: *env-def + rc: 0 + out: '' + err: '' + - command: [/testbin/opkg, update] + environ: *env-def + rc: 0 + out: '' + err: '' + - command: [/testbin/opkg, list-installed, vim-fuller] + environ: *env-def + rc: 0 + out: '' + err: '' + - command: [/testbin/opkg, install, vim-fuller] + environ: *env-def + rc: 0 + out: | + Multiple packages (libgcc1 and libgcc1) providing same name marked HOLD or PREFER. Using latest. + Installing vim-fuller (9.0-1) to root... + Downloading https://downloads.openwrt.org/snapshots/packages/x86_64/packages/vim-fuller_9.0-1_x86_64.ipk + Installing terminfo (6.4-2) to root... + Downloading https://downloads.openwrt.org/snapshots/packages/x86_64/base/terminfo_6.4-2_x86_64.ipk + Installing libncurses6 (6.4-2) to root... + Downloading https://downloads.openwrt.org/snapshots/packages/x86_64/base/libncurses6_6.4-2_x86_64.ipk + Configuring terminfo. + Configuring libncurses6. + Configuring vim-fuller. + err: '' + - command: [/testbin/opkg, list-installed, vim-fuller] + environ: *env-def + rc: 0 + out: "vim-fuller - 9.0-1 \n" # This output has the extra space at the end, to satisfy the behaviour of Yocto/OpenEmbedded's opkg + err: '' diff --git a/tests/unit/plugins/modules/test_pacemaker_cluster.py b/tests/unit/plugins/modules/test_pacemaker_cluster.py new file mode 100644 index 0000000000..ad69411a9c --- /dev/null +++ b/tests/unit/plugins/modules/test_pacemaker_cluster.py @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- +# Author: Dexter Le (dextersydney2001@gmail.com) +# Largely adapted from test_redhat_subscription by +# Jiri Hnidek (jhnidek@redhat.com) +# +# Copyright (c) Dexter Le (dextersydney2001@gmail.com) +# Copyright (c) Jiri Hnidek (jhnidek@redhat.com) +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +from ansible_collections.community.general.plugins.modules import pacemaker_cluster +from .uthelper import UTHelper, RunCommandMock + +UTHelper.from_module(pacemaker_cluster, __name__, mocks=[RunCommandMock]) diff --git a/tests/unit/plugins/modules/test_pacemaker_cluster.yaml b/tests/unit/plugins/modules/test_pacemaker_cluster.yaml new file mode 100644 index 0000000000..785a7cb4f6 --- /dev/null +++ b/tests/unit/plugins/modules/test_pacemaker_cluster.yaml @@ -0,0 +1,488 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Dexter Le (dextersydney2001@gmail.com) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +--- +anchors: + environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: false} +test_cases: + - 
id: test_online_minimal_input_initial_online_all_no_maintenance + input: + state: online + output: + changed: false + previous_value: ' * Online: [ pc1, pc2, pc3 ]' + value: ' * Online: [ pc1, pc2, pc3 ]' + mocks: + run_command: + - command: [/testbin/pcs, cluster, status] + environ: *env-def + rc: 0 + out: ' * Online: [ pc1, pc2, pc3 ]' + err: "" + - command: [/testbin/pcs, cluster, start, --all, --wait=300] + environ: *env-def + rc: 0 + out: "Starting Cluster..." + err: "" + - command: [/testbin/pcs, property, config] + environ: *env-def + rc: 1 + out: | + Cluster Properties: cib-bootstrap-options + cluster-infrastructure=corosync + cluster-name=hacluster + dc-version=2.1.9-1.fc41-7188dbf + have-watchdog=false + err: "" + - command: [/testbin/pcs, cluster, status] + environ: *env-def + rc: 0 + out: ' * Online: [ pc1, pc2, pc3 ]' + err: "" + - id: test_online_minimal_input_initial_offline_all_maintenance + input: + state: online + output: + changed: true + previous_value: 'Error: cluster is not currently running on this node' + value: ' * Online: [ pc1, pc2, pc3 ]' + mocks: + run_command: + - command: [/testbin/pcs, cluster, status] + environ: *env-def + rc: 1 + out: 'Error: cluster is not currently running on this node' + err: "" + - command: [/testbin/pcs, cluster, start, --all, --wait=300] + environ: *env-def + rc: 0 + out: "Starting Cluster..." + err: "" + - command: [/testbin/pcs, property, config] + environ: *env-def + rc: 0 + out: | + Cluster Properties: cib-bootstrap-options + cluster-infrastructure=corosync + cluster-name=hacluster + dc-version=2.1.9-1.fc41-7188dbf + have-watchdog=false + maintenance-mode=true + err: "" + - command: [/testbin/pcs, property, set, maintenance-mode=false] + environ: *env-def + rc: 0 + out: "" + err: "" + - command: [/testbin/pcs, cluster, status] + environ: *env-def + rc: 0 + out: ' * Online: [ pc1, pc2, pc3 ]' + err: "" + - id: test_online_minimal_input_initial_offline_single_nonlocal_no_maintenance + input: + state: online + name: pc2 + output: + changed: true + previous_value: '* Node pc2: UNCLEAN (offline)\n * Online: [ pc1, pc3 ]' + value: ' * Online: [ pc1, pc2, pc3 ]' + mocks: + run_command: + - command: [/testbin/pcs, cluster, status] + environ: *env-def + rc: 0 + out: '* Node pc2: UNCLEAN (offline)\n * Online: [ pc1, pc3 ]' + err: "" + - command: [/testbin/pcs, cluster, start, pc2, --wait=300] + environ: *env-def + rc: 0 + out: "Starting Cluster..." + err: "" + - command: [/testbin/pcs, property, config] + environ: *env-def + rc: 1 + out: | + Cluster Properties: cib-bootstrap-options + cluster-infrastructure=corosync + cluster-name=hacluster + dc-version=2.1.9-1.fc41-7188dbf + have-watchdog=false + err: "" + - command: [/testbin/pcs, cluster, status] + environ: *env-def + rc: 0 + out: ' * Online: [ pc1, pc2, pc3 ]' + err: "" + - id: test_online_minimal_input_initial_offline_single_local_no_maintenance + input: + state: online + name: pc1 + output: + changed: true + previous_value: 'Error: cluster is not currently running on this node' + value: ' * Online: [ pc1, pc2, pc3 ]' + mocks: + run_command: + - command: [/testbin/pcs, cluster, status] + environ: *env-def + rc: 1 + out: 'Error: cluster is not currently running on this node' + err: "" + - command: [/testbin/pcs, cluster, start, pc1, --wait=300] + environ: *env-def + rc: 0 + out: "Starting Cluster..." 
+ err: "" + - command: [/testbin/pcs, property, config] + environ: *env-def + rc: 1 + out: | + Cluster Properties: cib-bootstrap-options + cluster-infrastructure=corosync + cluster-name=hacluster + dc-version=2.1.9-1.fc41-7188dbf + have-watchdog=false + err: "" + - command: [/testbin/pcs, cluster, status] + environ: *env-def + rc: 0 + out: ' * Online: [ pc1, pc2, pc3 ]' + err: "" + - id: test_offline_minimal_input_initial_online_all + input: + state: offline + output: + changed: true + previous_value: ' * Online: [ pc1, pc2, pc3 ]' + value: 'Error: cluster is not currently running on this node' + mocks: + run_command: + - command: [/testbin/pcs, cluster, status] + environ: *env-def + rc: 0 + out: ' * Online: [ pc1, pc2, pc3 ]' + err: "" + - command: [/testbin/pcs, cluster, stop, --all, --wait=300] + environ: *env-def + rc: 0 + out: "Stopping Cluster..." + err: "" + - command: [/testbin/pcs, cluster, status] + environ: *env-def + rc: 1 + out: 'Error: cluster is not currently running on this node' + err: "" + - id: test_offline_minimal_input_initial_offline_all + input: + state: offline + output: + changed: false + previous_value: 'Error: cluster is not currently running on this node' + value: 'Error: cluster is not currently running on this node' + mocks: + run_command: + - command: [/testbin/pcs, cluster, status] + environ: *env-def + rc: 1 + out: 'Error: cluster is not currently running on this node' + err: "" + - command: [/testbin/pcs, cluster, stop, --all, --wait=300] + environ: *env-def + rc: 0 + out: "Stopping Cluster..." + err: "" + - command: [/testbin/pcs, cluster, status] + environ: *env-def + rc: 1 + out: 'Error: cluster is not currently running on this node' + err: "" + - id: test_offline_minimal_input_initial_offline_single_nonlocal + input: + state: offline + name: pc3 + output: + changed: true + previous_value: '* Node pc2: UNCLEAN (offline)\n * Online: [ pc1, pc3 ]' + value: '* Node pc2: UNCLEAN (offline)\n* Node pc3: UNCLEAN (offline)\n * Online: [ pc1 ]' + mocks: + run_command: + - command: [/testbin/pcs, cluster, status] + environ: *env-def + rc: 0 + out: '* Node pc2: UNCLEAN (offline)\n * Online: [ pc1, pc3 ]' + err: "" + - command: [/testbin/pcs, cluster, stop, pc3, --wait=300] + environ: *env-def + rc: 0 + out: "Stopping Cluster..." + err: "" + - command: [/testbin/pcs, cluster, status] + environ: *env-def + rc: 0 + out: '* Node pc2: UNCLEAN (offline)\n* Node pc3: UNCLEAN (offline)\n * Online: [ pc1 ]' + err: "" + - id: test_restart_minimal_input_initial_online_all_no_maintenance + input: + state: restart + output: + changed: false + previous_value: ' * Online: [ pc1, pc2, pc3 ]' + value: ' * Online: [ pc1, pc2, pc3 ]' + mocks: + run_command: + - command: [/testbin/pcs, cluster, status] + environ: *env-def + rc: 0 + out: ' * Online: [ pc1, pc2, pc3 ]' + err: "" + - command: [/testbin/pcs, cluster, stop, --all, --wait=300] + environ: *env-def + rc: 0 + out: "Stopping Cluster..." + err: "" + - command: [/testbin/pcs, cluster, start, --all, --wait=300] + environ: *env-def + rc: 0 + out: "Starting Cluster..." 
+ err: "" + - command: [/testbin/pcs, property, config] + environ: *env-def + rc: 1 + out: | + Cluster Properties: cib-bootstrap-options + cluster-infrastructure=corosync + cluster-name=hacluster + dc-version=2.1.9-1.fc41-7188dbf + have-watchdog=false + err: "" + - command: [/testbin/pcs, cluster, status] + environ: *env-def + rc: 0 + out: ' * Online: [ pc1, pc2, pc3 ]' + err: "" + - id: test_restart_minimal_input_initial_offline_all_no_maintenance + input: + state: restart + output: + changed: true + previous_value: 'Error: cluster is not currently running on this node' + value: ' * Online: [ pc1, pc2, pc3 ]' + mocks: + run_command: + - command: [/testbin/pcs, cluster, status] + environ: *env-def + rc: 1 + out: 'Error: cluster is not currently running on this node' + err: "" + - command: [/testbin/pcs, cluster, stop, --all, --wait=300] + environ: *env-def + rc: 0 + out: "Stopping Cluster..." + err: "" + - command: [/testbin/pcs, cluster, start, --all, --wait=300] + environ: *env-def + rc: 0 + out: "Starting Cluster..." + err: "" + - command: [/testbin/pcs, property, config] + environ: *env-def + rc: 1 + out: | + Cluster Properties: cib-bootstrap-options + cluster-infrastructure=corosync + cluster-name=hacluster + dc-version=2.1.9-1.fc41-7188dbf + have-watchdog=false + err: "" + - command: [/testbin/pcs, cluster, status] + environ: *env-def + rc: 0 + out: ' * Online: [ pc1, pc2, pc3 ]' + err: "" + - id: test_restart_minimal_input_initial_offline_all_maintenance + input: + state: restart + output: + changed: true + previous_value: 'Error: cluster is not currently running on this node' + value: ' * Online: [ pc1, pc2, pc3 ]' + mocks: + run_command: + - command: [/testbin/pcs, cluster, status] + environ: *env-def + rc: 1 + out: 'Error: cluster is not currently running on this node' + err: "" + - command: [/testbin/pcs, cluster, stop, --all, --wait=300] + environ: *env-def + rc: 0 + out: "Stopping Cluster..." + err: "" + - command: [/testbin/pcs, cluster, start, --all, --wait=300] + environ: *env-def + rc: 0 + out: "Starting Cluster..." 
+ err: "" + - command: [/testbin/pcs, property, config] + environ: *env-def + rc: 0 + out: | + Cluster Properties: cib-bootstrap-options + cluster-infrastructure=corosync + cluster-name=hacluster + dc-version=2.1.9-1.fc41-7188dbf + have-watchdog=false + maintenance-mode=true + err: "" + - command: [/testbin/pcs, property, set, maintenance-mode=false] + environ: *env-def + rc: 0 + out: "" + err: "" + - command: [/testbin/pcs, cluster, status] + environ: *env-def + rc: 0 + out: ' * Online: [ pc1, pc2, pc3 ]' + err: "" + - id: test_maintenance_minimal_input_initial_online + input: + state: maintenance + output: + changed: true + previous_value: 'maintenance-mode=false (default)' + value: 'maintenance-mode=true' + mocks: + run_command: + - command: [/testbin/pcs, property, config, maintenance-mode] + environ: *env-def + rc: 0 + out: 'maintenance-mode=false (default)' + err: "" + - command: [/testbin/pcs, property, set, maintenance-mode=true] + environ: *env-def + rc: 0 + out: "" + err: "" + - command: [/testbin/pcs, property, config, maintenance-mode] + environ: *env-def + rc: 0 + out: 'maintenance-mode=true' + err: "" + - id: test_maintenance_minimal_input_initial_offline + input: + state: maintenance + output: + failed: true + msg: "pcs failed with error (rc=1): Error: unable to get cib" + mocks: + run_command: + - command: [/testbin/pcs, property, config, maintenance-mode] + environ: *env-def + rc: 1 + out: "" + err: "Error: unable to get cib" + - command: [/testbin/pcs, property, set, maintenance-mode=true] + environ: *env-def + rc: 1 + out: "" + err: "Error: unable to get cib" + - id: test_maintenance_minimal_input_initial_maintenance + input: + state: maintenance + output: + changed: false + previous_value: 'maintenance-mode=true' + value: 'maintenance-mode=true' + mocks: + run_command: + - command: [/testbin/pcs, property, config, maintenance-mode] + environ: *env-def + rc: 0 + out: 'maintenance-mode=true' + err: "" + - command: [/testbin/pcs, property, set, maintenance-mode=true] + environ: *env-def + rc: 0 + out: "" + err: "" + - command: [/testbin/pcs, property, config, maintenance-mode] + environ: *env-def + rc: 0 + out: 'maintenance-mode=true' + err: "" + - id: test_cleanup_minimal_input_initial_resources_not_exist + input: + state: cleanup + output: + changed: false + previous_value: "NO resources configured" + value: "NO resources configured" + mocks: + run_command: + - command: [/testbin/pcs, resource, status] + environ: *env-def + rc: 0 + out: "NO resources configured" + err: "" + - command: [/testbin/pcs, resource, cleanup] + environ: *env-def + rc: 0 + out: "Cleaned up all resources on all nodes" + err: "" + - command: [/testbin/pcs, resource, status] + environ: *env-def + rc: 0 + out: "NO resources configured" + err: "" + - id: test_cleanup_minimal_input_initial_resources_exists + input: + state: cleanup + output: + changed: true + previous_value: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Started" + value: "NO resources configured" + mocks: + run_command: + - command: [/testbin/pcs, resource, status] + environ: *env-def + rc: 0 + out: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Started" + err: "" + - command: [/testbin/pcs, resource, cleanup] + environ: *env-def + rc: 0 + out: "Cleaned up all resources on all nodes" + err: "" + - command: [/testbin/pcs, resource, status] + environ: *env-def + rc: 0 + out: "NO resources configured" + err: "" + - id: test_cleanup_specific_minimal_input_initial_resources_exists + input: + state: cleanup + name: virtual-ip + output: + changed: 
true + previous_value: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Started" + value: "NO resources configured" + mocks: + run_command: + - command: [/testbin/pcs, resource, status, virtual-ip] + environ: *env-def + rc: 0 + out: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Started" + err: "" + - command: [/testbin/pcs, resource, cleanup, virtual-ip] + environ: *env-def + rc: 0 + out: "Cleaned up virtual-ip on X" + err: "" + - command: [/testbin/pcs, resource, status, virtual-ip] + environ: *env-def + rc: 0 + out: "NO resources configured" + err: "" diff --git a/tests/unit/plugins/modules/test_pacemaker_resource.py b/tests/unit/plugins/modules/test_pacemaker_resource.py new file mode 100644 index 0000000000..f559d5ebf5 --- /dev/null +++ b/tests/unit/plugins/modules/test_pacemaker_resource.py @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- +# Author: Dexter Le (dextersydney2001@gmail.com) +# Largely adapted from test_redhat_subscription by +# Jiri Hnidek (jhnidek@redhat.com) +# +# Copyright (c) Dexter Le (dextersydney2001@gmail.com) +# Copyright (c) Jiri Hnidek (jhnidek@redhat.com) +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +from ansible_collections.community.general.plugins.modules import pacemaker_resource +from .uthelper import UTHelper, RunCommandMock + +UTHelper.from_module(pacemaker_resource, __name__, mocks=[RunCommandMock]) diff --git a/tests/unit/plugins/modules/test_pacemaker_resource.yaml b/tests/unit/plugins/modules/test_pacemaker_resource.yaml new file mode 100644 index 0000000000..76679d14d9 --- /dev/null +++ b/tests/unit/plugins/modules/test_pacemaker_resource.yaml @@ -0,0 +1,383 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Dexter Le (dextersydney2001@gmail.com) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +--- +anchors: + environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: false} +test_cases: + - id: test_missing_input + input: {} + output: + failed: true + msg: "missing required arguments: name" + - id: test_present_minimal_input_resource_not_exist + input: + state: present + name: virtual-ip + resource_type: + resource_name: IPaddr2 + resource_option: + - "ip=[192.168.2.1]" + output: + changed: true + previous_value: null + value: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Started" + mocks: + run_command: + - command: [/testbin/pcs, resource, status, virtual-ip] + environ: *env-def + rc: 1 + out: "" + err: "Error: resource or tag id 'virtual-ip' not found" + - command: [/testbin/pcs, property, config] + environ: *env-def + rc: 1 + out: | + Cluster Properties: cib-bootstrap-options + cluster-infrastructure=corosync + cluster-name=hacluster + dc-version=2.1.9-1.fc41-7188dbf + have-watchdog=false + err: "" + - command: [/testbin/pcs, resource, create, virtual-ip, IPaddr2, "ip=[192.168.2.1]", --wait=300] + environ: *env-def + rc: 0 + out: "Assumed agent name 'ocf:heartbeat:IPaddr2' (deduced from 'IPAddr2')" + err: "" + - command: [/testbin/pcs, resource, status, virtual-ip] + environ: *env-def + rc: 0 + out: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Started" + err: "" + - id: test_present_filled_input_resource_not_exist + input: + state: present + name: virtual-ip + resource_type: + resource_name: IPaddr2 + resource_option: + - 
"ip=[192.168.2.1]" + resource_operation: + - operation_action: start + operation_option: + - timeout=1200 + - operation_action: stop + operation_option: + - timeout=1200 + - operation_action: monitor + operation_option: + - timeout=1200 + resource_meta: + - test_meta1=123 + - test_meta2=456 + resource_argument: + argument_action: group + argument_option: + - test_group + wait: 200 + output: + changed: true + previous_value: null + value: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Started" + mocks: + run_command: + - command: [/testbin/pcs, resource, status, virtual-ip] + environ: *env-def + rc: 1 + out: "" + err: "Error: resource or tag id 'virtual-ip' not found" + - command: [/testbin/pcs, property, config] + environ: *env-def + rc: 1 + out: | + Cluster Properties: cib-bootstrap-options + cluster-infrastructure=corosync + cluster-name=hacluster + dc-version=2.1.9-1.fc41-7188dbf + have-watchdog=false + err: "" + - command: [/testbin/pcs, resource, create, virtual-ip, IPaddr2, "ip=[192.168.2.1]", op, start, timeout=1200, op, stop, timeout=1200, op, monitor, timeout=1200, meta, test_meta1=123, meta, test_meta2=456, --group, test_group, --wait=200] + environ: *env-def + rc: 0 + out: "Assumed agent name 'ocf:heartbeat:IPaddr2' (deduced from 'IPAddr2')" + err: "" + - command: [/testbin/pcs, resource, status, virtual-ip] + environ: *env-def + rc: 0 + out: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Started" + err: "" + - id: test_present_minimal_input_resource_exists + input: + state: present + name: virtual-ip + resource_type: + resource_name: IPaddr2 + resource_option: + - "ip=[192.168.2.1]" + output: + changed: false + previous_value: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Started" + value: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Started" + mocks: + run_command: + - command: [/testbin/pcs, resource, status, virtual-ip] + environ: *env-def + rc: 0 + out: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Started" + err: "" + - command: [/testbin/pcs, property, config] + environ: *env-def + rc: 1 + out: | + Cluster Properties: cib-bootstrap-options + cluster-infrastructure=corosync + cluster-name=hacluster + dc-version=2.1.9-1.fc41-7188dbf + have-watchdog=false + err: "" + - command: [/testbin/pcs, resource, create, virtual-ip, IPaddr2, "ip=[192.168.2.1]", --wait=300] + environ: *env-def + rc: 1 + out: "" + err: "Error: 'virtual-ip' already exists\n" + - command: [/testbin/pcs, resource, status, virtual-ip] + environ: *env-def + rc: 0 + out: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Started" + err: "" + - id: test_present_minimal_input_resource_maintenance_mode + input: + state: present + name: virtual-ip + resource_type: + resource_name: IPaddr2 + resource_option: + - "ip=[192.168.2.1]" + output: + changed: true + previous_value: null + value: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Stopped" + mocks: + run_command: + - command: [/testbin/pcs, resource, status, virtual-ip] + environ: *env-def + rc: 1 + out: "" + err: "" + - command: [/testbin/pcs, property, config] + environ: *env-def + rc: 0 + out: | + Cluster Properties: cib-bootstrap-options + cluster-infrastructure=corosync + cluster-name=hacluster + dc-version=2.1.9-1.fc41-7188dbf + have-watchdog=false + maintenance-mode=true + err: "" + - command: [/testbin/pcs, resource, create, virtual-ip, IPaddr2, "ip=[192.168.2.1]", --wait=300] + environ: *env-def + rc: 1 + out: "" + err: "Error: resource 'virtual-ip' is not running on any node" + - command: [/testbin/pcs, resource, status, virtual-ip] + environ: *env-def + rc: 0 + out: " * 
virtual-ip\t(ocf:heartbeat:IPAddr2):\t Stopped" + err: "" + - id: test_absent_minimal_input_resource_not_exist + input: + state: absent + name: virtual-ip + output: + changed: false + previous_value: null + value: null + mocks: + run_command: + - command: [/testbin/pcs, resource, status, virtual-ip] + environ: *env-def + rc: 1 + out: "" + err: "Error: resource or tag id 'virtual-ip' not found" + - command: [/testbin/pcs, property, config] + environ: *env-def + rc: 1 + out: | + Cluster Properties: cib-bootstrap-options + cluster-infrastructure=corosync + cluster-name=hacluster + dc-version=2.1.9-1.fc41-7188dbf + have-watchdog=false + err: "" + - command: [/testbin/pcs, resource, remove, virtual-ip] + environ: *env-def + rc: 1 + out: "" + err: "Error: Resource 'virtual-ip' does not exist.\n" + - command: [/testbin/pcs, resource, status, virtual-ip] + environ: *env-def + rc: 1 + out: "" + err: "Error: resource or tag id 'virtual-ip' not found" + - id: test_absent_minimal_input_resource_exists + input: + state: absent + name: virtual-ip + output: + changed: true + previous_value: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Started" + value: null + mocks: + run_command: + - command: [/testbin/pcs, resource, status, virtual-ip] + environ: *env-def + rc: 0 + out: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Started" + err: "" + - command: [/testbin/pcs, property, config] + environ: *env-def + rc: 1 + out: | + Cluster Properties: cib-bootstrap-options + cluster-infrastructure=corosync + cluster-name=hacluster + dc-version=2.1.9-1.fc41-7188dbf + have-watchdog=false + err: "" + - command: [/testbin/pcs, resource, remove, virtual-ip] + environ: *env-def + rc: 0 + out: "" + err: "Attempting to stop: virtual-ip... Stopped\n" + - command: [/testbin/pcs, resource, status, virtual-ip] + environ: *env-def + rc: 1 + out: "" + err: "Error: resource or tag id 'virtual-ip' not found" + - id: test_absent_minimal_input_maintenance_mode + input: + state: absent + name: virtual-ip + output: + changed: true + previous_value: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Started" + value: null + mocks: + run_command: + - command: [/testbin/pcs, resource, status, virtual-ip] + environ: *env-def + rc: 0 + out: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Started" + err: "" + - command: [/testbin/pcs, property, config] + environ: *env-def + rc: 0 + out: | + Cluster Properties: cib-bootstrap-options + cluster-infrastructure=corosync + cluster-name=hacluster + dc-version=2.1.9-1.fc41-7188dbf + have-watchdog=false + maintenance-mode=true + err: "" + - command: [/testbin/pcs, resource, remove, virtual-ip, --force] + environ: *env-def + rc: 0 + out: "" + err: "Deleting Resource (and group) - virtual-ip" + - command: [/testbin/pcs, resource, status, virtual-ip] + environ: *env-def + rc: 1 + out: "" + err: "Error: resource or tag id 'virtual-ip' not found" + - id: test_enabled_minimal_input_resource_not_exists + input: + state: enabled + name: virtual-ip + output: + failed: true + msg: "pcs failed with error (rc=1): bundle/clone/group/resource/tag 'virtual-ip' does not exist" + mocks: + run_command: + - command: [/testbin/pcs, resource, status, virtual-ip] + environ: *env-def + rc: 1 + out: "" + err: "Error: resource or tag id 'virtual-ip' not found" + - command: [/testbin/pcs, resource, enable, virtual-ip] + environ: *env-def + rc: 1 + out: "" + err: "bundle/clone/group/resource/tag 'virtual-ip' does not exist" + - id: test_enabled_minimal_input_resource_exists + input: + state: enabled + name: virtual-ip + output: + changed: true + 
+      previous_value: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Stopped (disabled)"
+      value: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Starting"
+    mocks:
+      run_command:
+        - command: [/testbin/pcs, resource, status, virtual-ip]
+          environ: *env-def
+          rc: 0
+          out: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Stopped (disabled)"
+          err: ""
+        - command: [/testbin/pcs, resource, enable, virtual-ip]
+          environ: *env-def
+          rc: 0
+          out: ""
+          err: ""
+        - command: [/testbin/pcs, resource, status, virtual-ip]
+          environ: *env-def
+          rc: 0
+          out: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Starting"
+          err: ""
+  - id: test_disable_minimal_input_resource_not_exists
+    input:
+      state: disabled
+      name: virtual-ip
+    output:
+      failed: true
+      msg: "pcs failed with error (rc=1): bundle/clone/group/resource/tag 'virtual-ip' does not exist"
+    mocks:
+      run_command:
+        - command: [/testbin/pcs, resource, status, virtual-ip]
+          environ: *env-def
+          rc: 1
+          out: ""
+          err: "Error: resource or tag id 'virtual-ip' not found"
+        - command: [/testbin/pcs, resource, disable, virtual-ip]
+          environ: *env-def
+          rc: 1
+          out: ""
+          err: "bundle/clone/group/resource/tag 'virtual-ip' does not exist"
+  - id: test_disable_minimal_input_resource_exists
+    input:
+      state: disabled
+      name: virtual-ip
+    output:
+      changed: true
+      previous_value: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Started"
+      value: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Stopped (disabled)"
+    mocks:
+      run_command:
+        - command: [/testbin/pcs, resource, status, virtual-ip]
+          environ: *env-def
+          rc: 0
+          out: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Started"
+          err: ""
+        - command: [/testbin/pcs, resource, disable, virtual-ip]
+          environ: *env-def
+          rc: 0
+          out: ""
+          err: ""
+        - command: [/testbin/pcs, resource, status, virtual-ip]
+          environ: *env-def
+          rc: 0
+          out: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Stopped (disabled)"
+          err: ""
diff --git a/tests/unit/plugins/modules/test_pacman.py b/tests/unit/plugins/modules/test_pacman.py
index 04ff5bb3e8..0928926856 100644
--- a/tests/unit/plugins/modules/test_pacman.py
+++ b/tests/unit/plugins/modules/test_pacman.py
@@ -8,8 +8,8 @@ __metaclass__ = type

 from ansible.module_utils import basic

-from ansible_collections.community.general.tests.unit.compat import mock
-from ansible_collections.community.general.tests.unit.plugins.modules.utils import (
+from ansible_collections.community.internal_test_tools.tests.unit.compat import mock
+from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import (
     AnsibleExitJson,
     AnsibleFailJson,
     set_module_args,
@@ -152,25 +152,25 @@ class TestPacman:
     def test_fail_without_required_args(self):
         with pytest.raises(AnsibleFailJson) as e:
-            set_module_args({})
-            pacman.main()
+            with set_module_args({}):
+                pacman.main()
         assert e.match(r"one of the following is required")

     def test_success(self, mock_empty_inventory):
-        set_module_args({"update_cache": True})  # Simplest args to let init go through
-        P = pacman.Pacman(pacman.setup_module())
-        with pytest.raises(AnsibleExitJson) as e:
-            P.success()
+        with set_module_args({"update_cache": True}):  # Simplest args to let init go through
+            P = pacman.Pacman(pacman.setup_module())
+            with pytest.raises(AnsibleExitJson) as e:
+                P.success()

     def test_fail(self, mock_empty_inventory):
-        set_module_args({"update_cache": True})
-        P = pacman.Pacman(pacman.setup_module())
+        with set_module_args({"update_cache": True}):
+            P = pacman.Pacman(pacman.setup_module())

-        args = dict(
-            msg="msg", stdout="something", stderr="somethingelse", cmd=["command", "with", "args"], rc=1
-        )
-        with pytest.raises(AnsibleFailJson) as e:
-            P.fail(**args)
+            args = dict(
+                msg="msg", stdout="something", stderr="somethingelse", cmd=["command", "with", "args"], rc=1
+            )
+            with pytest.raises(AnsibleFailJson) as e:
+                P.fail(**args)

         assert all(item in e.value.args[0] for item in args)

@@ -333,33 +333,33 @@ class TestPacman:
     def test_build_inventory(self, expected, run_command_side_effect, raises):
         self.mock_run_command.side_effect = run_command_side_effect

-        set_module_args({"update_cache": True})
-        if raises:
-            with pytest.raises(raises):
+        with set_module_args({"update_cache": True}):
+            if raises:
+                with pytest.raises(raises):
+                    P = pacman.Pacman(pacman.setup_module())
+                    P._build_inventory()
+            else:
                 P = pacman.Pacman(pacman.setup_module())
-                P._build_inventory()
-        else:
-            P = pacman.Pacman(pacman.setup_module())
-            assert P._build_inventory() == expected
+                assert P._build_inventory() == expected

     @pytest.mark.parametrize("check_mode_value", [True, False])
     def test_upgrade_check_empty_inventory(self, mock_empty_inventory, check_mode_value):
-        set_module_args({"upgrade": True, "_ansible_check_mode": check_mode_value})
-        P = pacman.Pacman(pacman.setup_module())
-        with pytest.raises(AnsibleExitJson) as e:
-            P.run()
-        self.mock_run_command.call_count == 0
+        with set_module_args({"upgrade": True, "_ansible_check_mode": check_mode_value}):
+            P = pacman.Pacman(pacman.setup_module())
+            with pytest.raises(AnsibleExitJson) as e:
+                P.run()
+            assert self.mock_run_command.call_count == 0
         out = e.value.args[0]
         assert "packages" not in out
         assert not out["changed"]
         assert "diff" not in out

     def test_update_db_check(self, mock_empty_inventory):
-        set_module_args({"update_cache": True, "_ansible_check_mode": True})
-        P = pacman.Pacman(pacman.setup_module())
+        with set_module_args({"update_cache": True, "_ansible_check_mode": True}):
+            P = pacman.Pacman(pacman.setup_module())

-        with pytest.raises(AnsibleExitJson) as e:
-            P.run()
+            with pytest.raises(AnsibleExitJson) as e:
+                P.run()
         self.mock_run_command.call_count == 0
         out = e.value.args[0]
         assert "packages" not in out
@@ -422,14 +422,14 @@ class TestPacman:
     def test_update_db(self, module_args, expected_calls, changed):
         args = {"update_cache": True}
         args.update(module_args)
-        set_module_args(args)
+        with set_module_args(args):

-        self.mock_run_command.side_effect = [
-            (rc, stdout, stderr) for expected_call, kwargs, rc, stdout, stderr in expected_calls
-        ]
-        with pytest.raises(AnsibleExitJson) as e:
-            P = pacman.Pacman(pacman.setup_module())
-            P.run()
+            self.mock_run_command.side_effect = [
+                (rc, stdout, stderr) for expected_call, kwargs, rc, stdout, stderr in expected_calls
+            ]
+            with pytest.raises(AnsibleExitJson) as e:
+                P = pacman.Pacman(pacman.setup_module())
+                P.run()
         self.mock_run_command.assert_has_calls([
             mock.call(mock.ANY, expected_call, **kwargs) for expected_call, kwargs, rc, stdout, stderr in expected_calls
         ])
@@ -475,16 +475,16 @@ class TestPacman:
         args = {"upgrade": True, "_ansible_check_mode": check_mode_value}
         if upgrade_extra_args:
             args["upgrade_extra_args"] = upgrade_extra_args
-        set_module_args(args)
+        with set_module_args(args):

-        if run_command_data and "return_value" in run_command_data:
-            self.mock_run_command.return_value = run_command_data["return_value"]
+            if run_command_data and "return_value" in run_command_data:
+                self.mock_run_command.return_value = run_command_data["return_value"]

-        P = pacman.Pacman(pacman.setup_module())
+            P = pacman.Pacman(pacman.setup_module())

-        with pytest.raises(AnsibleExitJson) as e:
-            P.run()
-        out = e.value.args[0]
+            with pytest.raises(AnsibleExitJson) as e:
+                P.run()
+            out = e.value.args[0]

         if check_mode_value:
             self.mock_run_command.call_count == 0
@@ -499,13 +499,13 @@ class TestPacman:
         assert out["diff"]["before"] and out["diff"]["after"]

     def test_upgrade_fail(self, mock_valid_inventory):
-        set_module_args({"upgrade": True})
-        self.mock_run_command.return_value = [1, "stdout", "stderr"]
-        P = pacman.Pacman(pacman.setup_module())
+        with set_module_args({"upgrade": True}):
+            self.mock_run_command.return_value = [1, "stdout", "stderr"]
+            P = pacman.Pacman(pacman.setup_module())

-        with pytest.raises(AnsibleFailJson) as e:
-            P.run()
-        self.mock_run_command.call_count == 1
+            with pytest.raises(AnsibleFailJson) as e:
+                P.run()
+            assert self.mock_run_command.call_count == 1
         out = e.value.args[0]
         assert out["failed"]
         assert out["stdout"] == "stdout"
@@ -633,19 +633,19 @@ class TestPacman:
     def test_package_list(
         self, mock_valid_inventory, state, pkg_names, expected, run_command_data, raises
     ):
-        set_module_args({"name": pkg_names, "state": state})
-        P = pacman.Pacman(pacman.setup_module())
-        P.inventory = P._build_inventory()
-        if run_command_data:
-            self.mock_run_command.side_effect = run_command_data["side_effect"]
-
-        if raises:
-            with pytest.raises(raises):
-                P.package_list()
-        else:
-            assert sorted(P.package_list()) == sorted(expected)
+        with set_module_args({"name": pkg_names, "state": state}):
+            P = pacman.Pacman(pacman.setup_module())
+            P.inventory = P._build_inventory()
             if run_command_data:
-            assert self.mock_run_command.mock_calls == run_command_data["calls"]
+                self.mock_run_command.side_effect = run_command_data["side_effect"]
+
+            if raises:
+                with pytest.raises(raises):
+                    P.package_list()
+            else:
+                assert sorted(P.package_list()) == sorted(expected)
+            if run_command_data:
+                assert self.mock_run_command.mock_calls == run_command_data["calls"]

     @pytest.mark.parametrize("check_mode_value", [True, False])
     @pytest.mark.parametrize(
@@ -658,11 +658,11 @@ class TestPacman:
     def test_op_packages_nothing_to_do(
         self, mock_valid_inventory, mock_package_list, check_mode_value, name, state, package_list
     ):
-        set_module_args({"name": name, "state": state, "_ansible_check_mode": check_mode_value})
-        mock_package_list.return_value = package_list
-        P = pacman.Pacman(pacman.setup_module())
-        with pytest.raises(AnsibleExitJson) as e:
-            P.run()
+        with set_module_args({"name": name, "state": state, "_ansible_check_mode": check_mode_value}):
+            mock_package_list.return_value = package_list
+            P = pacman.Pacman(pacman.setup_module())
+            with pytest.raises(AnsibleExitJson) as e:
+                P.run()
         out = e.value.args[0]
         assert not out["changed"]
         assert "packages" in out
@@ -1079,13 +1079,13 @@ class TestPacman:
         run_command_data,
         raises,
     ):
-        set_module_args(module_args)
-        self.mock_run_command.side_effect = run_command_data["side_effect"]
-        mock_package_list.return_value = package_list_out
+        with set_module_args(module_args):
+            self.mock_run_command.side_effect = run_command_data["side_effect"]
+            mock_package_list.return_value = package_list_out

-        P = pacman.Pacman(pacman.setup_module())
-        with pytest.raises(raises) as e:
-            P.run()
+            P = pacman.Pacman(pacman.setup_module())
+            with pytest.raises(raises) as e:
+                P.run()
         out = e.value.args[0]

         assert self.mock_run_command.mock_calls == run_command_data["calls"]
diff --git a/tests/unit/plugins/modules/test_pacman_key.py b/tests/unit/plugins/modules/test_pacman_key.py
index ac85708985..d372885ce2 100644
--- a/tests/unit/plugins/modules/test_pacman_key.py
+++ b/tests/unit/plugins/modules/test_pacman_key.py
@@ -17,8 +17,9 @@ MOCK_BIN_PATH = '/mocked/path'
 TESTING_KEYID = '14F26682D0916CDD81E37B6D61B7B526D98F0353'
 TESTING_KEYFILE_PATH = '/tmp/pubkey.asc'

-# gpg --{show,list}-key output (key present)
-GPG_SHOWKEY_OUTPUT = '''tru::1:1616373715:0:3:1:5
+# gpg --{show,list}-key output (key present, but expired)
+GPG_SHOWKEY_OUTPUT_EXPIRED = """
+tru::1:1616373715:0:3:1:5
 pub:-:4096:1:61B7B526D98F0353:1437155332:::-:::scSC::::::23::0:
 fpr:::::::::14F26682D0916CDD81E37B6D61B7B526D98F0353:
 uid:-::::1437155332::E57D1F9BFF3B404F9F30333629369B08DF5E2161::Mozilla Software Releases <release@mozilla.com>::::::::::0:
@@ -27,24 +28,76 @@ fpr:::::::::F2EF4E6E6AE75B95F11F1EB51C69C4E55E9905DB:
 sub:e:4096:1:BBBEBDBB24C6F355:1498143157:1561215157:::::s::::::23:
 fpr:::::::::DCEAC5D96135B91C4EA672ABBBBEBDBB24C6F355:
 sub:e:4096:1:F1A6668FBB7D572E:1559247338:1622319338:::::s::::::23:
-fpr:::::::::097B313077AE62A02F84DA4DF1A6668FBB7D572E:'''
+fpr:::::::::097B313077AE62A02F84DA4DF1A6668FBB7D572E:
+""".strip()
+
+# gpg --{show,list}-key output (key present and trusted)
+GPG_SHOWKEY_OUTPUT_TRUSTED = """
+tru::1:1616373715:0:3:1:5
+pub:f:4096:1:61B7B526D98F0353:1437155332:::-:::scSC::::::23::0:
+fpr:::::::::14F26682D0916CDD81E37B6D61B7B526D98F0353:
+uid:f::::1437155332::E57D1F9BFF3B404F9F30333629369B08DF5E2161::Mozilla Software Releases <release@mozilla.com>::::::::::0:
+sub:e:4096:1:1C69C4E55E9905DB:1437155572:1500227572:::::s::::::23:
+fpr:::::::::F2EF4E6E6AE75B95F11F1EB51C69C4E55E9905DB:
+sub:e:4096:1:BBBEBDBB24C6F355:1498143157:1561215157:::::s::::::23:
+fpr:::::::::DCEAC5D96135B91C4EA672ABBBBEBDBB24C6F355:
+sub:e:4096:1:F1A6668FBB7D572E:1559247338:1622319338:::::s::::::23:
+fpr:::::::::097B313077AE62A02F84DA4DF1A6668FBB7D572E:
+""".strip()
+
+GPG_LIST_SECRET_KEY_OUTPUT = """
+sec:u:2048:1:58FCCBCC131FCCAB:1406639814:::u:::scSC:::+:::23::0:
+fpr:::::::::AC0F357BE07F1493C34DCAB258FCCBCC131FCCAB:
+grp:::::::::C1227FFDD039AD942F777EA0639E1F1EAA96AB12:
+uid:u::::1406639814::79311EDEA01302E0DBBB2F33AE799F8BB677652F::Pacman Keyring Master Key <pacman@localhost>::::::::::0:
+""".lstrip()
+
+GPG_CHECK_SIGNATURES_OUTPUT = """
+tru::1:1742507906:1750096255:3:1:5
+pub:f:4096:1:61B7B526D98F0353:1437155332:::-:::scSC::::::23:1742507897:1 https\x3a//185.125.188.26\x3a443:
+fpr:::::::::14F26682D0916CDD81E37B6D61B7B526D98F0353:
+uid:f::::1437155332::E57D1F9BFF3B404F9F30333629369B08DF5E2161::Mozilla Software Releases <release@mozilla.com>:::::::::1742507897:1:
+sig:!::1:61B7B526D98F0353:1437155332::::Mozilla Software Releases <release@mozilla.com>:13x:::::2:
+sig:!::1:58FCCBCC131FCCAB:1742507905::::Pacman Keyring Master Key <pacman@localhost>:10l::AC0F357BE07F1493C34DCAB258FCCBCC131FCCAB:::8:
+sub:f:4096:1:E36D3B13F3D93274:1683308659:1746380659:::::s::::::23:
+fpr:::::::::ADD7079479700DCADFDD5337E36D3B13F3D93274:
+sig:!::1:61B7B526D98F0353:1683308659::::Mozilla Software Releases <release@mozilla.com>:18x::14F26682D0916CDD81E37B6D61B7B526D98F0353:::10:
+sub:e:4096:1:1C69C4E55E9905DB:1437155572:1500227572:::::s::::::23:
+fpr:::::::::F2EF4E6E6AE75B95F11F1EB51C69C4E55E9905DB:
+sig:!::1:61B7B526D98F0353:1437155572::::Mozilla Software Releases <release@mozilla.com>:18x:::::2:
+sub:e:4096:1:BBBEBDBB24C6F355:1498143157:1561215157:::::s::::::23:
+fpr:::::::::DCEAC5D96135B91C4EA672ABBBBEBDBB24C6F355:
+sig:!::1:61B7B526D98F0353:1498143157::::Mozilla Software Releases <release@mozilla.com>:18x::14F26682D0916CDD81E37B6D61B7B526D98F0353:::8:
+sub:e:4096:1:F1A6668FBB7D572E:1559247338:1622319338:::::s::::::23:
+fpr:::::::::097B313077AE62A02F84DA4DF1A6668FBB7D572E:
+sig:!::1:61B7B526D98F0353:1559247338::::Mozilla Software Releases <release@mozilla.com>:18x::14F26682D0916CDD81E37B6D61B7B526D98F0353:::10:
+sub:e:4096:1:EBE41E90F6F12F6D:1621282261:1684354261:::::s::::::23:
+fpr:::::::::4360FE2109C49763186F8E21EBE41E90F6F12F6D:
+sig:!::1:61B7B526D98F0353:1621282261::::Mozilla Software Releases <release@mozilla.com>:18x::14F26682D0916CDD81E37B6D61B7B526D98F0353:::10:
+""".strip()

 # gpg --{show,list}-key output (key absent)
-GPG_NOKEY_OUTPUT = '''gpg: error reading key: No public key
-tru::1:1616373715:0:3:1:5'''
+GPG_NOKEY_OUTPUT = """
+gpg: error reading key: No public key
+tru::1:1616373715:0:3:1:5
+""".strip()

 # pacman-key output (successful invocation)
-PACMAN_KEY_SUCCESS = '''==> Updating trust database...
-gpg: next trustdb check due at 2021-08-02'''
+PACMAN_KEY_SUCCESS = """
+==> Updating trust database...
+gpg: next trustdb check due at 2021-08-02
+""".strip()

 # expected command for gpg --list-keys KEYID
 RUN_CMD_LISTKEYS = [
     MOCK_BIN_PATH,
+    '--homedir=/etc/pacman.d/gnupg',
+    '--no-permission-warning',
     '--with-colons',
+    '--quiet',
     '--batch',
     '--no-tty',
     '--no-default-keyring',
-    '--keyring=/etc/pacman.d/gnupg/pubring.gpg',
     '--list-keys',
     TESTING_KEYID,
 ]
@@ -52,10 +105,12 @@ RUN_CMD_LISTKEYS = [
 # expected command for gpg --show-keys KEYFILE
 RUN_CMD_SHOW_KEYFILE = [
     MOCK_BIN_PATH,
+    '--no-permission-warning',
     '--with-colons',
-    '--with-fingerprint',
+    '--quiet',
     '--batch',
     '--no-tty',
+    '--with-fingerprint',
     '--show-keys',
     TESTING_KEYFILE_PATH,
 ]
@@ -69,6 +124,29 @@ RUN_CMD_LSIGN_KEY = [
     TESTING_KEYID,
 ]

+RUN_CMD_LIST_SECRET_KEY = [
+    MOCK_BIN_PATH,
+    '--homedir=/etc/pacman.d/gnupg',
+    '--no-permission-warning',
+    '--with-colons',
+    '--quiet',
+    '--batch',
+    '--no-tty',
+    '--list-secret-key',
+]
+
+# expected command for gpg --check-signatures
+RUN_CMD_CHECK_SIGNATURES = [
+    MOCK_BIN_PATH,
+    '--homedir=/etc/pacman.d/gnupg',
+    '--no-permission-warning',
+    '--with-colons',
+    '--quiet',
+    '--batch',
+    '--no-tty',
+    '--check-signatures',
+    TESTING_KEYID,
+]

 TESTCASES = [
     #
@@ -152,7 +230,7 @@ TESTCASES = [
                    {'check_rc': False},
                    (
                        0,
-                        GPG_SHOWKEY_OUTPUT,
+                        GPG_SHOWKEY_OUTPUT_EXPIRED,
                        '',
                    ),
                ),
@@ -222,7 +300,7 @@ TESTCASES = [
                    {'check_rc': False},
                    (
                        0,
-                        GPG_SHOWKEY_OUTPUT,
+                        GPG_SHOWKEY_OUTPUT_EXPIRED,
                        '',
                    ),
                ),
@@ -248,7 +326,79 @@ TESTCASES = [
                    {'check_rc': False},
                    (
                        0,
-                        GPG_SHOWKEY_OUTPUT,
+                        GPG_SHOWKEY_OUTPUT_EXPIRED,
+                        '',
+                    ),
+                ),
+            ],
+            'changed': False,
+        },
+    ],
+    # state present, ensure_trusted & key expired
+    [
+        {
+            'state': 'present',
+            'ensure_trusted': True,
+            'id': TESTING_KEYID,
+            'data': 'FAKEDATA',
+            '_ansible_check_mode': True,
+        },
+        {
+            'id': 'state_present_trusted_key_expired',
+            'run_command.calls': [
+                (
+                    RUN_CMD_LISTKEYS,
+                    {
+                        'check_rc': False,
+                    },
+                    (
+                        0,
+                        GPG_SHOWKEY_OUTPUT_EXPIRED,
+                        '',
+                    ),
+                ),
+            ],
+            'changed': True,
+        },
+    ],
+    # state present & key trusted
+    [
+        {
+            'state': 'present',
+            'ensure_trusted': True,
+            'id': TESTING_KEYID,
+            'data': 'FAKEDATA',
+            '_ansible_check_mode': True,
+        },
+        {
+            'id': 'state_present_and_key_trusted',
+            'run_command.calls': [
+                (
+                    RUN_CMD_LISTKEYS,
+                    {
+                        'check_rc': False,
+                    },
+                    (
+                        0,
+                        GPG_SHOWKEY_OUTPUT_TRUSTED,
+                        '',
+                    ),
+                ),
+                (
+                    RUN_CMD_CHECK_SIGNATURES,
+                    {},
+                    (
+                        0,
+                        GPG_CHECK_SIGNATURES_OUTPUT,
+                        '',
+                    ),
+                ),
+                (
+                    RUN_CMD_LIST_SECRET_KEY,
+                    {},
+                    (
+                        0,
+                        GPG_LIST_SECRET_KEY_OUTPUT,
                        '',
                    ),
                ),
@@ -270,7 +420,7 @@ TESTCASES = [
                    {'check_rc': False},
                    (
                        0,
-                        GPG_SHOWKEY_OUTPUT,
+                        GPG_SHOWKEY_OUTPUT_EXPIRED,
                        '',
                    ),
                ),
@@ -339,7 +489,7 @@ TESTCASES = [
                    {'check_rc': True},
                    (
                        0,
-                        GPG_SHOWKEY_OUTPUT,
+                        GPG_SHOWKEY_OUTPUT_EXPIRED,
                        '',
                    ),
                ),
@@ -397,7 +547,7 @@ TESTCASES = [
{'check_rc': True}, ( 0, - GPG_SHOWKEY_OUTPUT.replace('61B7B526D98F0353', '61B7B526D98F0354'), + GPG_SHOWKEY_OUTPUT_EXPIRED.replace('61B7B526D98F0353', '61B7B526D98F0354'), '', ), ), @@ -485,7 +635,7 @@ gpg: imported: 1 {'check_rc': True}, ( 0, - GPG_SHOWKEY_OUTPUT, + GPG_SHOWKEY_OUTPUT_EXPIRED, '', ), ), diff --git a/tests/unit/plugins/modules/test_pagerduty.py b/tests/unit/plugins/modules/test_pagerduty.py index 75987d3df0..a65e53b3d0 100644 --- a/tests/unit/plugins/modules/test_pagerduty.py +++ b/tests/unit/plugins/modules/test_pagerduty.py @@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest from ansible_collections.community.general.plugins.modules import pagerduty import json diff --git a/tests/unit/plugins/modules/test_pagerduty_alert.py b/tests/unit/plugins/modules/test_pagerduty_alert.py index 7a1e951a24..e2872e4aaf 100644 --- a/tests/unit/plugins/modules/test_pagerduty_alert.py +++ b/tests/unit/plugins/modules/test_pagerduty_alert.py @@ -5,12 +5,12 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest from ansible_collections.community.general.plugins.modules import pagerduty_alert import json import pytest -from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args class PagerDutyAlertsTest(unittest.TestCase): @@ -64,22 +64,22 @@ class TestPagerDutyAlertModule(ModuleTestCase): def test_module_fail_when_required_args_missing(self): with self.assertRaises(AnsibleFailJson): - set_module_args({}) - self.module.main() + with set_module_args({}): + self.module.main() def test_ensure_alert_created_with_minimal_data(self): - set_module_args({ + with set_module_args({ 'state': 'triggered', 'api_version': 'v2', 'integration_key': 'test', 'source': 'My Ansible Script', 'desc': 'Description for alert' - }) + }): - with patch.object(pagerduty_alert, 'fetch_url') as fetch_url_mock: - fetch_url_mock.return_value = (Response(), {"status": 202}) - with self.assertRaises(AnsibleExitJson): - self.module.main() + with patch.object(pagerduty_alert, 'fetch_url') as fetch_url_mock: + fetch_url_mock.return_value = (Response(), {"status": 202}) + with self.assertRaises(AnsibleExitJson): + self.module.main() assert fetch_url_mock.call_count == 1 url = fetch_url_mock.call_args[0][1] @@ -95,7 +95,7 @@ class TestPagerDutyAlertModule(ModuleTestCase): assert data['payload']['timestamp'] is not None def test_ensure_alert_created_with_full_data(self): - set_module_args({ + with set_module_args({ 'api_version': 'v2', 'component': 'mysql', 'custom_details': {'environment': 'production', 'notes': 'this is a test note'}, @@ -106,12 +106,12 @@ class TestPagerDutyAlertModule(ModuleTestCase): 'link_text': 'PagerDuty', 'state': 'triggered', 'source': 'My Ansible Script', - }) + }): - 
with patch.object(pagerduty_alert, 'fetch_url') as fetch_url_mock: - fetch_url_mock.return_value = (Response(), {"status": 202}) - with self.assertRaises(AnsibleExitJson): - self.module.main() + with patch.object(pagerduty_alert, 'fetch_url') as fetch_url_mock: + fetch_url_mock.return_value = (Response(), {"status": 202}) + with self.assertRaises(AnsibleExitJson): + self.module.main() assert fetch_url_mock.call_count == 1 url = fetch_url_mock.call_args[0][1] @@ -130,17 +130,17 @@ class TestPagerDutyAlertModule(ModuleTestCase): assert data['links'][0]['text'] == 'PagerDuty' def test_ensure_alert_acknowledged(self): - set_module_args({ + with set_module_args({ 'state': 'acknowledged', 'api_version': 'v2', 'integration_key': 'test', 'incident_key': 'incident_test_id', - }) + }): - with patch.object(pagerduty_alert, 'fetch_url') as fetch_url_mock: - fetch_url_mock.return_value = (Response(), {"status": 202}) - with self.assertRaises(AnsibleExitJson): - self.module.main() + with patch.object(pagerduty_alert, 'fetch_url') as fetch_url_mock: + fetch_url_mock.return_value = (Response(), {"status": 202}) + with self.assertRaises(AnsibleExitJson): + self.module.main() assert fetch_url_mock.call_count == 1 url = fetch_url_mock.call_args[0][1] diff --git a/tests/unit/plugins/modules/test_pagerduty_change.py b/tests/unit/plugins/modules/test_pagerduty_change.py index d596d6ab83..bbc54747a4 100644 --- a/tests/unit/plugins/modules/test_pagerduty_change.py +++ b/tests/unit/plugins/modules/test_pagerduty_change.py @@ -7,9 +7,9 @@ __metaclass__ = type import json import pytest -from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch from ansible_collections.community.general.plugins.modules import pagerduty_change -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args class TestPagerDutyChangeModule(ModuleTestCase): @@ -26,19 +26,19 @@ class TestPagerDutyChangeModule(ModuleTestCase): def test_module_fail_when_required_args_missing(self): with self.assertRaises(AnsibleFailJson): - set_module_args({}) - self.module.main() + with set_module_args({}): + self.module.main() def test_ensure_change_event_created_with_minimal_data(self): - set_module_args({ + with set_module_args({ 'integration_key': 'test', 'summary': 'Testing' - }) + }): - with patch.object(pagerduty_change, 'fetch_url') as fetch_url_mock: - fetch_url_mock.return_value = (None, {"status": 202}) - with self.assertRaises(AnsibleExitJson): - self.module.main() + with patch.object(pagerduty_change, 'fetch_url') as fetch_url_mock: + fetch_url_mock.return_value = (None, {"status": 202}) + with self.assertRaises(AnsibleExitJson): + self.module.main() assert fetch_url_mock.call_count == 1 url = fetch_url_mock.call_args[0][1] @@ -51,7 +51,7 @@ class TestPagerDutyChangeModule(ModuleTestCase): assert data['payload']['source'] == 'Ansible' def test_ensure_change_event_created_with_full_data(self): - set_module_args({ + with set_module_args({ 'integration_key': 'test', 'summary': 'Testing', 'source': 'My Ansible Script', @@ -61,12 +61,12 @@ class TestPagerDutyChangeModule(ModuleTestCase): 'environment': 'production', 'link_url': 'https://pagerduty.com', 'link_text': 'PagerDuty' - }) + }): - with 
patch.object(pagerduty_change, 'fetch_url') as fetch_url_mock: - fetch_url_mock.return_value = (None, {"status": 202}) - with self.assertRaises(AnsibleExitJson): - self.module.main() + with patch.object(pagerduty_change, 'fetch_url') as fetch_url_mock: + fetch_url_mock.return_value = (None, {"status": 202}) + with self.assertRaises(AnsibleExitJson): + self.module.main() assert fetch_url_mock.call_count == 1 url = fetch_url_mock.call_args[0][1] diff --git a/tests/unit/plugins/modules/test_pamd.py b/tests/unit/plugins/modules/test_pamd.py index 4c49cebed7..274a50e553 100644 --- a/tests/unit/plugins/modules/test_pamd.py +++ b/tests/unit/plugins/modules/test_pamd.py @@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest from ansible_collections.community.general.plugins.modules.pamd import PamdRule from ansible_collections.community.general.plugins.modules.pamd import PamdLine diff --git a/tests/unit/plugins/modules/test_parted.py b/tests/unit/plugins/modules/test_parted.py index 1e010343bd..97c009d3e7 100644 --- a/tests/unit/plugins/modules/test_parted.py +++ b/tests/unit/plugins/modules/test_parted.py @@ -5,11 +5,11 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible_collections.community.general.tests.unit.compat.mock import patch, call +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch, call from ansible_collections.community.general.plugins.modules import parted as parted_module from ansible_collections.community.general.plugins.modules.parted import parse_parted_version from ansible_collections.community.general.plugins.modules.parted import parse_partition_info -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args # Example of output : parted -s -m /dev/sdb -- unit 'MB' print parted_output1 = """ @@ -187,93 +187,93 @@ class TestParted(ModuleTestCase): self.assertEqual(parse_partition_info(parted_output2, 'MB'), parted_dict2) def test_partition_already_exists(self): - set_module_args({ + with set_module_args({ 'device': '/dev/sdb', 'number': 1, 'state': 'present', - }) - with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1): - self.execute_module(changed=False) + }): + with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1): + self.execute_module(changed=False) def test_create_new_partition(self): - set_module_args({ + with set_module_args({ 'device': '/dev/sdb', 'number': 4, 'state': 'present', - }) - with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1): - self.execute_module(changed=True, script='unit KiB mkpart primary 0% 100%') + }): + with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1): + self.execute_module(changed=True, script='unit KiB mkpart primary 0% 100%') def test_create_new_partition_1G(self): - set_module_args({ + with set_module_args({ 'device': '/dev/sdb', 
'number': 4, 'state': 'present', 'part_end': '1GiB', - }) - with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1): - self.execute_module(changed=True, script='unit KiB mkpart primary 0% 1GiB') + }): + with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1): + self.execute_module(changed=True, script='unit KiB mkpart primary 0% 1GiB') def test_create_new_partition_minus_1G(self): - set_module_args({ + with set_module_args({ 'device': '/dev/sdb', 'number': 4, 'state': 'present', 'fs_type': 'ext2', 'part_start': '-1GiB', - }) - with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1): - self.execute_module(changed=True, script='unit KiB mkpart primary ext2 -1GiB 100%') + }): + with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1): + self.execute_module(changed=True, script='unit KiB mkpart primary ext2 -1GiB 100%') def test_remove_partition_number_1(self): - set_module_args({ + with set_module_args({ 'device': '/dev/sdb', 'number': 1, 'state': 'absent', - }) - with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1): - self.execute_module(changed=True, script='rm 1') + }): + with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1): + self.execute_module(changed=True, script='rm 1') def test_resize_partition(self): - set_module_args({ + with set_module_args({ 'device': '/dev/sdb', 'number': 3, 'state': 'present', 'part_end': '100%', 'resize': True - }) - with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1): - self.execute_module(changed=True, script='resizepart 3 100%') + }): + with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1): + self.execute_module(changed=True, script='resizepart 3 100%') def test_change_flag(self): # Flags are set in a second run of parted(). # Between the two runs, the partition dict is updated. # use checkmode here allow us to continue even if the dictionary is # not updated. - set_module_args({ + with set_module_args({ 'device': '/dev/sdb', 'number': 3, 'state': 'present', 'flags': ['lvm', 'boot'], '_ansible_check_mode': True, - }) + }): - with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1): - self.parted.reset_mock() - self.execute_module(changed=True) - # When using multiple flags: - # order of execution is non deterministic, because set() operations are used in - # the current implementation. - expected_calls_order1 = [call('unit KiB set 3 lvm on set 3 boot on ', - '/dev/sdb', 'optimal')] - expected_calls_order2 = [call('unit KiB set 3 boot on set 3 lvm on ', - '/dev/sdb', 'optimal')] - self.assertTrue(self.parted.mock_calls == expected_calls_order1 or - self.parted.mock_calls == expected_calls_order2) + with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1): + self.parted.reset_mock() + self.execute_module(changed=True) + # When using multiple flags: + # order of execution is non deterministic, because set() operations are used in + # the current implementation. 
+ expected_calls_order1 = [call('unit KiB set 3 lvm on set 3 boot on ', + '/dev/sdb', 'optimal')] + expected_calls_order2 = [call('unit KiB set 3 boot on set 3 lvm on ', + '/dev/sdb', 'optimal')] + self.assertTrue(self.parted.mock_calls == expected_calls_order1 or + self.parted.mock_calls == expected_calls_order2) def test_create_new_primary_lvm_partition(self): # use check_mode, see previous test comment - set_module_args({ + with set_module_args({ 'device': '/dev/sdb', 'number': 4, 'flags': ["boot"], @@ -281,15 +281,15 @@ class TestParted(ModuleTestCase): 'part_start': '257GiB', 'fs_type': 'ext3', '_ansible_check_mode': True, - }) - with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1): - self.execute_module(changed=True, script='unit KiB mkpart primary ext3 257GiB 100% unit KiB set 4 boot on') + }): + with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1): + self.execute_module(changed=True, script='unit KiB mkpart primary ext3 257GiB 100% unit KiB set 4 boot on') def test_create_label_gpt(self): # Like previous test, current implementation use parted to create the partition and # then retrieve and update the dictionary. Use check_mode to force to continue even if # dictionary is not updated. - set_module_args({ + with set_module_args({ 'device': '/dev/sdb', 'number': 1, 'flags': ["lvm"], @@ -297,48 +297,48 @@ class TestParted(ModuleTestCase): 'name': 'lvmpartition', 'state': 'present', '_ansible_check_mode': True, - }) - with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict2): - self.execute_module(changed=True, script='unit KiB mklabel gpt mkpart primary 0% 100% unit KiB name 1 \'"lvmpartition"\' set 1 lvm on') + }): + with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict2): + self.execute_module(changed=True, script='unit KiB mklabel gpt mkpart primary 0% 100% unit KiB name 1 \'"lvmpartition"\' set 1 lvm on') def test_change_label_gpt(self): # When partitions already exists and label is changed, mkpart should be called even when partition already exists, # because new empty label will be created anyway - set_module_args({ + with set_module_args({ 'device': '/dev/sdb', 'number': 1, 'state': 'present', 'label': 'gpt', '_ansible_check_mode': True, - }) - with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1): - self.execute_module(changed=True, script='unit KiB mklabel gpt mkpart primary 0% 100%') + }): + with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1): + self.execute_module(changed=True, script='unit KiB mklabel gpt mkpart primary 0% 100%') def test_check_mode_unchanged(self): # Test that get_device_info result is checked in check mode too # No change on partition 1 - set_module_args({ + with set_module_args({ 'device': '/dev/sdb', 'number': 1, 'state': 'present', 'flags': ['some_flag'], '_ansible_check_mode': True, - }) - with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict3): - self.execute_module(changed=False) + }): + with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict3): + self.execute_module(changed=False) def test_check_mode_changed(self): # Test that get_device_info result is 
checked in check mode too # Flag change on partition 1 - set_module_args({ + with set_module_args({ 'device': '/dev/sdb', 'number': 1, 'state': 'present', 'flags': ['other_flag'], '_ansible_check_mode': True, - }) - with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict3): - self.execute_module(changed=True) + }): + with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict3): + self.execute_module(changed=True) def test_version_info(self): """Test that the parse_parted_version returns the expected tuple""" diff --git a/tests/unit/plugins/modules/test_pkgin.py b/tests/unit/plugins/modules/test_pkgin.py index dea5a05b5c..750c10da62 100644 --- a/tests/unit/plugins/modules/test_pkgin.py +++ b/tests/unit/plugins/modules/test_pkgin.py @@ -5,8 +5,8 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible_collections.community.general.tests.unit.compat import mock -from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat import mock +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest from ansible_collections.community.general.plugins.modules import pkgin diff --git a/tests/unit/plugins/modules/test_pmem.py b/tests/unit/plugins/modules/test_pmem.py index cea673da0b..8f3788e407 100644 --- a/tests/unit/plugins/modules/test_pmem.py +++ b/tests/unit/plugins/modules/test_pmem.py @@ -11,8 +11,8 @@ import json pytest.importorskip('xmltodict') -from ansible_collections.community.general.tests.unit.plugins.modules.utils import ModuleTestCase, set_module_args, AnsibleFailJson, AnsibleExitJson -from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ModuleTestCase, set_module_args, AnsibleFailJson, AnsibleExitJson +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch from ansible_collections.community.general.plugins.modules import pmem as pmem_module @@ -321,81 +321,81 @@ class TestPmem(ModuleTestCase): def test_fail_when_required_args_missing(self): with self.assertRaises(AnsibleFailJson): - set_module_args({}) - pmem_module.main() + with set_module_args({}): + pmem_module.main() def test_fail_when_appdirect_only(self): with self.assertRaises(AnsibleFailJson): - set_module_args({ + with set_module_args({ 'appdirect': 10, - }) - pmem_module.main() + }): + pmem_module.main() def test_fail_when_MemosyMode_only(self): with self.assertRaises(AnsibleFailJson): - set_module_args({ + with set_module_args({ 'memorymode': 70, - }) - pmem_module.main() + }): + pmem_module.main() def test_fail_when_reserved_only(self): with self.assertRaises(AnsibleFailJson): - set_module_args({ + with set_module_args({ 'reserved': 10, - }) - pmem_module.main() + }): + pmem_module.main() def test_fail_when_appdirect_memorymode_reserved_total_not_100(self): with self.assertRaises(AnsibleFailJson): - set_module_args({ + with set_module_args({ 'appdirect': 10, 'memorymode': 70, 'reserved': 10, - }) - pmem_module.main() + }): + pmem_module.main() def test_when_appdirect_memorymode(self): - set_module_args({ + with set_module_args({ 'appdirect': 10, 'memorymode': 70, - }) - with patch( - 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', - 
side_effect=[goal_plain, goal, dimmlist]): - with self.assertRaises(AnsibleExitJson) as result: - pmem_module.main() - self.result_check(result, False, [25769803776], [188978561024], [328230764544]) + }): + with patch( + 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', + side_effect=[goal_plain, goal, dimmlist]): + with self.assertRaises(AnsibleExitJson) as result: + pmem_module.main() + self.result_check(result, False, [25769803776], [188978561024], [328230764544]) def test_when_appdirect_memorymode_reserved(self): - set_module_args({ + with set_module_args({ 'appdirect': 10, 'memorymode': 70, 'reserved': 20, - }) - with patch( - 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', - side_effect=[goal_plain, goal, dimmlist]): - with self.assertRaises(AnsibleExitJson) as result: - pmem_module.main() - self.result_check(result, False, [25769803776], [188978561024], [328230764544]) + }): + with patch( + 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', + side_effect=[goal_plain, goal, dimmlist]): + with self.assertRaises(AnsibleExitJson) as result: + pmem_module.main() + self.result_check(result, False, [25769803776], [188978561024], [328230764544]) def test_when_appdirect_notinterleaved_memorymode_reserved(self): - set_module_args({ + with set_module_args({ 'appdirect': 10, 'appdirect_interleaved': False, 'memorymode': 70, 'reserved': 20, - }) - with patch( - 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', - side_effect=[goal_plain, goal, dimmlist]): - with self.assertRaises(AnsibleExitJson) as result: - pmem_module.main() - self.result_check(result, False, [25769803776], [188978561024], [328230764544]) + }): + with patch( + 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', + side_effect=[goal_plain, goal, dimmlist]): + with self.assertRaises(AnsibleExitJson) as result: + pmem_module.main() + self.result_check(result, False, [25769803776], [188978561024], [328230764544]) def test_fail_when_socket_id_appdirect(self): with self.assertRaises(AnsibleFailJson): - set_module_args({ + with set_module_args({ 'socket': [ { 'id': 0, @@ -406,12 +406,12 @@ class TestPmem(ModuleTestCase): 'appdirect': 10, }, ], - }) - pmem_module.main() + }): + pmem_module.main() def test_fail_when_socket0_id_memorymode_socket1_id_appdirect(self): with self.assertRaises(AnsibleFailJson): - set_module_args({ + with set_module_args({ 'socket': [ { 'id': 0, @@ -422,12 +422,12 @@ class TestPmem(ModuleTestCase): 'appdirect': 10, }, ], - }) - pmem_module.main() + }): + pmem_module.main() def test_fail_when_socket0_without_id(self): with self.assertRaises(AnsibleFailJson): - set_module_args({ + with set_module_args({ 'socket': [ { 'appdirect': 10, @@ -439,11 +439,11 @@ class TestPmem(ModuleTestCase): 'memorymode': 70, }, ], - }) - pmem_module.main() + }): + pmem_module.main() def test_when_socket0_and_1_appdirect_memorymode(self): - set_module_args({ + with set_module_args({ 'socket': [ { 'id': 0, @@ -456,18 +456,18 @@ class TestPmem(ModuleTestCase): 'memorymode': 70, }, ], - }) - with patch( - 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', - side_effect=[ - show_skt, goal_plain_sk0, goal_sk0, dimmlist_sk0, goal_plain_sk1, goal_sk1, dimmlist_sk1]): - with self.assertRaises(AnsibleExitJson) as result: - pmem_module.main() - self.result_check( - result, 
True, [12884901888, 12884901888], [94489280512, 94489280512], [164115382272, 164115382272]) + }): + with patch( + 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', + side_effect=[ + show_skt, goal_plain_sk0, goal_sk0, dimmlist_sk0, goal_plain_sk1, goal_sk1, dimmlist_sk1]): + with self.assertRaises(AnsibleExitJson) as result: + pmem_module.main() + self.result_check( + result, True, [12884901888, 12884901888], [94489280512, 94489280512], [164115382272, 164115382272]) def test_when_socket0_and_1_appdirect_memorymode_reserved(self): - set_module_args({ + with set_module_args({ 'socket': [ { 'id': 0, @@ -482,18 +482,18 @@ class TestPmem(ModuleTestCase): 'reserved': 20, }, ], - }) - with patch( - 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', - side_effect=[ - show_skt, goal_plain_sk0, goal_sk0, dimmlist_sk0, goal_plain_sk1, goal_sk1, dimmlist_sk1]): - with self.assertRaises(AnsibleExitJson) as result: - pmem_module.main() - self.result_check( - result, True, [12884901888, 12884901888], [94489280512, 94489280512], [164115382272, 164115382272]) + }): + with patch( + 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', + side_effect=[ + show_skt, goal_plain_sk0, goal_sk0, dimmlist_sk0, goal_plain_sk1, goal_sk1, dimmlist_sk1]): + with self.assertRaises(AnsibleExitJson) as result: + pmem_module.main() + self.result_check( + result, True, [12884901888, 12884901888], [94489280512, 94489280512], [164115382272, 164115382272]) def test_when_socket0_appdirect_notinterleaved_memorymode_reserved_socket1_appdirect_memorymode_reserved(self): - set_module_args({ + with set_module_args({ 'socket': [ { 'id': 0, @@ -509,19 +509,19 @@ class TestPmem(ModuleTestCase): 'reserved': 20, }, ], - }) - with patch( - 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', - side_effect=[ - show_skt, goal_plain_sk0, goal_sk0, dimmlist_sk0, goal_plain_sk1, goal_sk1, dimmlist_sk1]): - with self.assertRaises(AnsibleExitJson) as result: - pmem_module.main() - self.result_check( - result, True, [12884901888, 12884901888], [94489280512, 94489280512], [164115382272, 164115382272]) + }): + with patch( + 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', + side_effect=[ + show_skt, goal_plain_sk0, goal_sk0, dimmlist_sk0, goal_plain_sk1, goal_sk1, dimmlist_sk1]): + with self.assertRaises(AnsibleExitJson) as result: + pmem_module.main() + self.result_check( + result, True, [12884901888, 12884901888], [94489280512, 94489280512], [164115382272, 164115382272]) def test_fail_when_namespace_without_mode(self): with self.assertRaises(AnsibleFailJson): - set_module_args({ + with set_module_args({ 'namespace': [ { 'size': '1GB', @@ -532,12 +532,12 @@ class TestPmem(ModuleTestCase): 'type': 'blk', }, ], - }) - pmem_module.main() + }): + pmem_module.main() def test_fail_when_region_is_empty(self): with self.assertRaises(AnsibleFailJson): - set_module_args({ + with set_module_args({ 'namespace': [ { 'size': '1GB', @@ -545,15 +545,15 @@ class TestPmem(ModuleTestCase): 'mode': 'sector', }, ], - }) - with patch( - 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', - side_effect=[ndctl_region_empty]): - pmem_module.main() + }): + with patch( + 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', + side_effect=[ndctl_region_empty]): + 
pmem_module.main() def test_fail_when_namespace_invalid_size(self): with self.assertRaises(AnsibleFailJson): - set_module_args({ + with set_module_args({ 'namespace': [ { 'size': '1XXX', @@ -561,15 +561,15 @@ class TestPmem(ModuleTestCase): 'mode': 'sector', }, ], - }) - with patch( - 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', - side_effect=[ndctl_region]): - pmem_module.main() + }): + with patch( + 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', + side_effect=[ndctl_region]): + pmem_module.main() def test_fail_when_size_is_invalid_alignment(self): with self.assertRaises(AnsibleFailJson): - set_module_args({ + with set_module_args({ 'namespace': [ { 'size': '400MB', @@ -582,15 +582,15 @@ class TestPmem(ModuleTestCase): 'mode': 'sector' }, ], - }) - with patch( - 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', - side_effect=[ndctl_region]): - pmem_module.main() + }): + with patch( + 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', + side_effect=[ndctl_region]): + pmem_module.main() def test_fail_when_blk_is_unsupported_type(self): with self.assertRaises(AnsibleFailJson): - set_module_args({ + with set_module_args({ 'namespace': [ { 'size': '4GB', @@ -603,15 +603,15 @@ class TestPmem(ModuleTestCase): 'mode': 'sector' }, ], - }) - with patch( - 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', - side_effect=[ndctl_region]): - pmem_module.main() + }): + with patch( + 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', + side_effect=[ndctl_region]): + pmem_module.main() def test_fail_when_size_isnot_set_to_multiple_namespaces(self): with self.assertRaises(AnsibleFailJson): - set_module_args({ + with set_module_args({ 'namespace': [ { 'type': 'pmem', @@ -623,15 +623,15 @@ class TestPmem(ModuleTestCase): 'mode': 'sector' }, ], - }) - with patch( - 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', - side_effect=[ndctl_region]): - pmem_module.main() + }): + with patch( + 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', + side_effect=[ndctl_region]): + pmem_module.main() def test_fail_when_size_of_namespace_over_available(self): with self.assertRaises(AnsibleFailJson): - set_module_args({ + with set_module_args({ 'namespace': [ { 'size': '400GB', @@ -644,30 +644,30 @@ class TestPmem(ModuleTestCase): 'mode': 'sector' }, ], - }) - with patch( - 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', - side_effect=[ndctl_region]): - pmem_module.main() + }): + with patch( + 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', + side_effect=[ndctl_region]): + pmem_module.main() def test_when_namespace0_without_size(self): - set_module_args({ + with set_module_args({ 'namespace': [ { 'type': 'pmem', 'mode': 'sector' }, ], - }) - with patch( - 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', - side_effect=[ndctl_region, ndctl_create_without_size, ndctl_list_N]): - with self.assertRaises(AnsibleExitJson) as result: - pmem_module.main() - self.result_check_ns(result, ndctl_list_N) + }): + with patch( + 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', + 
side_effect=[ndctl_region, ndctl_create_without_size, ndctl_list_N]): + with self.assertRaises(AnsibleExitJson) as result: + pmem_module.main() + self.result_check_ns(result, ndctl_list_N) def test_when_namespace0_with_namespace_append(self): - set_module_args({ + with set_module_args({ 'namespace': [ { 'size': '640MB', @@ -676,16 +676,16 @@ class TestPmem(ModuleTestCase): }, ], 'namespace_append': True, - }) - with patch( - 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', - side_effect=[ndctl_region, ndctl_create_640M, ndctl_list_N_two_namespaces]): - with self.assertRaises(AnsibleExitJson) as result: - pmem_module.main() - self.result_check_ns(result, ndctl_list_N_two_namespaces) + }): + with patch( + 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', + side_effect=[ndctl_region, ndctl_create_640M, ndctl_list_N_two_namespaces]): + with self.assertRaises(AnsibleExitJson) as result: + pmem_module.main() + self.result_check_ns(result, ndctl_list_N_two_namespaces) def test_when_namespace0_1GiB_pmem_sector_namespace1_640MiB_pmem_raw(self): - set_module_args({ + with set_module_args({ 'namespace': [ { 'size': '1GB', @@ -698,10 +698,10 @@ class TestPmem(ModuleTestCase): 'mode': 'raw', }, ], - }) - with patch( - 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', - side_effect=[ndctl_region, ndctl_create_1G, ndctl_create_640M, ndctl_list_N_two_namespaces]): - with self.assertRaises(AnsibleExitJson) as result: - pmem_module.main() - self.result_check_ns(result, ndctl_list_N_two_namespaces) + }): + with patch( + 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', + side_effect=[ndctl_region, ndctl_create_1G, ndctl_create_640M, ndctl_list_N_two_namespaces]): + with self.assertRaises(AnsibleExitJson) as result: + pmem_module.main() + self.result_check_ns(result, ndctl_list_N_two_namespaces) diff --git a/tests/unit/plugins/modules/test_pritunl_org.py b/tests/unit/plugins/modules/test_pritunl_org.py index 94809784b1..354c81eb8b 100644 --- a/tests/unit/plugins/modules/test_pritunl_org.py +++ b/tests/unit/plugins/modules/test_pritunl_org.py @@ -12,14 +12,14 @@ from ansible.module_utils.six import iteritems from ansible_collections.community.general.plugins.modules import ( pritunl_org, ) -from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch from ansible_collections.community.general.tests.unit.plugins.module_utils.net_tools.pritunl.test_api import ( PritunlDeleteOrganizationMock, PritunlListOrganizationMock, PritunlListOrganizationAfterPostMock, PritunlPostOrganizationMock, ) -from ansible_collections.community.general.tests.unit.plugins.modules.utils import ( +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( AnsibleExitJson, AnsibleFailJson, ModuleTestCase, @@ -64,14 +64,14 @@ class TestPritunlOrg(ModuleTestCase): def test_without_parameters(self): """Test without parameters""" - set_module_args({}) - with self.assertRaises(AnsibleFailJson): - self.module.main() + with set_module_args({}): + with self.assertRaises(AnsibleFailJson): + self.module.main() def test_present(self): """Test Pritunl organization creation.""" org_params = {"name": "NewOrg"} - set_module_args( + with set_module_args( dict_merge( { "pritunl_api_token": "token", @@ -80,32 +80,32 @@ class 
TestPritunlOrg(ModuleTestCase): }, org_params, ) - ) - # Test creation - with self.patch_get_pritunl_organizations( - side_effect=PritunlListOrganizationMock - ) as mock_get: - with self.patch_add_pritunl_organization( - side_effect=PritunlPostOrganizationMock - ) as mock_add: - with self.assertRaises(AnsibleExitJson) as create_result: - self.module.main() + ): + # Test creation + with self.patch_get_pritunl_organizations( + side_effect=PritunlListOrganizationMock + ) as mock_get: + with self.patch_add_pritunl_organization( + side_effect=PritunlPostOrganizationMock + ) as mock_add: + with self.assertRaises(AnsibleExitJson) as create_result: + self.module.main() - create_exc = create_result.exception.args[0] + create_exc = create_result.exception.args[0] - self.assertTrue(create_exc["changed"]) - self.assertEqual(create_exc["response"]["name"], org_params["name"]) - self.assertEqual(create_exc["response"]["user_count"], 0) + self.assertTrue(create_exc["changed"]) + self.assertEqual(create_exc["response"]["name"], org_params["name"]) + self.assertEqual(create_exc["response"]["user_count"], 0) - # Test module idempotency - with self.patch_get_pritunl_organizations( - side_effect=PritunlListOrganizationAfterPostMock - ) as mock_get: - with self.patch_add_pritunl_organization( - side_effect=PritunlPostOrganizationMock - ) as mock_add: - with self.assertRaises(AnsibleExitJson) as idempotent_result: - self.module.main() + # Test module idempotency + with self.patch_get_pritunl_organizations( + side_effect=PritunlListOrganizationAfterPostMock + ) as mock_get: + with self.patch_add_pritunl_organization( + side_effect=PritunlPostOrganizationMock + ) as mock_add: + with self.assertRaises(AnsibleExitJson) as idempotent_result: + self.module.main() idempotent_exc = idempotent_result.exception.args[0] @@ -120,7 +120,7 @@ class TestPritunlOrg(ModuleTestCase): def test_absent(self): """Test organization removal from Pritunl.""" org_params = {"name": "NewOrg"} - set_module_args( + with set_module_args( dict_merge( { "state": "absent", @@ -130,31 +130,31 @@ class TestPritunlOrg(ModuleTestCase): }, org_params, ) - ) - # Test deletion - with self.patch_get_pritunl_organizations( - side_effect=PritunlListOrganizationAfterPostMock - ) as mock_get: - with self.patch_delete_pritunl_organization( - side_effect=PritunlDeleteOrganizationMock - ) as mock_delete: - with self.assertRaises(AnsibleExitJson) as delete_result: - self.module.main() + ): + # Test deletion + with self.patch_get_pritunl_organizations( + side_effect=PritunlListOrganizationAfterPostMock + ) as mock_get: + with self.patch_delete_pritunl_organization( + side_effect=PritunlDeleteOrganizationMock + ) as mock_delete: + with self.assertRaises(AnsibleExitJson) as delete_result: + self.module.main() - delete_exc = delete_result.exception.args[0] + delete_exc = delete_result.exception.args[0] - self.assertTrue(delete_exc["changed"]) - self.assertEqual(delete_exc["response"], {}) + self.assertTrue(delete_exc["changed"]) + self.assertEqual(delete_exc["response"], {}) - # Test module idempotency - with self.patch_get_pritunl_organizations( - side_effect=PritunlListOrganizationMock - ) as mock_get: - with self.patch_delete_pritunl_organization( - side_effect=PritunlDeleteOrganizationMock - ) as mock_add: - with self.assertRaises(AnsibleExitJson) as idempotent_result: - self.module.main() + # Test module idempotency + with self.patch_get_pritunl_organizations( + side_effect=PritunlListOrganizationMock + ) as mock_get: + with 
self.patch_delete_pritunl_organization( + side_effect=PritunlDeleteOrganizationMock + ) as mock_add: + with self.assertRaises(AnsibleExitJson) as idempotent_result: + self.module.main() idempotent_exc = idempotent_result.exception.args[0] @@ -172,33 +172,31 @@ class TestPritunlOrg(ModuleTestCase): "pritunl_url": "https://pritunl.domain.com", "name": "GumGum", } - set_module_args(module_args) + with set_module_args(module_args): + # Test deletion + with self.patch_get_pritunl_organizations( + side_effect=PritunlListOrganizationMock + ) as mock_get: + with self.patch_delete_pritunl_organization( + side_effect=PritunlDeleteOrganizationMock + ) as mock_delete: + with self.assertRaises(AnsibleFailJson) as failure_result: + self.module.main() - # Test deletion - with self.patch_get_pritunl_organizations( - side_effect=PritunlListOrganizationMock - ) as mock_get: - with self.patch_delete_pritunl_organization( - side_effect=PritunlDeleteOrganizationMock - ) as mock_delete: - with self.assertRaises(AnsibleFailJson) as failure_result: - self.module.main() + failure_exc = failure_result.exception.args[0] - failure_exc = failure_result.exception.args[0] + self.assertRegex(failure_exc["msg"], "Can not remove organization") - self.assertRegex(failure_exc["msg"], "Can not remove organization") - - # Switch force=True which should run successfully - set_module_args(dict_merge(module_args, {"force": True})) - - with self.patch_get_pritunl_organizations( - side_effect=PritunlListOrganizationMock - ) as mock_get: - with self.patch_delete_pritunl_organization( - side_effect=PritunlDeleteOrganizationMock - ) as mock_delete: - with self.assertRaises(AnsibleExitJson) as delete_result: - self.module.main() + # Switch force=True which should run successfully + with set_module_args(dict_merge(module_args, {"force": True})): + with self.patch_get_pritunl_organizations( + side_effect=PritunlListOrganizationMock + ) as mock_get: + with self.patch_delete_pritunl_organization( + side_effect=PritunlDeleteOrganizationMock + ) as mock_delete: + with self.assertRaises(AnsibleExitJson) as delete_result: + self.module.main() delete_exc = delete_result.exception.args[0] diff --git a/tests/unit/plugins/modules/test_pritunl_org_info.py b/tests/unit/plugins/modules/test_pritunl_org_info.py index dc33c3d8c6..c65ea2cfa6 100644 --- a/tests/unit/plugins/modules/test_pritunl_org_info.py +++ b/tests/unit/plugins/modules/test_pritunl_org_info.py @@ -10,12 +10,12 @@ import sys from ansible_collections.community.general.plugins.modules import ( pritunl_org_info, ) -from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch from ansible_collections.community.general.tests.unit.plugins.module_utils.net_tools.pritunl.test_api import ( PritunlListOrganizationMock, PritunlEmptyOrganizationMock, ) -from ansible_collections.community.general.tests.unit.plugins.modules.utils import ( +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( AnsibleExitJson, AnsibleFailJson, ModuleTestCase, @@ -49,9 +49,9 @@ class TestPritunlOrgInfo(ModuleTestCase): with self.patch_get_pritunl_organizations( side_effect=PritunlListOrganizationMock ) as org_mock: - set_module_args({}) - with self.assertRaises(AnsibleFailJson): - self.module.main() + with set_module_args({}): + with self.assertRaises(AnsibleFailJson): + self.module.main() self.assertEqual(org_mock.call_count, 0) @@ -61,14 +61,14 @@ class 
TestPritunlOrgInfo(ModuleTestCase): side_effect=PritunlEmptyOrganizationMock ) as org_mock: with self.assertRaises(AnsibleExitJson) as result: - set_module_args( + with set_module_args( { "pritunl_api_token": "token", "pritunl_api_secret": "secret", "pritunl_url": "https://pritunl.domain.com", } - ) - self.module.main() + ): + self.module.main() self.assertEqual(org_mock.call_count, 1) @@ -81,15 +81,15 @@ class TestPritunlOrgInfo(ModuleTestCase): side_effect=PritunlListOrganizationMock ) as org_mock: with self.assertRaises(AnsibleExitJson) as result: - set_module_args( + with set_module_args( { "pritunl_api_token": "token", "pritunl_api_secret": "secret", "pritunl_url": "https://pritunl.domain.com", "org": "GumGum", } - ) - self.module.main() + ): + self.module.main() self.assertEqual(org_mock.call_count, 1) @@ -102,15 +102,15 @@ class TestPritunlOrgInfo(ModuleTestCase): side_effect=PritunlListOrganizationMock ) as org_mock: with self.assertRaises(AnsibleFailJson) as result: - set_module_args( + with set_module_args( { "pritunl_api_token": "token", "pritunl_api_secret": "secret", "pritunl_url": "https://pritunl.domain.com", "org": "Unknown", } - ) - self.module.main() + ): + self.module.main() self.assertEqual(org_mock.call_count, 1) @@ -123,14 +123,14 @@ class TestPritunlOrgInfo(ModuleTestCase): side_effect=PritunlListOrganizationMock ) as org_mock: with self.assertRaises(AnsibleExitJson) as result: - set_module_args( + with set_module_args( { "pritunl_api_token": "token", "pritunl_api_secret": "secret", "pritunl_url": "https://pritunl.domain.com", } - ) - self.module.main() + ): + self.module.main() self.assertEqual(org_mock.call_count, 1) diff --git a/tests/unit/plugins/modules/test_pritunl_user.py b/tests/unit/plugins/modules/test_pritunl_user.py index 1120839186..ae66a4d90c 100644 --- a/tests/unit/plugins/modules/test_pritunl_user.py +++ b/tests/unit/plugins/modules/test_pritunl_user.py @@ -12,7 +12,7 @@ from ansible.module_utils.six import iteritems from ansible_collections.community.general.plugins.modules import ( pritunl_user, ) -from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch from ansible_collections.community.general.tests.unit.plugins.module_utils.net_tools.pritunl.test_api import ( PritunlDeleteUserMock, PritunlListOrganizationMock, @@ -20,7 +20,7 @@ from ansible_collections.community.general.tests.unit.plugins.module_utils.net_t PritunlPostUserMock, PritunlPutUserMock, ) -from ansible_collections.community.general.tests.unit.plugins.modules.utils import ( +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( AnsibleExitJson, AnsibleFailJson, ModuleTestCase, @@ -94,9 +94,9 @@ class TestPritunlUser(ModuleTestCase): def test_without_parameters(self): """Test without parameters""" - set_module_args({}) - with self.assertRaises(AnsibleFailJson): - self.module.main() + with set_module_args({}): + with self.assertRaises(AnsibleFailJson): + self.module.main() @mock_pritunl_api def test_present(self): @@ -105,7 +105,7 @@ class TestPritunlUser(ModuleTestCase): "user_name": "alice", "user_email": "alice@company.com", } - set_module_args( + with set_module_args( dict_merge( { "pritunl_api_token": "token", @@ -115,13 +115,12 @@ class TestPritunlUser(ModuleTestCase): }, user_params, ) - ) - - with self.patch_update_pritunl_users( - side_effect=PritunlPostUserMock - ) as post_mock: - with self.assertRaises(AnsibleExitJson) as 
create_result: - self.module.main() + ): + with self.patch_update_pritunl_users( + side_effect=PritunlPostUserMock + ) as post_mock: + with self.assertRaises(AnsibleExitJson) as create_result: + self.module.main() create_exc = create_result.exception.args[0] @@ -137,7 +136,7 @@ class TestPritunlUser(ModuleTestCase): "user_email": "bob@company.com", "user_disabled": True, } - set_module_args( + with set_module_args( dict_merge( { "pritunl_api_token": "token", @@ -147,14 +146,12 @@ class TestPritunlUser(ModuleTestCase): }, new_user_params, ) - ) - - with self.patch_update_pritunl_users( - side_effect=PritunlPutUserMock - ) as put_mock: - - with self.assertRaises(AnsibleExitJson) as update_result: - self.module.main() + ): + with self.patch_update_pritunl_users( + side_effect=PritunlPutUserMock + ) as put_mock: + with self.assertRaises(AnsibleExitJson) as update_result: + self.module.main() update_exc = update_result.exception.args[0] @@ -168,7 +165,7 @@ class TestPritunlUser(ModuleTestCase): @mock_pritunl_api def test_absent(self): """Test user removal from Pritunl.""" - set_module_args( + with set_module_args( { "state": "absent", "pritunl_api_token": "token", @@ -177,10 +174,9 @@ class TestPritunlUser(ModuleTestCase): "organization": "GumGum", "user_name": "florian", } - ) - - with self.assertRaises(AnsibleExitJson) as result: - self.module.main() + ): + with self.assertRaises(AnsibleExitJson) as result: + self.module.main() exc = result.exception.args[0] @@ -190,7 +186,7 @@ class TestPritunlUser(ModuleTestCase): @mock_pritunl_api def test_absent_failure(self): """Test user removal from a non existing organization.""" - set_module_args( + with set_module_args( { "state": "absent", "pritunl_api_token": "token", @@ -199,10 +195,9 @@ class TestPritunlUser(ModuleTestCase): "organization": "Unknown", "user_name": "floria@company.com", } - ) - - with self.assertRaises(AnsibleFailJson) as result: - self.module.main() + ): + with self.assertRaises(AnsibleFailJson) as result: + self.module.main() exc = result.exception.args[0] diff --git a/tests/unit/plugins/modules/test_pritunl_user_info.py b/tests/unit/plugins/modules/test_pritunl_user_info.py index 5aae15d966..ce3db8d87b 100644 --- a/tests/unit/plugins/modules/test_pritunl_user_info.py +++ b/tests/unit/plugins/modules/test_pritunl_user_info.py @@ -10,12 +10,12 @@ import sys from ansible_collections.community.general.plugins.modules import ( pritunl_user_info, ) -from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch from ansible_collections.community.general.tests.unit.plugins.module_utils.net_tools.pritunl.test_api import ( PritunlListOrganizationMock, PritunlListUserMock, ) -from ansible_collections.community.general.tests.unit.plugins.modules.utils import ( +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( AnsibleExitJson, AnsibleFailJson, ModuleTestCase, @@ -59,9 +59,9 @@ class TestPritunlUserInfo(ModuleTestCase): with self.patch_get_pritunl_users( side_effect=PritunlListUserMock ) as user_mock: - set_module_args({}) - with self.assertRaises(AnsibleFailJson): - self.module.main() + with set_module_args({}): + with self.assertRaises(AnsibleFailJson): + self.module.main() self.assertEqual(org_mock.call_count, 0) self.assertEqual(user_mock.call_count, 0) @@ -75,15 +75,15 @@ class TestPritunlUserInfo(ModuleTestCase): side_effect=PritunlListUserMock ) as user_mock: with 
self.assertRaises(AnsibleFailJson) as result: - set_module_args( + with set_module_args( { "pritunl_api_token": "token", "pritunl_api_secret": "secret", "pritunl_url": "https://pritunl.domain.com", "organization": "Unknown", } - ) - self.module.main() + ): + self.module.main() self.assertEqual(org_mock.call_count, 1) self.assertEqual(user_mock.call_count, 0) @@ -103,15 +103,15 @@ class TestPritunlUserInfo(ModuleTestCase): side_effect=PritunlListUserMock ) as user_mock: with self.assertRaises(AnsibleExitJson) as result: - set_module_args( + with set_module_args( { "pritunl_api_token": "token", "pritunl_api_secret": "secret", "pritunl_url": "https://pritunl.domain.com", "organization": "GumGum", } - ) - self.module.main() + ): + self.module.main() self.assertEqual(org_mock.call_count, 1) self.assertEqual(user_mock.call_count, 1) @@ -137,7 +137,7 @@ class TestPritunlUserInfo(ModuleTestCase): side_effect=PritunlListUserMock ) as user_mock: with self.assertRaises(AnsibleExitJson) as result: - set_module_args( + with set_module_args( { "pritunl_api_token": "token", "pritunl_api_secret": "secret", @@ -146,8 +146,8 @@ class TestPritunlUserInfo(ModuleTestCase): "user_name": expected_user_name, "user_type": expected_user_type, } - ) - self.module.main() + ): + self.module.main() self.assertEqual(org_mock.call_count, 1) self.assertEqual(user_mock.call_count, 1) diff --git a/tests/unit/plugins/modules/test_proxmox_backup.py b/tests/unit/plugins/modules/test_proxmox_backup.py deleted file mode 100644 index 8da4de4dee..0000000000 --- a/tests/unit/plugins/modules/test_proxmox_backup.py +++ /dev/null @@ -1,366 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (c) 2019, Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -import \ - ansible_collections.community.general.plugins.module_utils.proxmox as proxmox_utils -from ansible_collections.community.general.plugins.modules import proxmox_backup -from ansible_collections.community.general.tests.unit.plugins.modules.utils import ( - AnsibleExitJson, AnsibleFailJson, set_module_args, ModuleTestCase) -from ansible_collections.community.general.tests.unit.compat.mock import patch - -__metaclass__ = type - -import pytest - -proxmoxer = pytest.importorskip('proxmoxer') - - -MINIMAL_PERMISSIONS = { - '/sdn/zones': {'Datastore.AllocateSpace': 1, 'Datastore.Audit': 1}, - '/nodes': {'Datastore.AllocateSpace': 1, 'Datastore.Audit': 1}, - '/sdn': {'Datastore.AllocateSpace': 1, 'Datastore.Audit': 1}, - '/vms': {'VM.Audit': 1, - 'Sys.Audit': 1, - 'Mapping.Audit': 1, - 'VM.Backup': 1, - 'Datastore.Audit': 1, - 'SDN.Audit': 1, - 'Pool.Audit': 1}, - '/': {'Datastore.Audit': 1, 'Datastore.AllocateSpace': 1}, - '/storage/local-zfs': {'Datastore.AllocateSpace': 1, - 'Datastore.Audit': 1}, - '/storage': {'Datastore.AllocateSpace': 1, 'Datastore.Audit': 1}, - '/access': {'Datastore.AllocateSpace': 1, 'Datastore.Audit': 1}, - '/vms/101': {'VM.Backup': 1, - 'Mapping.Audit': 1, - 'Datastore.AllocateSpace': 0, - 'Sys.Audit': 1, - 'VM.Audit': 1, - 'SDN.Audit': 1, - 'Pool.Audit': 1, - 'Datastore.Audit': 1}, - '/vms/100': {'VM.Backup': 1, - 'Mapping.Audit': 1, - 'Datastore.AllocateSpace': 0, - 'Sys.Audit': 1, - 'VM.Audit': 1, - 'SDN.Audit': 1, - 'Pool.Audit': 1, - 'Datastore.Audit': 1}, - '/pool': {'Datastore.Audit': 1, 'Datastore.AllocateSpace': 1}, } - -STORAGE = [{'type': 'pbs', - 'username': 
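
The pritunl hunks above switch `set_module_args` from a plain call to a context manager imported from `community.internal_test_tools`. The sketch below is only an assumption about how such a helper can be built on top of `ansible.module_utils.basic` — the real implementation lives in the internal_test_tools collection — but it shows why the `with` form is safer: the patched arguments are restored even when `module.main()` raises `AnsibleExitJson` or `AnsibleFailJson`, so one test can no longer leak its arguments into the next.

```python
# Hypothetical reimplementation for illustration only; the actual helper
# is provided by community.internal_test_tools.
import contextlib
import json
from unittest import mock

from ansible.module_utils import basic
from ansible.module_utils.common.text.converters import to_bytes


@contextlib.contextmanager
def set_module_args(args):
    # AnsibleModule reads its parameters from basic._ANSIBLE_ARGS;
    # patch.object restores the previous value on exit, even on exceptions.
    serialized = to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': args}))
    with mock.patch.object(basic, '_ANSIBLE_ARGS', serialized):
        yield
```

Either nesting order seen in the hunks (`assertRaises` outside or inside the `with set_module_args(...)` block) works with this contract, because the context manager unwinds regardless of where the expected exception is caught.
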
'test@pbs', - 'datastore': 'Backup-Pool', - 'server': '10.0.0.1', - 'shared': 1, - 'fingerprint': '94:fd:ac:e7:d5:36:0e:11:5b:23:05:40:d2:a4:e1:8a:c1:52:41:01:07:28:c0:4d:c5:ee:df:7f:7c:03:ab:41', - 'prune-backups': 'keep-all=1', - 'storage': 'backup', - 'content': 'backup', - 'digest': 'ca46a68d7699de061c139d714892682ea7c9d681'}, - {'nodes': 'node1,node2,node3', - 'sparse': 1, - 'type': 'zfspool', - 'content': 'rootdir,images', - 'digest': 'ca46a68d7699de061c139d714892682ea7c9d681', - 'pool': 'rpool/data', - 'storage': 'local-zfs'}] - - -VMS = [{"diskwrite": 0, - "vmid": 100, - "node": "node1", - "id": "lxc/100", - "maxdisk": 10000, - "template": 0, - "disk": 10000, - "uptime": 10000, - "maxmem": 10000, - "maxcpu": 1, - "netin": 10000, - "type": "lxc", - "netout": 10000, - "mem": 10000, - "diskread": 10000, - "cpu": 0.01, - "name": "test-lxc", - "status": "running"}, - {"diskwrite": 0, - "vmid": 101, - "node": "node2", - "id": "kvm/101", - "maxdisk": 10000, - "template": 0, - "disk": 10000, - "uptime": 10000, - "maxmem": 10000, - "maxcpu": 1, - "netin": 10000, - "type": "lxc", - "netout": 10000, - "mem": 10000, - "diskread": 10000, - "cpu": 0.01, - "name": "test-kvm", - "status": "running"} - ] - -NODES = [{'level': '', - 'type': 'node', - 'node': 'node1', - 'status': 'online', - 'id': 'node/node1', - 'cgroup-mode': 2}, - {'status': 'online', - 'id': 'node/node2', - 'cgroup-mode': 2, - 'level': '', - 'node': 'node2', - 'type': 'node'}, - {'status': 'online', - 'id': 'node/node3', - 'cgroup-mode': 2, - 'level': '', - 'node': 'node3', - 'type': 'node'}, - ] - -TASK_API_RETURN = { - "node1": { - 'starttime': 1732606253, - 'status': 'stopped', - 'type': 'vzdump', - 'pstart': 517463911, - 'upid': 'UPID:node1:003F8C63:1E7FB79C:67449780:vzdump:100:root@pam:', - 'id': '100', - 'node': 'hypervisor', - 'pid': 541669, - 'user': 'test@pve', - 'exitstatus': 'OK'}, - "node2": { - 'starttime': 1732606253, - 'status': 'stopped', - 'type': 'vzdump', - 'pstart': 517463911, - 'upid': 'UPID:node2:000029DD:1599528B:6108F068:vzdump:101:root@pam:', - 'id': '101', - 'node': 'hypervisor', - 'pid': 541669, - 'user': 'test@pve', - 'exitstatus': 'OK'}, -} - - -VZDUMP_API_RETURN = { - "node1": "UPID:node1:003F8C63:1E7FB79C:67449780:vzdump:100:root@pam:", - "node2": "UPID:node2:000029DD:1599528B:6108F068:vzdump:101:root@pam:", - "node3": "OK", -} - - -TASKLOG_API_RETURN = {"node1": [{'n': 1, - 't': "INFO: starting new backup job: vzdump 100 --mode snapshot --node node1 " - "--notes-template '{{guestname}}' --storage backup --notification-mode auto"}, - {'t': 'INFO: Starting Backup of VM 100 (lxc)', - 'n': 2}, - {'n': 23, 't': 'INFO: adding notes to backup'}, - {'n': 24, - 't': 'INFO: Finished Backup of VM 100 (00:00:03)'}, - {'n': 25, - 't': 'INFO: Backup finished at 2024-11-25 16:28:03'}, - {'t': 'INFO: Backup job finished successfully', - 'n': 26}, - {'n': 27, 't': 'TASK OK'}], - "node2": [{'n': 1, - 't': "INFO: starting new backup job: vzdump 101 --mode snapshot --node node2 " - "--notes-template '{{guestname}}' --storage backup --notification-mode auto"}, - {'t': 'INFO: Starting Backup of VM 101 (kvm)', - 'n': 2}, - {'n': 24, - 't': 'INFO: Finished Backup of VM 100 (00:00:03)'}, - {'n': 25, - 't': 'INFO: Backup finished at 2024-11-25 16:28:03'}, - {'t': 'INFO: Backup job finished successfully', - 'n': 26}, - {'n': 27, 't': 'TASK OK'}], - } - - -def return_valid_resources(resource_type, *args, **kwargs): - if resource_type == "vm": - return VMS - if resource_type == "node": - return NODES - - -def 
return_vzdump_api(node, *args, **kwargs): - if node in ("node1", "node2", "node3"): - return VZDUMP_API_RETURN[node] - - -def return_logs_api(node, *args, **kwargs): - if node in ("node1", "node2"): - return TASKLOG_API_RETURN[node] - - -def return_task_status_api(node, *args, **kwargs): - if node in ("node1", "node2"): - return TASK_API_RETURN[node] - - -class TestProxmoxBackup(ModuleTestCase): - def setUp(self): - super(TestProxmoxBackup, self).setUp() - proxmox_utils.HAS_PROXMOXER = True - self.module = proxmox_backup - self.connect_mock = patch( - "ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect", - ).start() - self.mock_get_permissions = patch.object( - proxmox_backup.ProxmoxBackupAnsible, "_get_permissions").start() - self.mock_get_storages = patch.object(proxmox_utils.ProxmoxAnsible, - "get_storages").start() - self.mock_get_resources = patch.object( - proxmox_backup.ProxmoxBackupAnsible, "_get_resources").start() - self.mock_get_tasklog = patch.object( - proxmox_backup.ProxmoxBackupAnsible, "_get_tasklog").start() - self.mock_post_vzdump = patch.object( - proxmox_backup.ProxmoxBackupAnsible, "_post_vzdump").start() - self.mock_get_taskok = patch.object( - proxmox_backup.ProxmoxBackupAnsible, "_get_taskok").start() - self.mock_get_permissions.return_value = MINIMAL_PERMISSIONS - self.mock_get_storages.return_value = STORAGE - self.mock_get_resources.side_effect = return_valid_resources - self.mock_get_taskok.side_effect = return_task_status_api - self.mock_get_tasklog.side_effect = return_logs_api - self.mock_post_vzdump.side_effect = return_vzdump_api - - def tearDown(self): - self.connect_mock.stop() - self.mock_get_permissions.stop() - self.mock_get_storages.stop() - self.mock_get_resources.stop() - super(TestProxmoxBackup, self).tearDown() - - def test_proxmox_backup_without_argument(self): - set_module_args({}) - with pytest.raises(AnsibleFailJson): - proxmox_backup.main() - - def test_create_backup_check_mode(self): - set_module_args({"api_user": "root@pam", - "api_password": "secret", - "api_host": "127.0.0.1", - "mode": "all", - "storage": "backup", - "_ansible_check_mode": True, - }) - with pytest.raises(AnsibleExitJson) as exc_info: - proxmox_backup.main() - - result = exc_info.value.args[0] - - assert result["changed"] is True - assert result["msg"] == "Backups would be created" - assert len(result["backups"]) == 0 - assert self.mock_get_taskok.call_count == 0 - assert self.mock_get_tasklog.call_count == 0 - assert self.mock_post_vzdump.call_count == 0 - - def test_create_backup_all_mode(self): - set_module_args({"api_user": "root@pam", - "api_password": "secret", - "api_host": "127.0.0.1", - "mode": "all", - "storage": "backup", - }) - with pytest.raises(AnsibleExitJson) as exc_info: - proxmox_backup.main() - - result = exc_info.value.args[0] - assert result["changed"] is True - assert result["msg"] == "Backup tasks created" - for backup_result in result["backups"]: - assert backup_result["upid"] in { - VZDUMP_API_RETURN[key] for key in VZDUMP_API_RETURN} - assert self.mock_get_taskok.call_count == 0 - assert self.mock_post_vzdump.call_count == 3 - - def test_create_backup_include_mode_with_wait(self): - set_module_args({"api_user": "root@pam", - "api_password": "secret", - "api_host": "127.0.0.1", - "mode": "include", - "node": "node1", - "storage": "backup", - "vmids": [100], - "wait": True - }) - with pytest.raises(AnsibleExitJson) as exc_info: - proxmox_backup.main() - - result = exc_info.value.args[0] - assert 
result["changed"] is True - assert result["msg"] == "Backups succeeded" - for backup_result in result["backups"]: - assert backup_result["upid"] in { - VZDUMP_API_RETURN[key] for key in VZDUMP_API_RETURN} - assert self.mock_get_taskok.call_count == 1 - assert self.mock_post_vzdump.call_count == 1 - - def test_fail_insufficient_permissions(self): - set_module_args({"api_user": "root@pam", - "api_password": "secret", - "api_host": "127.0.0.1", - "mode": "include", - "storage": "backup", - "performance_tweaks": "max-workers=2", - "vmids": [100], - "wait": True - }) - with pytest.raises(AnsibleFailJson) as exc_info: - proxmox_backup.main() - - result = exc_info.value.args[0] - assert result["msg"] == "Insufficient permission: Performance_tweaks and bandwidth require 'Sys.Modify' permission for '/'" - assert self.mock_get_taskok.call_count == 0 - assert self.mock_post_vzdump.call_count == 0 - - def test_fail_missing_node(self): - set_module_args({"api_user": "root@pam", - "api_password": "secret", - "api_host": "127.0.0.1", - "mode": "include", - "storage": "backup", - "node": "nonexistingnode", - "vmids": [100], - "wait": True - }) - with pytest.raises(AnsibleFailJson) as exc_info: - proxmox_backup.main() - - result = exc_info.value.args[0] - assert result["msg"] == "Node nonexistingnode was specified, but does not exist on the cluster" - assert self.mock_get_taskok.call_count == 0 - assert self.mock_post_vzdump.call_count == 0 - - def test_fail_missing_storage(self): - set_module_args({"api_user": "root@pam", - "api_password": "secret", - "api_host": "127.0.0.1", - "mode": "include", - "storage": "nonexistingstorage", - "vmids": [100], - "wait": True - }) - with pytest.raises(AnsibleFailJson) as exc_info: - proxmox_backup.main() - - result = exc_info.value.args[0] - assert result["msg"] == "Storage nonexistingstorage does not exist in the cluster" - assert self.mock_get_taskok.call_count == 0 - assert self.mock_post_vzdump.call_count == 0 diff --git a/tests/unit/plugins/modules/test_proxmox_kvm.py b/tests/unit/plugins/modules/test_proxmox_kvm.py deleted file mode 100644 index 4e2cf032c1..0000000000 --- a/tests/unit/plugins/modules/test_proxmox_kvm.py +++ /dev/null @@ -1,168 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (c) 2021, Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -import sys - -import pytest - -proxmoxer = pytest.importorskip("proxmoxer") -mandatory_py_version = pytest.mark.skipif( - sys.version_info < (2, 7), - reason="The proxmoxer dependency requires python2.7 or higher", -) - -from ansible_collections.community.general.plugins.modules import proxmox_kvm -from ansible_collections.community.general.tests.unit.compat.mock import ( - patch, - DEFAULT, -) -from ansible_collections.community.general.tests.unit.plugins.modules.utils import ( - AnsibleExitJson, - AnsibleFailJson, - ModuleTestCase, - set_module_args, -) -import ansible_collections.community.general.plugins.module_utils.proxmox as proxmox_utils - - -class TestProxmoxKvmModule(ModuleTestCase): - def setUp(self): - super(TestProxmoxKvmModule, self).setUp() - proxmox_utils.HAS_PROXMOXER = True - self.module = proxmox_kvm - self.connect_mock = patch( - "ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect" - ).start() - self.get_node_mock = patch.object( - 
proxmox_utils.ProxmoxAnsible, "get_node" - ).start() - self.get_vm_mock = patch.object(proxmox_utils.ProxmoxAnsible, "get_vm").start() - self.create_vm_mock = patch.object( - proxmox_kvm.ProxmoxKvmAnsible, "create_vm" - ).start() - - def tearDown(self): - self.create_vm_mock.stop() - self.get_vm_mock.stop() - self.get_node_mock.stop() - self.connect_mock.stop() - super(TestProxmoxKvmModule, self).tearDown() - - def test_module_fail_when_required_args_missing(self): - with self.assertRaises(AnsibleFailJson): - set_module_args({}) - self.module.main() - - def test_module_exits_unchaged_when_provided_vmid_exists(self): - set_module_args( - { - "api_host": "host", - "api_user": "user", - "api_password": "password", - "vmid": "100", - "node": "pve", - } - ) - self.get_vm_mock.return_value = [{"vmid": "100"}] - with pytest.raises(AnsibleExitJson) as exc_info: - self.module.main() - - assert self.get_vm_mock.call_count == 1 - result = exc_info.value.args[0] - assert result["changed"] is False - assert result["msg"] == "VM with vmid <100> already exists" - - def test_vm_created_when_vmid_not_exist_but_name_already_exist(self): - set_module_args( - { - "api_host": "host", - "api_user": "user", - "api_password": "password", - "vmid": "100", - "name": "existing.vm.local", - "node": "pve", - } - ) - self.get_vm_mock.return_value = None - with pytest.raises(AnsibleExitJson) as exc_info: - self.module.main() - - assert self.get_vm_mock.call_count == 1 - assert self.get_node_mock.call_count == 1 - result = exc_info.value.args[0] - assert result["changed"] is True - assert result["msg"] == "VM existing.vm.local with vmid 100 deployed" - - def test_vm_not_created_when_name_already_exist_and_vmid_not_set(self): - set_module_args( - { - "api_host": "host", - "api_user": "user", - "api_password": "password", - "name": "existing.vm.local", - "node": "pve", - } - ) - with patch.object(proxmox_utils.ProxmoxAnsible, "get_vmid") as get_vmid_mock: - get_vmid_mock.return_value = { - "vmid": 100, - "name": "existing.vm.local", - } - with pytest.raises(AnsibleExitJson) as exc_info: - self.module.main() - - assert get_vmid_mock.call_count == 1 - result = exc_info.value.args[0] - assert result["changed"] is False - - def test_vm_created_when_name_doesnt_exist_and_vmid_not_set(self): - set_module_args( - { - "api_host": "host", - "api_user": "user", - "api_password": "password", - "name": "existing.vm.local", - "node": "pve", - } - ) - self.get_vm_mock.return_value = None - with patch.multiple( - proxmox_utils.ProxmoxAnsible, get_vmid=DEFAULT, get_nextvmid=DEFAULT - ) as utils_mock: - utils_mock["get_vmid"].return_value = None - utils_mock["get_nextvmid"].return_value = 101 - with pytest.raises(AnsibleExitJson) as exc_info: - self.module.main() - - assert utils_mock["get_vmid"].call_count == 1 - assert utils_mock["get_nextvmid"].call_count == 1 - result = exc_info.value.args[0] - assert result["changed"] is True - assert result["msg"] == "VM existing.vm.local with vmid 101 deployed" - - def test_parse_mac(self): - assert ( - proxmox_kvm.parse_mac("virtio=00:11:22:AA:BB:CC,bridge=vmbr0,firewall=1") - == "00:11:22:AA:BB:CC" - ) - - def test_parse_dev(self): - assert ( - proxmox_kvm.parse_dev("local-lvm:vm-1000-disk-0,format=qcow2") - == "local-lvm:vm-1000-disk-0" - ) - assert ( - proxmox_kvm.parse_dev("local-lvm:vm-101-disk-1,size=8G") - == "local-lvm:vm-101-disk-1" - ) - assert ( - proxmox_kvm.parse_dev("local-zfs:vm-1001-disk-0") - == "local-zfs:vm-1001-disk-0" - ) diff --git 
a/tests/unit/plugins/modules/test_proxmox_snap.py b/tests/unit/plugins/modules/test_proxmox_snap.py deleted file mode 100644 index 545fcd1f59..0000000000 --- a/tests/unit/plugins/modules/test_proxmox_snap.py +++ /dev/null @@ -1,125 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (c) 2019, Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import json -import sys - -import pytest - -proxmoxer = pytest.importorskip('proxmoxer') -mandatory_py_version = pytest.mark.skipif( - sys.version_info < (2, 7), - reason='The proxmoxer dependency requires python2.7 or higher' -) - -from ansible_collections.community.general.tests.unit.compat.mock import MagicMock, patch -from ansible_collections.community.general.plugins.modules import proxmox_snap -import ansible_collections.community.general.plugins.module_utils.proxmox as proxmox_utils -from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args - - -def get_resources(type): - return [{"diskwrite": 0, - "vmid": 100, - "node": "localhost", - "id": "lxc/100", - "maxdisk": 10000, - "template": 0, - "disk": 10000, - "uptime": 10000, - "maxmem": 10000, - "maxcpu": 1, - "netin": 10000, - "type": "lxc", - "netout": 10000, - "mem": 10000, - "diskread": 10000, - "cpu": 0.01, - "name": "test-lxc", - "status": "running"}] - - -def fake_api(mocker): - r = mocker.MagicMock() - r.cluster.resources.get = MagicMock(side_effect=get_resources) - return r - - -def test_proxmox_snap_without_argument(capfd): - set_module_args({}) - with pytest.raises(SystemExit) as results: - proxmox_snap.main() - - out, err = capfd.readouterr() - assert not err - assert json.loads(out)['failed'] - - -@patch('ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect') -def test_create_snapshot_check_mode(connect_mock, capfd, mocker): - set_module_args({"hostname": "test-lxc", - "api_user": "root@pam", - "api_password": "secret", - "api_host": "127.0.0.1", - "state": "present", - "snapname": "test", - "timeout": "1", - "force": True, - "_ansible_check_mode": True}) - proxmox_utils.HAS_PROXMOXER = True - connect_mock.side_effect = lambda: fake_api(mocker) - with pytest.raises(SystemExit) as results: - proxmox_snap.main() - - out, err = capfd.readouterr() - assert not err - assert not json.loads(out)['changed'] - - -@patch('ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect') -def test_remove_snapshot_check_mode(connect_mock, capfd, mocker): - set_module_args({"hostname": "test-lxc", - "api_user": "root@pam", - "api_password": "secret", - "api_host": "127.0.0.1", - "state": "absent", - "snapname": "test", - "timeout": "1", - "force": True, - "_ansible_check_mode": True}) - proxmox_utils.HAS_PROXMOXER = True - connect_mock.side_effect = lambda: fake_api(mocker) - with pytest.raises(SystemExit) as results: - proxmox_snap.main() - - out, err = capfd.readouterr() - assert not err - assert not json.loads(out)['changed'] - - -@patch('ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect') -def test_rollback_snapshot_check_mode(connect_mock, capfd, mocker): - set_module_args({"hostname": "test-lxc", - "api_user": "root@pam", - "api_password": "secret", - "api_host": "127.0.0.1", - "state": "rollback", - "snapname": "test", - "timeout": 
"1", - "force": True, - "_ansible_check_mode": True}) - proxmox_utils.HAS_PROXMOXER = True - connect_mock.side_effect = lambda: fake_api(mocker) - with pytest.raises(SystemExit) as results: - proxmox_snap.main() - - out, err = capfd.readouterr() - assert not err - output = json.loads(out) - assert not output['changed'] - assert output['msg'] == "Snapshot test does not exist" diff --git a/tests/unit/plugins/modules/test_proxmox_storage_contents_info.py b/tests/unit/plugins/modules/test_proxmox_storage_contents_info.py deleted file mode 100644 index df2625dba6..0000000000 --- a/tests/unit/plugins/modules/test_proxmox_storage_contents_info.py +++ /dev/null @@ -1,90 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (c) 2023, Julian Vanden Broeck -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -import pytest - -proxmoxer = pytest.importorskip("proxmoxer") - -from ansible_collections.community.general.plugins.modules import proxmox_storage_contents_info -from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible_collections.community.general.tests.unit.plugins.modules.utils import ( - AnsibleExitJson, - AnsibleFailJson, - ModuleTestCase, - set_module_args, -) -import ansible_collections.community.general.plugins.module_utils.proxmox as proxmox_utils - -NODE1 = "pve" -RAW_LIST_OUTPUT = [ - { - "content": "backup", - "ctime": 1702528474, - "format": "pbs-vm", - "size": 273804166061, - "subtype": "qemu", - "vmid": 931, - "volid": "datastore:backup/vm/931/2023-12-14T04:34:34Z", - }, - { - "content": "backup", - "ctime": 1702582560, - "format": "pbs-vm", - "size": 273804166059, - "subtype": "qemu", - "vmid": 931, - "volid": "datastore:backup/vm/931/2023-12-14T19:36:00Z", - }, -] - - -def get_module_args(node, storage, content="all", vmid=None): - return { - "api_host": "host", - "api_user": "user", - "api_password": "password", - "node": node, - "storage": storage, - "content": content, - "vmid": vmid, - } - - -class TestProxmoxStorageContentsInfo(ModuleTestCase): - def setUp(self): - super(TestProxmoxStorageContentsInfo, self).setUp() - proxmox_utils.HAS_PROXMOXER = True - self.module = proxmox_storage_contents_info - self.connect_mock = patch( - "ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect", - ).start() - self.connect_mock.return_value.nodes.return_value.storage.return_value.content.return_value.get.return_value = ( - RAW_LIST_OUTPUT - ) - self.connect_mock.return_value.nodes.get.return_value = [{"node": NODE1}] - - def tearDown(self): - self.connect_mock.stop() - super(TestProxmoxStorageContentsInfo, self).tearDown() - - def test_module_fail_when_required_args_missing(self): - with pytest.raises(AnsibleFailJson) as exc_info: - set_module_args({}) - self.module.main() - - def test_storage_contents_info(self): - with pytest.raises(AnsibleExitJson) as exc_info: - set_module_args(get_module_args(node=NODE1, storage="datastore")) - expected_output = {} - self.module.main() - - result = exc_info.value.args[0] - assert not result["changed"] - assert result["proxmox_storage_content"] == RAW_LIST_OUTPUT diff --git a/tests/unit/plugins/modules/test_proxmox_tasks_info.py b/tests/unit/plugins/modules/test_proxmox_tasks_info.py deleted file mode 100644 index 5c228655be..0000000000 --- 
a/tests/unit/plugins/modules/test_proxmox_tasks_info.py +++ /dev/null @@ -1,203 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (c) 2021, Andreas Botzner (@paginabianca) -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later -# -# Proxmox Tasks module unit tests. -# The API responses used in these tests were recorded from PVE version 6.4-8 - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import json -import sys - -import pytest - -proxmoxer = pytest.importorskip('proxmoxer') -mandatory_py_version = pytest.mark.skipif( - sys.version_info < (2, 7), - reason='The proxmoxer dependency requires python2.7 or higher' -) - -from ansible_collections.community.general.plugins.modules import proxmox_tasks_info -import ansible_collections.community.general.plugins.module_utils.proxmox as proxmox_utils -from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args - -NODE = 'node01' -TASK_UPID = 'UPID:iaclab-01-01:000029DD:1599528B:6108F068:srvreload:networking:root@pam:' -TASKS = [ - { - "endtime": 1629092710, - "id": "networking", - "node": "iaclab-01-01", - "pid": 3539, - "pstart": 474062216, - "starttime": 1629092709, - "status": "OK", - "type": "srvreload", - "upid": "UPID:iaclab-01-01:00000DD3:1C419D88:6119FB65:srvreload:networking:root@pam:", - "user": "root@pam" - }, - { - "endtime": 1627975785, - "id": "networking", - "node": "iaclab-01-01", - "pid": 10717, - "pstart": 362369675, - "starttime": 1627975784, - "status": "command 'ifreload -a' failed: exit code 1", - "type": "srvreload", - "upid": "UPID:iaclab-01-01:000029DD:1599528B:6108F068:srvreload:networking:root@pam:", - "user": "root@pam" - }, - { - "endtime": 1627975503, - "id": "networking", - "node": "iaclab-01-01", - "pid": 6778, - "pstart": 362341540, - "starttime": 1627975503, - "status": "OK", - "type": "srvreload", - "upid": "UPID:iaclab-01-01:00001A7A:1598E4A4:6108EF4F:srvreload:networking:root@pam:", - "user": "root@pam" - } -] -EXPECTED_TASKS = [ - { - "endtime": 1629092710, - "id": "networking", - "node": "iaclab-01-01", - "pid": 3539, - "pstart": 474062216, - "starttime": 1629092709, - "status": "OK", - "type": "srvreload", - "upid": "UPID:iaclab-01-01:00000DD3:1C419D88:6119FB65:srvreload:networking:root@pam:", - "user": "root@pam", - "failed": False - }, - { - "endtime": 1627975785, - "id": "networking", - "node": "iaclab-01-01", - "pid": 10717, - "pstart": 362369675, - "starttime": 1627975784, - "status": "command 'ifreload -a' failed: exit code 1", - "type": "srvreload", - "upid": "UPID:iaclab-01-01:000029DD:1599528B:6108F068:srvreload:networking:root@pam:", - "user": "root@pam", - "failed": True - }, - { - "endtime": 1627975503, - "id": "networking", - "node": "iaclab-01-01", - "pid": 6778, - "pstart": 362341540, - "starttime": 1627975503, - "status": "OK", - "type": "srvreload", - "upid": "UPID:iaclab-01-01:00001A7A:1598E4A4:6108EF4F:srvreload:networking:root@pam:", - "user": "root@pam", - "failed": False - } -] - -EXPECTED_SINGLE_TASK = [ - { - "endtime": 1627975785, - "id": "networking", - "node": "iaclab-01-01", - "pid": 10717, - "pstart": 362369675, - "starttime": 1627975784, - "status": "command 'ifreload -a' failed: exit code 1", - "type": "srvreload", - "upid": "UPID:iaclab-01-01:000029DD:1599528B:6108F068:srvreload:networking:root@pam:", - "user": 
"root@pam", - "failed": True - }, -] - - -@patch('ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect') -def test_without_required_parameters(connect_mock, capfd, mocker): - set_module_args({}) - with pytest.raises(SystemExit): - proxmox_tasks_info.main() - out, err = capfd.readouterr() - assert not err - assert json.loads(out)['failed'] - - -def mock_api_tasks_response(mocker): - m = mocker.MagicMock() - g = mocker.MagicMock() - m.nodes = mocker.MagicMock(return_value=g) - g.tasks.get = mocker.MagicMock(return_value=TASKS) - return m - - -@patch('ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect') -def test_get_tasks(connect_mock, capfd, mocker): - set_module_args({'api_host': 'proxmoxhost', - 'api_user': 'root@pam', - 'api_password': 'supersecret', - 'node': NODE}) - - connect_mock.side_effect = lambda: mock_api_tasks_response(mocker) - proxmox_utils.HAS_PROXMOXER = True - - with pytest.raises(SystemExit): - proxmox_tasks_info.main() - out, err = capfd.readouterr() - assert not err - assert len(json.loads(out)['proxmox_tasks']) != 0 - assert not json.loads(out)['changed'] - - -@patch('ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect') -def test_get_single_task(connect_mock, capfd, mocker): - set_module_args({'api_host': 'proxmoxhost', - 'api_user': 'root@pam', - 'api_password': 'supersecret', - 'node': NODE, - 'task': TASK_UPID}) - - connect_mock.side_effect = lambda: mock_api_tasks_response(mocker) - proxmox_utils.HAS_PROXMOXER = True - - with pytest.raises(SystemExit): - proxmox_tasks_info.main() - out, err = capfd.readouterr() - assert not err - assert len(json.loads(out)['proxmox_tasks']) == 1 - assert json.loads(out) - assert not json.loads(out)['changed'] - - -@patch('ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect') -def test_get_non_existent_task(connect_mock, capfd, mocker): - set_module_args({'api_host': 'proxmoxhost', - 'api_user': 'root@pam', - 'api_password': 'supersecret', - 'node': NODE, - 'task': 'UPID:nonexistent'}) - - connect_mock.side_effect = lambda: mock_api_tasks_response(mocker) - proxmox_utils.HAS_PROXMOXER = True - - with pytest.raises(SystemExit): - proxmox_tasks_info.main() - out, err = capfd.readouterr() - assert not err - assert json.loads(out)['failed'] - assert 'proxmox_tasks' not in json.loads(out) - assert not json.loads(out)['changed'] - assert json.loads( - out)['msg'] == 'Task: UPID:nonexistent does not exist on node: node01.' 
diff --git a/tests/unit/plugins/modules/test_proxmox_template.py b/tests/unit/plugins/modules/test_proxmox_template.py deleted file mode 100644 index dc09a44b35..0000000000 --- a/tests/unit/plugins/modules/test_proxmox_template.py +++ /dev/null @@ -1,66 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (c) 2023, Sergei Antipov -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -import os -import sys - -import pytest - -proxmoxer = pytest.importorskip('proxmoxer') -mandatory_py_version = pytest.mark.skipif( - sys.version_info < (2, 7), - reason='The proxmoxer dependency requires python2.7 or higher' -) - -from ansible_collections.community.general.plugins.modules import proxmox_template -from ansible_collections.community.general.tests.unit.compat.mock import patch, Mock -from ansible_collections.community.general.tests.unit.plugins.modules.utils import ( - AnsibleFailJson, - ModuleTestCase, - set_module_args, -) -import ansible_collections.community.general.plugins.module_utils.proxmox as proxmox_utils - - -class TestProxmoxTemplateModule(ModuleTestCase): - def setUp(self): - super(TestProxmoxTemplateModule, self).setUp() - proxmox_utils.HAS_PROXMOXER = True - self.module = proxmox_template - self.connect_mock = patch( - "ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect" - ) - self.connect_mock.start() - - def tearDown(self): - self.connect_mock.stop() - super(TestProxmoxTemplateModule, self).tearDown() - - @patch("os.stat") - @patch.multiple(os.path, exists=Mock(return_value=True), isfile=Mock(return_value=True)) - def test_module_fail_when_toolbelt_not_installed_and_file_size_is_big(self, mock_stat): - self.module.HAS_REQUESTS_TOOLBELT = False - mock_stat.return_value.st_size = 268435460 - set_module_args( - { - "api_host": "host", - "api_user": "user", - "api_password": "password", - "node": "pve", - "src": "/tmp/mock.iso", - "content_type": "iso" - } - ) - with pytest.raises(AnsibleFailJson) as exc_info: - self.module.main() - - result = exc_info.value.args[0] - assert result["failed"] is True - assert result["msg"] == "'requests_toolbelt' module is required to upload files larger than 256MB" diff --git a/tests/unit/plugins/modules/test_proxmox_vm_info.py b/tests/unit/plugins/modules/test_proxmox_vm_info.py deleted file mode 100644 index 94bbbc948d..0000000000 --- a/tests/unit/plugins/modules/test_proxmox_vm_info.py +++ /dev/null @@ -1,714 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (c) 2023, Sergei Antipov -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -import sys - -import pytest - -proxmoxer = pytest.importorskip("proxmoxer") -mandatory_py_version = pytest.mark.skipif( - sys.version_info < (2, 7), - reason="The proxmoxer dependency requires python2.7 or higher", -) - -from ansible_collections.community.general.plugins.modules import proxmox_vm_info -from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible_collections.community.general.tests.unit.plugins.modules.utils import ( - AnsibleExitJson, - AnsibleFailJson, - ModuleTestCase, - set_module_args, -) -import 
ansible_collections.community.general.plugins.module_utils.proxmox as proxmox_utils - -NODE1 = "pve" -NODE2 = "pve2" -RAW_CLUSTER_OUTPUT = [ - { - "cpu": 0.174069059487628, - "disk": 0, - "diskread": 6656, - "diskwrite": 0, - "id": "qemu/100", - "maxcpu": 1, - "maxdisk": 34359738368, - "maxmem": 4294967296, - "mem": 35304543, - "name": "pxe.home.arpa", - "netin": 416956, - "netout": 17330, - "node": NODE1, - "status": "running", - "template": 0, - "type": "qemu", - "uptime": 669, - "vmid": 100, - }, - { - "cpu": 0, - "disk": 0, - "diskread": 0, - "diskwrite": 0, - "id": "qemu/101", - "maxcpu": 1, - "maxdisk": 0, - "maxmem": 536870912, - "mem": 0, - "name": "test1", - "netin": 0, - "netout": 0, - "node": NODE2, - "pool": "pool1", - "status": "stopped", - "template": 0, - "type": "qemu", - "uptime": 0, - "vmid": 101, - }, - { - "cpu": 0, - "disk": 352190464, - "diskread": 0, - "diskwrite": 0, - "id": "lxc/102", - "maxcpu": 2, - "maxdisk": 10737418240, - "maxmem": 536870912, - "mem": 28192768, - "name": "test-lxc.home.arpa", - "netin": 102757, - "netout": 446, - "node": NODE1, - "status": "running", - "template": 0, - "type": "lxc", - "uptime": 161, - "vmid": 102, - }, - { - "cpu": 0, - "disk": 0, - "diskread": 0, - "diskwrite": 0, - "id": "lxc/103", - "maxcpu": 2, - "maxdisk": 10737418240, - "maxmem": 536870912, - "mem": 0, - "name": "test1-lxc.home.arpa", - "netin": 0, - "netout": 0, - "node": NODE2, - "pool": "pool1", - "status": "stopped", - "template": 0, - "type": "lxc", - "uptime": 0, - "vmid": 103, - }, - { - "cpu": 0, - "disk": 0, - "diskread": 0, - "diskwrite": 0, - "id": "lxc/104", - "maxcpu": 2, - "maxdisk": 10737418240, - "maxmem": 536870912, - "mem": 0, - "name": "test-lxc.home.arpa", - "netin": 0, - "netout": 0, - "node": NODE2, - "pool": "pool1", - "status": "stopped", - "template": 0, - "type": "lxc", - "uptime": 0, - "vmid": 104, - }, - { - "cpu": 0, - "disk": 0, - "diskread": 0, - "diskwrite": 0, - "id": "lxc/105", - "maxcpu": 2, - "maxdisk": 10737418240, - "maxmem": 536870912, - "mem": 0, - "name": "", - "netin": 0, - "netout": 0, - "node": NODE2, - "pool": "pool1", - "status": "stopped", - "template": 0, - "type": "lxc", - "uptime": 0, - "vmid": 105, - }, -] -RAW_LXC_OUTPUT = [ - { - "cpu": 0, - "cpus": 2, - "disk": 0, - "diskread": 0, - "diskwrite": 0, - "maxdisk": 10737418240, - "maxmem": 536870912, - "maxswap": 536870912, - "mem": 0, - "name": "test1-lxc.home.arpa", - "netin": 0, - "netout": 0, - "status": "stopped", - "swap": 0, - "type": "lxc", - "uptime": 0, - "vmid": "103", - }, - { - "cpu": 0, - "cpus": 2, - "disk": 352190464, - "diskread": 0, - "diskwrite": 0, - "maxdisk": 10737418240, - "maxmem": 536870912, - "maxswap": 536870912, - "mem": 28192768, - "name": "test-lxc.home.arpa", - "netin": 102757, - "netout": 446, - "pid": 4076752, - "status": "running", - "swap": 0, - "type": "lxc", - "uptime": 161, - "vmid": "102", - }, - { - "cpu": 0, - "cpus": 2, - "disk": 0, - "diskread": 0, - "diskwrite": 0, - "maxdisk": 10737418240, - "maxmem": 536870912, - "maxswap": 536870912, - "mem": 0, - "name": "test-lxc.home.arpa", - "netin": 0, - "netout": 0, - "status": "stopped", - "swap": 0, - "type": "lxc", - "uptime": 0, - "vmid": "104", - }, - { - "cpu": 0, - "cpus": 2, - "disk": 0, - "diskread": 0, - "diskwrite": 0, - "maxdisk": 10737418240, - "maxmem": 536870912, - "maxswap": 536870912, - "mem": 0, - "name": "", - "netin": 0, - "netout": 0, - "status": "stopped", - "swap": 0, - "type": "lxc", - "uptime": 0, - "vmid": "105", - }, -] -RAW_QEMU_OUTPUT = [ - { - "cpu": 0, - 
"cpus": 1, - "disk": 0, - "diskread": 0, - "diskwrite": 0, - "maxdisk": 0, - "maxmem": 536870912, - "mem": 0, - "name": "test1", - "netin": 0, - "netout": 0, - "status": "stopped", - "uptime": 0, - "vmid": 101, - }, - { - "cpu": 0.174069059487628, - "cpus": 1, - "disk": 0, - "diskread": 6656, - "diskwrite": 0, - "maxdisk": 34359738368, - "maxmem": 4294967296, - "mem": 35304543, - "name": "pxe.home.arpa", - "netin": 416956, - "netout": 17330, - "pid": 4076688, - "status": "running", - "uptime": 669, - "vmid": 100, - }, -] -EXPECTED_VMS_OUTPUT = [ - { - "cpu": 0.174069059487628, - "cpus": 1, - "disk": 0, - "diskread": 6656, - "diskwrite": 0, - "id": "qemu/100", - "maxcpu": 1, - "maxdisk": 34359738368, - "maxmem": 4294967296, - "mem": 35304543, - "name": "pxe.home.arpa", - "netin": 416956, - "netout": 17330, - "node": NODE1, - "pid": 4076688, - "status": "running", - "template": False, - "type": "qemu", - "uptime": 669, - "vmid": 100, - }, - { - "cpu": 0, - "cpus": 1, - "disk": 0, - "diskread": 0, - "diskwrite": 0, - "id": "qemu/101", - "maxcpu": 1, - "maxdisk": 0, - "maxmem": 536870912, - "mem": 0, - "name": "test1", - "netin": 0, - "netout": 0, - "node": NODE2, - "pool": "pool1", - "status": "stopped", - "template": False, - "type": "qemu", - "uptime": 0, - "vmid": 101, - }, - { - "cpu": 0, - "cpus": 2, - "disk": 352190464, - "diskread": 0, - "diskwrite": 0, - "id": "lxc/102", - "maxcpu": 2, - "maxdisk": 10737418240, - "maxmem": 536870912, - "maxswap": 536870912, - "mem": 28192768, - "name": "test-lxc.home.arpa", - "netin": 102757, - "netout": 446, - "node": NODE1, - "pid": 4076752, - "status": "running", - "swap": 0, - "template": False, - "type": "lxc", - "uptime": 161, - "vmid": 102, - }, - { - "cpu": 0, - "cpus": 2, - "disk": 0, - "diskread": 0, - "diskwrite": 0, - "id": "lxc/103", - "maxcpu": 2, - "maxdisk": 10737418240, - "maxmem": 536870912, - "maxswap": 536870912, - "mem": 0, - "name": "test1-lxc.home.arpa", - "netin": 0, - "netout": 0, - "node": NODE2, - "pool": "pool1", - "status": "stopped", - "swap": 0, - "template": False, - "type": "lxc", - "uptime": 0, - "vmid": 103, - }, - { - "cpu": 0, - "cpus": 2, - "disk": 0, - "diskread": 0, - "diskwrite": 0, - "id": "lxc/104", - "maxcpu": 2, - "maxdisk": 10737418240, - "maxmem": 536870912, - "maxswap": 536870912, - "mem": 0, - "name": "test-lxc.home.arpa", - "netin": 0, - "netout": 0, - "node": NODE2, - "pool": "pool1", - "status": "stopped", - "swap": 0, - "template": False, - "type": "lxc", - "uptime": 0, - "vmid": 104, - }, - { - "cpu": 0, - "cpus": 2, - "disk": 0, - "diskread": 0, - "diskwrite": 0, - "id": "lxc/105", - "maxcpu": 2, - "maxdisk": 10737418240, - "maxmem": 536870912, - "maxswap": 536870912, - "mem": 0, - "name": "", - "netin": 0, - "netout": 0, - "node": NODE2, - "pool": "pool1", - "status": "stopped", - "swap": 0, - "template": False, - "type": "lxc", - "uptime": 0, - "vmid": 105, - }, -] - - -def get_module_args(type="all", node=None, vmid=None, name=None, config="none"): - return { - "api_host": "host", - "api_user": "user", - "api_password": "password", - "node": node, - "type": type, - "vmid": vmid, - "name": name, - "config": config, - } - - -class TestProxmoxVmInfoModule(ModuleTestCase): - def setUp(self): - super(TestProxmoxVmInfoModule, self).setUp() - proxmox_utils.HAS_PROXMOXER = True - self.module = proxmox_vm_info - self.connect_mock = patch( - "ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect", - ).start() - 
self.connect_mock.return_value.nodes.return_value.lxc.return_value.get.return_value = ( - RAW_LXC_OUTPUT - ) - self.connect_mock.return_value.nodes.return_value.qemu.return_value.get.return_value = ( - RAW_QEMU_OUTPUT - ) - self.connect_mock.return_value.cluster.return_value.resources.return_value.get.return_value = ( - RAW_CLUSTER_OUTPUT - ) - self.connect_mock.return_value.nodes.get.return_value = [{"node": NODE1}] - - def tearDown(self): - self.connect_mock.stop() - super(TestProxmoxVmInfoModule, self).tearDown() - - def test_module_fail_when_required_args_missing(self): - with pytest.raises(AnsibleFailJson) as exc_info: - set_module_args({}) - self.module.main() - - result = exc_info.value.args[0] - assert result["msg"] == "missing required arguments: api_host, api_user" - - def test_get_lxc_vms_information(self): - with pytest.raises(AnsibleExitJson) as exc_info: - set_module_args(get_module_args(type="lxc")) - expected_output = [vm for vm in EXPECTED_VMS_OUTPUT if vm["type"] == "lxc"] - self.module.main() - - result = exc_info.value.args[0] - assert result["changed"] is False - assert result["proxmox_vms"] == expected_output - - def test_get_qemu_vms_information(self): - with pytest.raises(AnsibleExitJson) as exc_info: - set_module_args(get_module_args(type="qemu")) - expected_output = [vm for vm in EXPECTED_VMS_OUTPUT if vm["type"] == "qemu"] - self.module.main() - - result = exc_info.value.args[0] - assert result["proxmox_vms"] == expected_output - - def test_get_all_vms_information(self): - with pytest.raises(AnsibleExitJson) as exc_info: - set_module_args(get_module_args()) - self.module.main() - - result = exc_info.value.args[0] - assert result["proxmox_vms"] == EXPECTED_VMS_OUTPUT - - def test_vmid_is_converted_to_int(self): - with pytest.raises(AnsibleExitJson) as exc_info: - set_module_args(get_module_args(type="lxc")) - self.module.main() - - result = exc_info.value.args[0] - assert isinstance(result["proxmox_vms"][0]["vmid"], int) - - def test_get_specific_lxc_vm_information(self): - with pytest.raises(AnsibleExitJson) as exc_info: - vmid = 102 - expected_output = [ - vm - for vm in EXPECTED_VMS_OUTPUT - if vm["vmid"] == vmid and vm["type"] == "lxc" - ] - set_module_args(get_module_args(type="lxc", vmid=vmid)) - self.module.main() - - result = exc_info.value.args[0] - assert result["proxmox_vms"] == expected_output - assert len(result["proxmox_vms"]) == 1 - - def test_get_specific_qemu_vm_information(self): - with pytest.raises(AnsibleExitJson) as exc_info: - vmid = 100 - expected_output = [ - vm - for vm in EXPECTED_VMS_OUTPUT - if vm["vmid"] == vmid and vm["type"] == "qemu" - ] - set_module_args(get_module_args(type="qemu", vmid=vmid)) - self.module.main() - - result = exc_info.value.args[0] - assert result["proxmox_vms"] == expected_output - assert len(result["proxmox_vms"]) == 1 - - def test_get_specific_vm_information(self): - with pytest.raises(AnsibleExitJson) as exc_info: - vmid = 100 - expected_output = [vm for vm in EXPECTED_VMS_OUTPUT if vm["vmid"] == vmid] - set_module_args(get_module_args(type="all", vmid=vmid)) - self.module.main() - - result = exc_info.value.args[0] - assert result["proxmox_vms"] == expected_output - assert len(result["proxmox_vms"]) == 1 - - def test_get_specific_vm_information_by_using_name(self): - name = "test1-lxc.home.arpa" - self.connect_mock.return_value.cluster.resources.get.return_value = [ - {"name": name, "vmid": "103"} - ] - - with pytest.raises(AnsibleExitJson) as exc_info: - expected_output = [vm for vm in EXPECTED_VMS_OUTPUT 
if vm["name"] == name] - set_module_args(get_module_args(type="all", name=name)) - self.module.main() - - result = exc_info.value.args[0] - assert result["proxmox_vms"] == expected_output - assert len(result["proxmox_vms"]) == 1 - - def test_get_multiple_vms_with_the_same_name(self): - name = "test-lxc.home.arpa" - self.connect_mock.return_value.cluster.resources.get.return_value = [ - {"name": name, "vmid": "102"}, - {"name": name, "vmid": "104"}, - ] - - with pytest.raises(AnsibleExitJson) as exc_info: - expected_output = [vm for vm in EXPECTED_VMS_OUTPUT if vm["name"] == name] - set_module_args(get_module_args(type="all", name=name)) - self.module.main() - - result = exc_info.value.args[0] - assert result["proxmox_vms"] == expected_output - assert len(result["proxmox_vms"]) == 2 - - def test_get_vm_with_an_empty_name(self): - name = "" - self.connect_mock.return_value.cluster.resources.get.return_value = [ - {"name": name, "vmid": "105"}, - ] - - with pytest.raises(AnsibleExitJson) as exc_info: - expected_output = [vm for vm in EXPECTED_VMS_OUTPUT if vm["name"] == name] - set_module_args(get_module_args(type="all", name=name)) - self.module.main() - - result = exc_info.value.args[0] - assert result["proxmox_vms"] == expected_output - assert len(result["proxmox_vms"]) == 1 - - def test_get_all_lxc_vms_from_specific_node(self): - with pytest.raises(AnsibleExitJson) as exc_info: - expected_output = [ - vm - for vm in EXPECTED_VMS_OUTPUT - if vm["node"] == NODE1 and vm["type"] == "lxc" - ] - set_module_args(get_module_args(type="lxc", node=NODE1)) - self.module.main() - - result = exc_info.value.args[0] - assert result["proxmox_vms"] == expected_output - assert len(result["proxmox_vms"]) == 1 - - def test_get_all_qemu_vms_from_specific_node(self): - with pytest.raises(AnsibleExitJson) as exc_info: - expected_output = [ - vm - for vm in EXPECTED_VMS_OUTPUT - if vm["node"] == NODE1 and vm["type"] == "qemu" - ] - set_module_args(get_module_args(type="qemu", node=NODE1)) - self.module.main() - - result = exc_info.value.args[0] - assert result["proxmox_vms"] == expected_output - assert len(result["proxmox_vms"]) == 1 - - def test_get_all_vms_from_specific_node(self): - with pytest.raises(AnsibleExitJson) as exc_info: - expected_output = [vm for vm in EXPECTED_VMS_OUTPUT if vm["node"] == NODE1] - set_module_args(get_module_args(node=NODE1)) - self.module.main() - - result = exc_info.value.args[0] - assert result["proxmox_vms"] == expected_output - assert len(result["proxmox_vms"]) == 2 - - def test_module_returns_empty_list_when_vm_does_not_exist(self): - with pytest.raises(AnsibleExitJson) as exc_info: - vmid = 200 - set_module_args(get_module_args(type="all", vmid=vmid)) - self.module.main() - - result = exc_info.value.args[0] - assert result["proxmox_vms"] == [] - - def test_module_fail_when_qemu_request_fails(self): - self.connect_mock.return_value.nodes.return_value.qemu.return_value.get.side_effect = IOError( - "Some mocked connection error." - ) - with pytest.raises(AnsibleFailJson) as exc_info: - set_module_args(get_module_args(type="qemu")) - self.module.main() - - result = exc_info.value.args[0] - assert "Failed to retrieve QEMU VMs information:" in result["msg"] - - def test_module_fail_when_lxc_request_fails(self): - self.connect_mock.return_value.nodes.return_value.lxc.return_value.get.side_effect = IOError( - "Some mocked connection error." 
- ) - with pytest.raises(AnsibleFailJson) as exc_info: - set_module_args(get_module_args(type="lxc")) - self.module.main() - - result = exc_info.value.args[0] - assert "Failed to retrieve LXC VMs information:" in result["msg"] - - def test_module_fail_when_cluster_resources_request_fails(self): - self.connect_mock.return_value.cluster.return_value.resources.return_value.get.side_effect = IOError( - "Some mocked connection error." - ) - with pytest.raises(AnsibleFailJson) as exc_info: - set_module_args(get_module_args()) - self.module.main() - - result = exc_info.value.args[0] - assert ( - "Failed to retrieve VMs information from cluster resources:" - in result["msg"] - ) - - def test_module_fail_when_node_does_not_exist(self): - with pytest.raises(AnsibleFailJson) as exc_info: - set_module_args(get_module_args(type="all", node="NODE3")) - self.module.main() - - result = exc_info.value.args[0] - assert result["msg"] == "Node NODE3 doesn't exist in PVE cluster" - - def test_call_to_get_vmid_is_not_used_when_vmid_provided(self): - with patch( - "ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible.get_vmid" - ) as get_vmid_mock: - with pytest.raises(AnsibleExitJson): - vmid = 100 - set_module_args( - get_module_args(type="all", vmid=vmid, name="something") - ) - self.module.main() - - assert get_vmid_mock.call_count == 0 - - def test_config_returned_when_specified_qemu_vm_with_config(self): - config_vm_value = { - 'scsi0': 'local-lvm:vm-101-disk-0,iothread=1,size=32G', - 'net0': 'virtio=4E:79:9F:A8:EE:E4,bridge=vmbr0,firewall=1', - 'scsihw': 'virtio-scsi-single', - 'cores': 1, - 'name': 'test1', - 'ostype': 'l26', - 'boot': 'order=scsi0;ide2;net0', - 'memory': 2048, - 'sockets': 1, - } - (self.connect_mock.return_value.nodes.return_value.qemu.return_value. 
- config.return_value.get.return_value) = config_vm_value - - with pytest.raises(AnsibleExitJson) as exc_info: - vmid = 101 - set_module_args(get_module_args( - type="qemu", - vmid=vmid, - config="current", - )) - expected_output = [vm for vm in EXPECTED_VMS_OUTPUT if vm["vmid"] == vmid] - expected_output[0]["config"] = config_vm_value - self.module.main() - - result = exc_info.value.args[0] - assert result["proxmox_vms"] == expected_output diff --git a/tests/unit/plugins/modules/test_puppet.py b/tests/unit/plugins/modules/test_puppet.py index efdb042a5a..7a1a231693 100644 --- a/tests/unit/plugins/modules/test_puppet.py +++ b/tests/unit/plugins/modules/test_puppet.py @@ -14,7 +14,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import puppet -from .helper import Helper, RunCommandMock # pylint: disable=unused-import +from .uthelper import UTHelper, RunCommandMock -Helper.from_module(puppet, __name__) +UTHelper.from_module(puppet, __name__, mocks=[RunCommandMock]) diff --git a/tests/unit/plugins/modules/test_puppet.yaml b/tests/unit/plugins/modules/test_puppet.yaml index 668571273c..df813c6231 100644 --- a/tests/unit/plugins/modules/test_puppet.yaml +++ b/tests/unit/plugins/modules/test_puppet.yaml @@ -4,228 +4,231 @@ # SPDX-License-Identifier: GPL-3.0-or-later --- -- id: puppet_agent_plain - input: {} - output: - changed: false - mocks: - run_command: - - command: [/testbin/puppet, config, print, agent_disabled_lockfile] - environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: false} - rc: 0 - out: "blah, anything" - err: "" - - command: - - /testbin/timeout - - -s - - "9" - - 30m - - /testbin/puppet - - agent - - --onetime - - --no-daemonize - - --no-usecacheonfailure - - --no-splay - - --detailed-exitcodes - - --verbose - - --color - - "0" - environ: *env-def - rc: 0 - out: "" - err: "" -- id: puppet_agent_certname - input: - certname: potatobox - output: - changed: false - mocks: - run_command: - - command: [/testbin/puppet, config, print, agent_disabled_lockfile] - environ: *env-def - rc: 0 - out: "blah, anything" - err: "" - - command: - - /testbin/timeout - - -s - - "9" - - 30m - - /testbin/puppet - - agent - - --onetime - - --no-daemonize - - --no-usecacheonfailure - - --no-splay - - --detailed-exitcodes - - --verbose - - --color - - "0" - - --certname=potatobox - environ: *env-def - rc: 0 - out: "" - err: "" -- id: puppet_agent_tags_abc - input: - tags: [a, b, c] - output: - changed: false - mocks: - run_command: - - command: [/testbin/puppet, config, print, agent_disabled_lockfile] - environ: *env-def - rc: 0 - out: "blah, anything" - err: "" - - command: - - /testbin/timeout - - -s - - "9" - - 30m - - /testbin/puppet - - agent - - --onetime - - --no-daemonize - - --no-usecacheonfailure - - --no-splay - - --detailed-exitcodes - - --verbose - - --color - - "0" - - --tags - - a,b,c - environ: *env-def - rc: 0 - out: "" - err: "" -- id: puppet_agent_skip_tags_def - input: - skip_tags: [d, e, f] - output: - changed: false - mocks: - run_command: - - command: [/testbin/puppet, config, print, agent_disabled_lockfile] - environ: *env-def - rc: 0 - out: "blah, anything" - err: "" - - command: - - /testbin/timeout - - -s - - "9" - - 30m - - /testbin/puppet - - agent - - --onetime - - --no-daemonize - - --no-usecacheonfailure - - --no-splay - - --detailed-exitcodes - - --verbose - - --color - - "0" - - --skip_tags - - d,e,f - environ: *env-def - rc: 0 - out: "" - err: "" -- id: puppet_agent_noop_false - input: - noop: false - output: - 
changed: false - mocks: - run_command: - - command: [/testbin/puppet, config, print, agent_disabled_lockfile] - environ: *env-def - rc: 0 - out: "blah, anything" - err: "" - - command: - - /testbin/timeout - - -s - - "9" - - 30m - - /testbin/puppet - - agent - - --onetime - - --no-daemonize - - --no-usecacheonfailure - - --no-splay - - --detailed-exitcodes - - --verbose - - --color - - "0" - - --no-noop - environ: *env-def - rc: 0 - out: "" - err: "" -- id: puppet_agent_noop_true - input: - noop: true - output: - changed: false - mocks: - run_command: - - command: [/testbin/puppet, config, print, agent_disabled_lockfile] - environ: *env-def - rc: 0 - out: "blah, anything" - err: "" - - command: - - /testbin/timeout - - -s - - "9" - - 30m - - /testbin/puppet - - agent - - --onetime - - --no-daemonize - - --no-usecacheonfailure - - --no-splay - - --detailed-exitcodes - - --verbose - - --color - - "0" - - --noop - environ: *env-def - rc: 0 - out: "" - err: "" -- id: puppet_agent_waitforlock - input: - waitforlock: 30 - output: - changed: false - mocks: - run_command: - - command: [/testbin/puppet, config, print, agent_disabled_lockfile] - environ: *env-def - rc: 0 - out: "blah, anything" - err: "" - - command: - - /testbin/timeout - - -s - - "9" - - 30m - - /testbin/puppet - - agent - - --onetime - - --no-daemonize - - --no-usecacheonfailure - - --no-splay - - --detailed-exitcodes - - --verbose - - --color - - "0" - - --waitforlock - - "30" - environ: *env-def - rc: 0 - out: "" - err: "" +anchors: + environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: false} +test_cases: + - id: puppet_agent_plain + input: {} + output: + changed: false + mocks: + run_command: + - command: [/testbin/puppet, config, print, agent_disabled_lockfile] + environ: *env-def + rc: 0 + out: blah, anything + err: '' + - command: + - /testbin/timeout + - -s + - '9' + - 30m + - /testbin/puppet + - agent + - --onetime + - --no-daemonize + - --no-usecacheonfailure + - --no-splay + - --detailed-exitcodes + - --verbose + - --color + - '0' + environ: *env-def + rc: 0 + out: '' + err: '' + - id: puppet_agent_certname + input: + certname: potatobox + output: + changed: false + mocks: + run_command: + - command: [/testbin/puppet, config, print, agent_disabled_lockfile] + environ: *env-def + rc: 0 + out: blah, anything + err: '' + - command: + - /testbin/timeout + - -s + - '9' + - 30m + - /testbin/puppet + - agent + - --onetime + - --no-daemonize + - --no-usecacheonfailure + - --no-splay + - --detailed-exitcodes + - --verbose + - --color + - '0' + - --certname=potatobox + environ: *env-def + rc: 0 + out: '' + err: '' + - id: puppet_agent_tags_abc + input: + tags: [a, b, c] + output: + changed: false + mocks: + run_command: + - command: [/testbin/puppet, config, print, agent_disabled_lockfile] + environ: *env-def + rc: 0 + out: blah, anything + err: '' + - command: + - /testbin/timeout + - -s + - '9' + - 30m + - /testbin/puppet + - agent + - --onetime + - --no-daemonize + - --no-usecacheonfailure + - --no-splay + - --detailed-exitcodes + - --verbose + - --color + - '0' + - --tags + - a,b,c + environ: *env-def + rc: 0 + out: '' + err: '' + - id: puppet_agent_skip_tags_def + input: + skip_tags: [d, e, f] + output: + changed: false + mocks: + run_command: + - command: [/testbin/puppet, config, print, agent_disabled_lockfile] + environ: *env-def + rc: 0 + out: blah, anything + err: '' + - command: + - /testbin/timeout + - -s + - '9' + - 30m + - /testbin/puppet + - agent + - --onetime + - --no-daemonize + - 
--no-usecacheonfailure + - --no-splay + - --detailed-exitcodes + - --verbose + - --color + - '0' + - --skip_tags + - d,e,f + environ: *env-def + rc: 0 + out: '' + err: '' + - id: puppet_agent_noop_false + input: + noop: false + output: + changed: false + mocks: + run_command: + - command: [/testbin/puppet, config, print, agent_disabled_lockfile] + environ: *env-def + rc: 0 + out: blah, anything + err: '' + - command: + - /testbin/timeout + - -s + - '9' + - 30m + - /testbin/puppet + - agent + - --onetime + - --no-daemonize + - --no-usecacheonfailure + - --no-splay + - --detailed-exitcodes + - --verbose + - --color + - '0' + - --no-noop + environ: *env-def + rc: 0 + out: '' + err: '' + - id: puppet_agent_noop_true + input: + noop: true + output: + changed: false + mocks: + run_command: + - command: [/testbin/puppet, config, print, agent_disabled_lockfile] + environ: *env-def + rc: 0 + out: blah, anything + err: '' + - command: + - /testbin/timeout + - -s + - '9' + - 30m + - /testbin/puppet + - agent + - --onetime + - --no-daemonize + - --no-usecacheonfailure + - --no-splay + - --detailed-exitcodes + - --verbose + - --color + - '0' + - --noop + environ: *env-def + rc: 0 + out: '' + err: '' + - id: puppet_agent_waitforlock + input: + waitforlock: 30 + output: + changed: false + mocks: + run_command: + - command: [/testbin/puppet, config, print, agent_disabled_lockfile] + environ: *env-def + rc: 0 + out: blah, anything + err: '' + - command: + - /testbin/timeout + - -s + - '9' + - 30m + - /testbin/puppet + - agent + - --onetime + - --no-daemonize + - --no-usecacheonfailure + - --no-splay + - --detailed-exitcodes + - --verbose + - --color + - '0' + - --waitforlock + - '30' + environ: *env-def + rc: 0 + out: '' + err: '' diff --git a/tests/unit/plugins/modules/test_redhat_subscription.py b/tests/unit/plugins/modules/test_redhat_subscription.py index 7be3740d26..bbdbbdab7d 100644 --- a/tests/unit/plugins/modules/test_redhat_subscription.py +++ b/tests/unit/plugins/modules/test_redhat_subscription.py @@ -199,11 +199,6 @@ TEST_CASES = [ {'check_rc': False}, (0, 'system identity: b26df632-25ed-4452-8f89-0308bfd167cb', '') ), - ( - ['/testbin/subscription-manager', 'remove', '--all'], - {'check_rc': True}, - (0, '', '') - ), ( ['/testbin/subscription-manager', 'unregister'], {'check_rc': True}, diff --git a/tests/unit/plugins/modules/test_redis_data.py b/tests/unit/plugins/modules/test_redis_data.py index da195f70a6..ef1af3d2e0 100644 --- a/tests/unit/plugins/modules/test_redis_data.py +++ b/tests/unit/plugins/modules/test_redis_data.py @@ -13,7 +13,7 @@ import json from redis import __version__ from ansible_collections.community.general.plugins.modules import redis_data -from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import set_module_args HAS_REDIS_USERNAME_OPTION = True if tuple(map(int, __version__.split('.'))) < (3, 4, 0): @@ -21,9 +21,9 @@ if tuple(map(int, __version__.split('.'))) < (3, 4, 0): def test_redis_data_without_arguments(capfd): - set_module_args({}) - with pytest.raises(SystemExit) as results: - redis_data.main() + with set_module_args({}): + with pytest.raises(SystemExit) as results: + redis_data.main() out, err = capfd.readouterr() assert not err assert json.loads(out)['failed'] @@ -31,16 +31,16 @@ def test_redis_data_without_arguments(capfd): @pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0") def 
test_redis_data_key(capfd, mocker): - set_module_args({'login_host': 'localhost', - 'login_user': 'root', - 'login_password': 'secret', - 'key': 'foo', - 'value': 'baz', - '_ansible_check_mode': False}) - mocker.patch('redis.Redis.get', return_value='bar') - mocker.patch('redis.Redis.set', return_value=True) - with pytest.raises(SystemExit): - redis_data.main() + with set_module_args({'login_host': 'localhost', + 'login_user': 'root', + 'login_password': 'secret', + 'key': 'foo', + 'value': 'baz', + '_ansible_check_mode': False}): + mocker.patch('redis.Redis.get', return_value='bar') + mocker.patch('redis.Redis.set', return_value=True) + with pytest.raises(SystemExit): + redis_data.main() out, err = capfd.readouterr() print(out) assert not err @@ -52,17 +52,17 @@ def test_redis_data_key(capfd, mocker): @pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0") def test_redis_data_existing_key_nx(capfd, mocker): - set_module_args({'login_host': 'localhost', - 'login_user': 'root', - 'login_password': 'secret', - 'key': 'foo', - 'value': 'baz', - 'non_existing': True, - '_ansible_check_mode': False}) - mocker.patch('redis.Redis.get', return_value='bar') - mocker.patch('redis.Redis.set', return_value=None) - with pytest.raises(SystemExit): - redis_data.main() + with set_module_args({'login_host': 'localhost', + 'login_user': 'root', + 'login_password': 'secret', + 'key': 'foo', + 'value': 'baz', + 'non_existing': True, + '_ansible_check_mode': False}): + mocker.patch('redis.Redis.get', return_value='bar') + mocker.patch('redis.Redis.set', return_value=None) + with pytest.raises(SystemExit): + redis_data.main() out, err = capfd.readouterr() print(out) assert not err @@ -76,17 +76,17 @@ def test_redis_data_existing_key_nx(capfd, mocker): @pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0") def test_redis_data_non_existing_key_xx(capfd, mocker): - set_module_args({'login_host': 'localhost', - 'login_user': 'root', - 'login_password': 'secret', - 'key': 'foo', - 'value': 'baz', - 'existing': True, - '_ansible_check_mode': False}) - mocker.patch('redis.Redis.get', return_value=None) - mocker.patch('redis.Redis.set', return_value=None) - with pytest.raises(SystemExit): - redis_data.main() + with set_module_args({'login_host': 'localhost', + 'login_user': 'root', + 'login_password': 'secret', + 'key': 'foo', + 'value': 'baz', + 'existing': True, + '_ansible_check_mode': False}): + mocker.patch('redis.Redis.get', return_value=None) + mocker.patch('redis.Redis.set', return_value=None) + with pytest.raises(SystemExit): + redis_data.main() out, err = capfd.readouterr() print(out) assert not err @@ -100,15 +100,15 @@ def test_redis_data_non_existing_key_xx(capfd, mocker): @pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0") def test_redis_data_delete_present_key(capfd, mocker): - set_module_args({'login_host': 'localhost', - 'login_user': 'root', - 'login_password': 'secret', - 'key': 'foo', - 'state': 'absent'}) - mocker.patch('redis.Redis.get', return_value='bar') - mocker.patch('redis.Redis.delete', return_value=1) - with pytest.raises(SystemExit): - redis_data.main() + with set_module_args({'login_host': 'localhost', + 'login_user': 'root', + 'login_password': 'secret', + 'key': 'foo', + 'state': 'absent'}): + mocker.patch('redis.Redis.get', return_value='bar') + mocker.patch('redis.Redis.delete', return_value=1) + with pytest.raises(SystemExit): + redis_data.main() out, err = capfd.readouterr() print(out) assert not 
err @@ -118,15 +118,15 @@ def test_redis_data_delete_present_key(capfd, mocker): @pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0") def test_redis_data_delete_absent_key(capfd, mocker): - set_module_args({'login_host': 'localhost', - 'login_user': 'root', - 'login_password': 'secret', - 'key': 'foo', - 'state': 'absent'}) - mocker.patch('redis.Redis.delete', return_value=0) - mocker.patch('redis.Redis.get', return_value=None) - with pytest.raises(SystemExit): - redis_data.main() + with set_module_args({'login_host': 'localhost', + 'login_user': 'root', + 'login_password': 'secret', + 'key': 'foo', + 'state': 'absent'}): + mocker.patch('redis.Redis.delete', return_value=0) + mocker.patch('redis.Redis.get', return_value=None) + with pytest.raises(SystemExit): + redis_data.main() out, err = capfd.readouterr() print(out) assert not err @@ -136,14 +136,14 @@ def test_redis_data_delete_absent_key(capfd, mocker): @pytest.mark.skipif(HAS_REDIS_USERNAME_OPTION, reason="Redis version > 3.4.0") def test_redis_data_fail_username(capfd, mocker): - set_module_args({'login_host': 'localhost', - 'login_user': 'root', - 'login_password': 'secret', - 'key': 'foo', - 'value': 'baz', - '_ansible_check_mode': False}) - with pytest.raises(SystemExit): - redis_data.main() + with set_module_args({'login_host': 'localhost', + 'login_user': 'root', + 'login_password': 'secret', + 'key': 'foo', + 'value': 'baz', + '_ansible_check_mode': False}): + with pytest.raises(SystemExit): + redis_data.main() out, err = capfd.readouterr() print(out) assert not err @@ -153,15 +153,15 @@ def test_redis_data_fail_username(capfd, mocker): def test_redis_data_key_no_username(capfd, mocker): - set_module_args({'login_host': 'localhost', - 'login_password': 'secret', - 'key': 'foo', - 'value': 'baz', - '_ansible_check_mode': False}) - mocker.patch('redis.Redis.get', return_value='bar') - mocker.patch('redis.Redis.set', return_value=True) - with pytest.raises(SystemExit): - redis_data.main() + with set_module_args({'login_host': 'localhost', + 'login_password': 'secret', + 'key': 'foo', + 'value': 'baz', + '_ansible_check_mode': False}): + mocker.patch('redis.Redis.get', return_value='bar') + mocker.patch('redis.Redis.set', return_value=True) + with pytest.raises(SystemExit): + redis_data.main() out, err = capfd.readouterr() print(out) assert not err @@ -172,15 +172,15 @@ def test_redis_data_key_no_username(capfd, mocker): def test_redis_delete_key_no_username(capfd, mocker): - set_module_args({'login_host': 'localhost', - 'login_password': 'secret', - 'key': 'foo', - 'state': 'absent', - '_ansible_check_mode': False}) - mocker.patch('redis.Redis.get', return_value='bar') - mocker.patch('redis.Redis.delete', return_value=1) - with pytest.raises(SystemExit): - redis_data.main() + with set_module_args({'login_host': 'localhost', + 'login_password': 'secret', + 'key': 'foo', + 'state': 'absent', + '_ansible_check_mode': False}): + mocker.patch('redis.Redis.get', return_value='bar') + mocker.patch('redis.Redis.delete', return_value=1) + with pytest.raises(SystemExit): + redis_data.main() out, err = capfd.readouterr() print(out) assert not err @@ -189,15 +189,15 @@ def test_redis_delete_key_no_username(capfd, mocker): def test_redis_delete_key_non_existent_key(capfd, mocker): - set_module_args({'login_host': 'localhost', - 'login_password': 'secret', - 'key': 'foo', - 'state': 'absent', - '_ansible_check_mode': False}) - mocker.patch('redis.Redis.get', return_value=None) - mocker.patch('redis.Redis.delete', 
return_value=0) - with pytest.raises(SystemExit): - redis_data.main() + with set_module_args({'login_host': 'localhost', + 'login_password': 'secret', + 'key': 'foo', + 'state': 'absent', + '_ansible_check_mode': False}): + mocker.patch('redis.Redis.get', return_value=None) + mocker.patch('redis.Redis.delete', return_value=0) + with pytest.raises(SystemExit): + redis_data.main() out, err = capfd.readouterr() print(out) assert not err @@ -206,15 +206,15 @@ def test_redis_delete_key_non_existent_key(capfd, mocker): def test_redis_set_key_check_mode_nochange(capfd, mocker): - set_module_args({'login_host': 'localhost', - 'login_password': 'secret', - 'key': 'foo', - 'state': 'present', - 'value': 'bar', - '_ansible_check_mode': True}) - mocker.patch('redis.Redis.get', return_value='bar') - with pytest.raises(SystemExit): - redis_data.main() + with set_module_args({'login_host': 'localhost', + 'login_password': 'secret', + 'key': 'foo', + 'state': 'present', + 'value': 'bar', + '_ansible_check_mode': True}): + mocker.patch('redis.Redis.get', return_value='bar') + with pytest.raises(SystemExit): + redis_data.main() out, err = capfd.readouterr() print(out) assert not err @@ -225,15 +225,15 @@ def test_redis_set_key_check_mode_nochange(capfd, mocker): def test_redis_set_key_check_mode_delete_nx(capfd, mocker): - set_module_args({'login_host': 'localhost', - 'login_password': 'secret', - 'key': 'foo', - 'state': 'present', - 'value': 'baz', - '_ansible_check_mode': True}) - mocker.patch('redis.Redis.get', return_value=None) - with pytest.raises(SystemExit): - redis_data.main() + with set_module_args({'login_host': 'localhost', + 'login_password': 'secret', + 'key': 'foo', + 'state': 'present', + 'value': 'baz', + '_ansible_check_mode': True}): + mocker.patch('redis.Redis.get', return_value=None) + with pytest.raises(SystemExit): + redis_data.main() out, err = capfd.readouterr() print(out) assert not err @@ -243,15 +243,15 @@ def test_redis_set_key_check_mode_delete_nx(capfd, mocker): def test_redis_set_key_check_mode_delete(capfd, mocker): - set_module_args({'login_host': 'localhost', - 'login_password': 'secret', - 'key': 'foo', - 'state': 'present', - 'value': 'baz', - '_ansible_check_mode': True}) - mocker.patch('redis.Redis.get', return_value='bar') - with pytest.raises(SystemExit): - redis_data.main() + with set_module_args({'login_host': 'localhost', + 'login_password': 'secret', + 'key': 'foo', + 'state': 'present', + 'value': 'baz', + '_ansible_check_mode': True}): + mocker.patch('redis.Redis.get', return_value='bar') + with pytest.raises(SystemExit): + redis_data.main() out, err = capfd.readouterr() print(out) assert not err @@ -261,15 +261,15 @@ def test_redis_set_key_check_mode_delete(capfd, mocker): def test_redis_set_key_check_mode(capfd, mocker): - set_module_args({'login_host': 'localhost', - 'login_password': 'secret', - 'key': 'foo', - 'state': 'present', - 'value': 'baz', - '_ansible_check_mode': True}) - mocker.patch('redis.Redis.get', return_value='bar') - with pytest.raises(SystemExit): - redis_data.main() + with set_module_args({'login_host': 'localhost', + 'login_password': 'secret', + 'key': 'foo', + 'state': 'present', + 'value': 'baz', + '_ansible_check_mode': True}): + mocker.patch('redis.Redis.get', return_value='bar') + with pytest.raises(SystemExit): + redis_data.main() out, err = capfd.readouterr() print(out) assert not err diff --git a/tests/unit/plugins/modules/test_redis_data_incr.py b/tests/unit/plugins/modules/test_redis_data_incr.py index d819b2f7e2..9ae0631f03 
100644 --- a/tests/unit/plugins/modules/test_redis_data_incr.py +++ b/tests/unit/plugins/modules/test_redis_data_incr.py @@ -14,7 +14,7 @@ import redis from redis import __version__ from ansible_collections.community.general.plugins.modules import redis_data_incr -from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import set_module_args HAS_REDIS_USERNAME_OPTION = True @@ -25,9 +25,9 @@ if HAS_REDIS_USERNAME_OPTION: def test_redis_data_incr_without_arguments(capfd): - set_module_args({}) - with pytest.raises(SystemExit) as results: - redis_data_incr.main() + with set_module_args({}): + with pytest.raises(SystemExit) as results: + redis_data_incr.main() out, err = capfd.readouterr() assert not err assert json.loads(out)['failed'] @@ -35,13 +35,13 @@ def test_redis_data_incr_without_arguments(capfd): @pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0") def test_redis_data_incr(capfd, mocker): - set_module_args({'login_host': 'localhost', - 'login_user': 'root', - 'login_password': 'secret', - 'key': 'foo', }) - mocker.patch('redis.Redis.incr', return_value=57) - with pytest.raises(SystemExit): - redis_data_incr.main() + with set_module_args({'login_host': 'localhost', + 'login_user': 'root', + 'login_password': 'secret', + 'key': 'foo', }): + mocker.patch('redis.Redis.incr', return_value=57) + with pytest.raises(SystemExit): + redis_data_incr.main() out, err = capfd.readouterr() print(out) assert not err @@ -53,14 +53,14 @@ def test_redis_data_incr(capfd, mocker): @pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0") def test_redis_data_incr_int(capfd, mocker): - set_module_args({'login_host': 'localhost', - 'login_user': 'root', - 'login_password': 'secret', - 'key': 'foo', - 'increment_int': 10}) - mocker.patch('redis.Redis.incrby', return_value=57) - with pytest.raises(SystemExit): - redis_data_incr.main() + with set_module_args({'login_host': 'localhost', + 'login_user': 'root', + 'login_password': 'secret', + 'key': 'foo', + 'increment_int': 10}): + mocker.patch('redis.Redis.incrby', return_value=57) + with pytest.raises(SystemExit): + redis_data_incr.main() out, err = capfd.readouterr() print(out) assert not err @@ -72,14 +72,14 @@ def test_redis_data_incr_int(capfd, mocker): @pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0") def test_redis_data_inc_float(capfd, mocker): - set_module_args({'login_host': 'localhost', - 'login_user': 'root', - 'login_password': 'secret', - 'key': 'foo', - 'increment_float': '5.5'}) - mocker.patch('redis.Redis.incrbyfloat', return_value=57.45) - with pytest.raises(SystemExit): - redis_data_incr.main() + with set_module_args({'login_host': 'localhost', + 'login_user': 'root', + 'login_password': 'secret', + 'key': 'foo', + 'increment_float': '5.5'}): + mocker.patch('redis.Redis.incrbyfloat', return_value=57.45) + with pytest.raises(SystemExit): + redis_data_incr.main() out, err = capfd.readouterr() print(out) assert not err @@ -91,13 +91,15 @@ def test_redis_data_inc_float(capfd, mocker): @pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0") def test_redis_data_incr_float_wrong_value(capfd): - set_module_args({'login_host': 'localhost', - 'login_user': 'root', - 'login_password': 'secret', - 'key': 'foo', - 'increment_float': 'not_a_number'}) - with pytest.raises(SystemExit): - redis_data_incr.main() + 
with set_module_args({ + 'login_host': 'localhost', + 'login_user': 'root', + 'login_password': 'secret', + 'key': 'foo', + 'increment_float': 'not_a_number' + }): + with pytest.raises(SystemExit): + redis_data_incr.main() out, err = capfd.readouterr() print(out) assert not err @@ -106,13 +108,13 @@ def test_redis_data_incr_float_wrong_value(capfd): @pytest.mark.skipif(HAS_REDIS_USERNAME_OPTION, reason="Redis version > 3.4.0") def test_redis_data_incr_fail_username(capfd, mocker): - set_module_args({'login_host': 'localhost', - 'login_user': 'root', - 'login_password': 'secret', - 'key': 'foo', - '_ansible_check_mode': False}) - with pytest.raises(SystemExit): - redis_data_incr.main() + with set_module_args({'login_host': 'localhost', + 'login_user': 'root', + 'login_password': 'secret', + 'key': 'foo', + '_ansible_check_mode': False}): + with pytest.raises(SystemExit): + redis_data_incr.main() out, err = capfd.readouterr() print(out) assert not err @@ -122,12 +124,12 @@ def test_redis_data_incr_fail_username(capfd, mocker): def test_redis_data_incr_no_username(capfd, mocker): - set_module_args({'login_host': 'localhost', - 'login_password': 'secret', - 'key': 'foo', }) - mocker.patch('redis.Redis.incr', return_value=57) - with pytest.raises(SystemExit): - redis_data_incr.main() + with set_module_args({'login_host': 'localhost', + 'login_password': 'secret', + 'key': 'foo', }): + mocker.patch('redis.Redis.incr', return_value=57) + with pytest.raises(SystemExit): + redis_data_incr.main() out, err = capfd.readouterr() print(out) assert not err @@ -138,13 +140,13 @@ def test_redis_data_incr_no_username(capfd, mocker): def test_redis_data_incr_float_no_username(capfd, mocker): - set_module_args({'login_host': 'localhost', - 'login_password': 'secret', - 'key': 'foo', - 'increment_float': '5.5'}) - mocker.patch('redis.Redis.incrbyfloat', return_value=57.45) - with pytest.raises(SystemExit): - redis_data_incr.main() + with set_module_args({'login_host': 'localhost', + 'login_password': 'secret', + 'key': 'foo', + 'increment_float': '5.5'}): + mocker.patch('redis.Redis.incrbyfloat', return_value=57.45) + with pytest.raises(SystemExit): + redis_data_incr.main() out, err = capfd.readouterr() print(out) assert not err @@ -155,13 +157,13 @@ def test_redis_data_incr_float_no_username(capfd, mocker): def test_redis_data_incr_check_mode(capfd, mocker): - set_module_args({'login_host': 'localhost', - 'login_password': 'secret', - 'key': 'foo', - '_ansible_check_mode': True}) - mocker.patch('redis.Redis.get', return_value=10) - with pytest.raises(SystemExit): - redis_data_incr.main() + with set_module_args({'login_host': 'localhost', + 'login_password': 'secret', + 'key': 'foo', + '_ansible_check_mode': True}): + mocker.patch('redis.Redis.get', return_value=10) + with pytest.raises(SystemExit): + redis_data_incr.main() out, err = capfd.readouterr() print(out) assert not err @@ -171,13 +173,13 @@ def test_redis_data_incr_check_mode(capfd, mocker): def test_redis_data_incr_check_mode_not_incrementable(capfd, mocker): - set_module_args({'login_host': 'localhost', - 'login_password': 'secret', - 'key': 'foo', - '_ansible_check_mode': True}) - mocker.patch('redis.Redis.get', return_value='bar') - with pytest.raises(SystemExit): - redis_data_incr.main() + with set_module_args({'login_host': 'localhost', + 'login_password': 'secret', + 'key': 'foo', + '_ansible_check_mode': True}): + mocker.patch('redis.Redis.get', return_value='bar') + with pytest.raises(SystemExit): + redis_data_incr.main() out, err = 
capfd.readouterr() print(out) assert not err @@ -190,14 +192,14 @@ def test_redis_data_incr_check_mode_not_incrementable(capfd, mocker): @pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0") def test_redis_data_incr_check_mode_permissions(capfd, mocker): - set_module_args({'login_host': 'localhost', - 'login_password': 'secret', - 'key': 'foo', - '_ansible_check_mode': True}) - redis.Redis.get = mocker.Mock(side_effect=NoPermissionError( - "this user has no permissions to run the 'get' command or its subcommand")) - with pytest.raises(SystemExit): - redis_data_incr.main() + with set_module_args({'login_host': 'localhost', + 'login_password': 'secret', + 'key': 'foo', + '_ansible_check_mode': True}): + redis.Redis.get = mocker.Mock(side_effect=NoPermissionError( + "this user has no permissions to run the 'get' command or its subcommand")) + with pytest.raises(SystemExit): + redis_data_incr.main() out, err = capfd.readouterr() print(out) assert not err diff --git a/tests/unit/plugins/modules/test_redis_data_info.py b/tests/unit/plugins/modules/test_redis_data_info.py index 302e003bf1..af09f09823 100644 --- a/tests/unit/plugins/modules/test_redis_data_info.py +++ b/tests/unit/plugins/modules/test_redis_data_info.py @@ -14,7 +14,7 @@ from redis import __version__ from ansible_collections.community.general.plugins.modules import ( redis_data_info) -from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import set_module_args HAS_REDIS_USERNAME_OPTION = True @@ -23,9 +23,9 @@ if tuple(map(int, __version__.split('.'))) < (3, 4, 0): def test_redis_data_info_without_arguments(capfd): - set_module_args({}) - with pytest.raises(SystemExit): - redis_data_info.main() + with set_module_args({}): + with pytest.raises(SystemExit): + redis_data_info.main() out, err = capfd.readouterr() assert not err assert json.loads(out)['failed'] @@ -33,14 +33,16 @@ def test_redis_data_info_without_arguments(capfd): @pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0") def test_redis_data_info_existing_key(capfd, mocker): - set_module_args({'login_host': 'localhost', - 'login_user': 'root', - 'login_password': 'secret', - 'key': 'foo', - '_ansible_check_mode': False}) - mocker.patch('redis.Redis.get', return_value='bar') - with pytest.raises(SystemExit): - redis_data_info.main() + with set_module_args({ + 'login_host': 'localhost', + 'login_user': 'root', + 'login_password': 'secret', + 'key': 'foo', + '_ansible_check_mode': False + }): + mocker.patch('redis.Redis.get', return_value='bar') + with pytest.raises(SystemExit): + redis_data_info.main() out, err = capfd.readouterr() print(out) assert not err @@ -50,14 +52,16 @@ def test_redis_data_info_existing_key(capfd, mocker): @pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0") def test_redis_data_info_absent_key(capfd, mocker): - set_module_args({'login_host': 'localhost', - 'login_user': 'root', - 'login_password': 'secret', - 'key': 'foo', - '_ansible_check_mode': False}) - mocker.patch('redis.Redis.get', return_value=None) - with pytest.raises(SystemExit): - redis_data_info.main() + with set_module_args({ + 'login_host': 'localhost', + 'login_user': 'root', + 'login_password': 'secret', + 'key': 'foo', + '_ansible_check_mode': False + }): + mocker.patch('redis.Redis.get', return_value=None) + with pytest.raises(SystemExit): + redis_data_info.main() 
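+        # The module always exits via SystemExit; its JSON result is emitted on
+        # stdout and read back through capfd below.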
out, err = capfd.readouterr() print(out) assert not err @@ -67,13 +71,15 @@ def test_redis_data_info_absent_key(capfd, mocker): @pytest.mark.skipif(HAS_REDIS_USERNAME_OPTION, reason="Redis version > 3.4.0") def test_redis_data_fail_username(capfd, mocker): - set_module_args({'login_host': 'localhost', - 'login_user': 'root', - 'login_password': 'secret', - 'key': 'foo', - '_ansible_check_mode': False}) - with pytest.raises(SystemExit): - redis_data_info.main() + with set_module_args({ + 'login_host': 'localhost', + 'login_user': 'root', + 'login_password': 'secret', + 'key': 'foo', + '_ansible_check_mode': False + }): + with pytest.raises(SystemExit): + redis_data_info.main() out, err = capfd.readouterr() print(out) assert not err @@ -84,13 +90,15 @@ def test_redis_data_fail_username(capfd, mocker): @pytest.mark.skipif(HAS_REDIS_USERNAME_OPTION, reason="Redis version > 3.4.0") def test_redis_data_info_absent_key_no_username(capfd, mocker): - set_module_args({'login_host': 'localhost', - 'login_password': 'secret', - 'key': 'foo', - '_ansible_check_mode': False}) - mocker.patch('redis.Redis.get', return_value=None) - with pytest.raises(SystemExit): - redis_data_info.main() + with set_module_args({ + 'login_host': 'localhost', + 'login_password': 'secret', + 'key': 'foo', + '_ansible_check_mode': False + }): + mocker.patch('redis.Redis.get', return_value=None) + with pytest.raises(SystemExit): + redis_data_info.main() out, err = capfd.readouterr() print(out) assert not err @@ -100,13 +108,15 @@ def test_redis_data_info_absent_key_no_username(capfd, mocker): @pytest.mark.skipif(HAS_REDIS_USERNAME_OPTION, reason="Redis version > 3.4.0") def test_redis_data_info_existing_key_no_username(capfd, mocker): - set_module_args({'login_host': 'localhost', - 'login_password': 'secret', - 'key': 'foo', - '_ansible_check_mode': False}) - mocker.patch('redis.Redis.get', return_value='bar') - with pytest.raises(SystemExit): - redis_data_info.main() + with set_module_args({ + 'login_host': 'localhost', + 'login_password': 'secret', + 'key': 'foo', + '_ansible_check_mode': False + }): + mocker.patch('redis.Redis.get', return_value='bar') + with pytest.raises(SystemExit): + redis_data_info.main() out, err = capfd.readouterr() print(out) assert not err diff --git a/tests/unit/plugins/modules/test_redis_info.py b/tests/unit/plugins/modules/test_redis_info.py index 831b8f4052..92ef1f4893 100644 --- a/tests/unit/plugins/modules/test_redis_info.py +++ b/tests/unit/plugins/modules/test_redis_info.py @@ -7,9 +7,9 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch, MagicMock from ansible_collections.community.general.plugins.modules import redis_info -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args class FakeRedisClient(MagicMock): @@ -47,8 +47,8 @@ class TestRedisInfoModule(ModuleTestCase): """Test without parameters""" with self.patch_redis_client(side_effect=FakeRedisClient) as redis_client: with self.assertRaises(AnsibleExitJson) as result: - set_module_args({}) - self.module.main() + with set_module_args({}): + self.module.main() 
         self.assertEqual(redis_client.call_count, 1)
         self.assertEqual(redis_client.call_args, ({'host': 'localhost',
                                                    'port': 6379,
@@ -64,12 +64,12 @@ class TestRedisInfoModule(ModuleTestCase):
         """Test with all parameters"""
         with self.patch_redis_client(side_effect=FakeRedisClient) as redis_client:
             with self.assertRaises(AnsibleExitJson) as result:
-                set_module_args({
+                with set_module_args({
                     'login_host': 'test',
                     'login_port': 1234,
                     'login_password': 'PASS'
-                })
-                self.module.main()
+                }):
+                    self.module.main()
         self.assertEqual(redis_client.call_count, 1)
         self.assertEqual(redis_client.call_args, ({'host': 'test',
                                                    'port': 1234,
@@ -85,7 +85,7 @@ class TestRedisInfoModule(ModuleTestCase):
         """Test with tls parameters"""
         with self.patch_redis_client(side_effect=FakeRedisClient) as redis_client:
             with self.assertRaises(AnsibleExitJson) as result:
-                set_module_args({
+                with set_module_args({
                     'login_host': 'test',
                     'login_port': 1234,
                     'login_password': 'PASS',
@@ -94,8 +94,8 @@ class TestRedisInfoModule(ModuleTestCase):
                     'client_cert_file': '/etc/ssl/client.pem',
                     'client_key_file': '/etc/ssl/client.key',
                     'validate_certs': False
-                })
-                self.module.main()
+                }):
+                    self.module.main()
         self.assertEqual(redis_client.call_count, 1)
         self.assertEqual(redis_client.call_args, ({'host': 'test',
                                                    'port': 1234,
@@ -111,7 +111,7 @@ class TestRedisInfoModule(ModuleTestCase):
         """Test failure message"""
         with self.patch_redis_client(side_effect=FakeRedisClientFail) as redis_client:
             with self.assertRaises(AnsibleFailJson) as result:
-                set_module_args({})
-                self.module.main()
+                with set_module_args({}):
+                    self.module.main()
         self.assertEqual(redis_client.call_count, 1)
         self.assertEqual(result.exception.args[0]['msg'], 'unable to connect to database: Test Error')
diff --git a/tests/unit/plugins/modules/test_rhsm_release.py b/tests/unit/plugins/modules/test_rhsm_release.py
index e8b2db6fdc..f07c91ee55 100644
--- a/tests/unit/plugins/modules/test_rhsm_release.py
+++ b/tests/unit/plugins/modules/test_rhsm_release.py
@@ -5,9 +5,9 @@
 from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
-from ansible_collections.community.general.tests.unit.compat.mock import call, patch
+from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import call, patch
 from ansible_collections.community.general.plugins.modules import rhsm_release
-from ansible_collections.community.general.tests.unit.plugins.modules.utils import (
+from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import (
     AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args)
 
 
@@ -52,15 +52,15 @@ class RhsmRepositoryReleaseModuleTestCase(ModuleTestCase):
     def test_release_set(self):
         # test that the module attempts to change the release when the current
         # release is not the same as the user-specified target release
-        set_module_args({'release': '7.5'})
-        self.module_main_command.side_effect = [
-            # first call, get_release: returns different version so set_release is called
-            (0, '7.4', ''),
-            # second call, set_release: just needs to exit with 0 rc
-            (0, '', ''),
-        ]
+        with set_module_args({'release': '7.5'}):
+            self.module_main_command.side_effect = [
+                # first call, get_release: returns different version so set_release is called
+                (0, '7.4', ''),
+                # second call, set_release: just needs to exit with 0 rc
+                (0, '', ''),
+            ]
 
-        result = self.module_main(AnsibleExitJson)
+            result = self.module_main(AnsibleExitJson)
 
         self.assertTrue(result['changed'])
         self.assertEqual('7.5',
                          result['current_release'])
 
@@ -72,13 +72,13 @@ class RhsmRepositoryReleaseModuleTestCase(ModuleTestCase):
     def test_release_set_idempotent(self):
         # test that the module does not attempt to change the release when
         # the current release matches the user-specified target release
-        set_module_args({'release': '7.5'})
-        self.module_main_command.side_effect = [
-            # first call, get_release: returns same version, set_release is not called
-            (0, '7.5', ''),
-        ]
+        with set_module_args({'release': '7.5'}):
+            self.module_main_command.side_effect = [
+                # first call, get_release: returns same version, set_release is not called
+                (0, '7.5', ''),
+            ]
 
-        result = self.module_main(AnsibleExitJson)
+            result = self.module_main(AnsibleExitJson)
 
         self.assertFalse(result['changed'])
         self.assertEqual('7.5', result['current_release'])
@@ -89,15 +89,15 @@ class RhsmRepositoryReleaseModuleTestCase(ModuleTestCase):
     def test_release_unset(self):
         # test that the module attempts to change the release when the current
         # release is not the same as the user-specified target release
-        set_module_args({'release': None})
-        self.module_main_command.side_effect = [
-            # first call, get_release: returns version so set_release is called
-            (0, '7.5', ''),
-            # second call, set_release: just needs to exit with 0 rc
-            (0, '', ''),
-        ]
+        with set_module_args({'release': None}):
+            self.module_main_command.side_effect = [
+                # first call, get_release: returns version so set_release is called
+                (0, '7.5', ''),
+                # second call, set_release: just needs to exit with 0 rc
+                (0, '', ''),
+            ]
 
-        result = self.module_main(AnsibleExitJson)
+            result = self.module_main(AnsibleExitJson)
 
         self.assertTrue(result['changed'])
         self.assertIsNone(result['current_release'])
@@ -109,13 +109,13 @@ class RhsmRepositoryReleaseModuleTestCase(ModuleTestCase):
     def test_release_unset_idempotent(self):
         # test that the module does not attempt to change the release when
         # the release is already unset
-        set_module_args({'release': None})
-        self.module_main_command.side_effect = [
-            # first call, get_release: returns no version, set_release is not called
-            (0, 'Release not set', ''),
-        ]
+        with set_module_args({'release': None}):
+            self.module_main_command.side_effect = [
+                # first call, get_release: returns no version, set_release is not called
+                (0, 'Release not set', ''),
+            ]
 
-        result = self.module_main(AnsibleExitJson)
+            result = self.module_main(AnsibleExitJson)
 
         self.assertFalse(result['changed'])
         self.assertIsNone(result['current_release'])
@@ -126,9 +126,8 @@ class RhsmRepositoryReleaseModuleTestCase(ModuleTestCase):
     def test_release_insane(self):
         # test that insane values for release trigger fail_json
         insane_value = 'this is an insane release value'
-        set_module_args({'release': insane_value})
-
-        result = self.module_main(AnsibleFailJson)
+        with set_module_args({'release': insane_value}):
+            result = self.module_main(AnsibleFailJson)
 
         # also ensure that the fail msg includes the insane value
         self.assertIn(insane_value, result['msg'])
diff --git a/tests/unit/plugins/modules/test_rpm_ostree_pkg.py b/tests/unit/plugins/modules/test_rpm_ostree_pkg.py
index 4888b64027..dded3f1e1c 100644
--- a/tests/unit/plugins/modules/test_rpm_ostree_pkg.py
+++ b/tests/unit/plugins/modules/test_rpm_ostree_pkg.py
@@ -6,9 +6,9 @@
 from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
-from ansible_collections.community.general.tests.unit.compat.mock import call, patch
+from
ansible_collections.community.internal_test_tools.tests.unit.compat.mock import call, patch from ansible_collections.community.general.plugins.modules import rpm_ostree_pkg -from ansible_collections.community.general.tests.unit.plugins.modules.utils import ( +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args) @@ -35,12 +35,12 @@ class RpmOSTreeModuleTestCase(ModuleTestCase): return exc.exception.args[0] def test_present(self): - set_module_args({'name': 'nfs-utils', 'state': 'present'}) - self.module_main_command.side_effect = [ - (0, '', ''), - ] + with set_module_args({'name': 'nfs-utils', 'state': 'present'}): + self.module_main_command.side_effect = [ + (0, '', ''), + ] - result = self.module_main(AnsibleExitJson) + result = self.module_main(AnsibleExitJson) self.assertTrue(result['changed']) self.assertEqual(['nfs-utils'], result['packages']) @@ -49,12 +49,12 @@ class RpmOSTreeModuleTestCase(ModuleTestCase): ]) def test_present_unchanged(self): - set_module_args({'name': 'nfs-utils', 'state': 'present'}) - self.module_main_command.side_effect = [ - (77, '', ''), - ] + with set_module_args({'name': 'nfs-utils', 'state': 'present'}): + self.module_main_command.side_effect = [ + (77, '', ''), + ] - result = self.module_main(AnsibleExitJson) + result = self.module_main(AnsibleExitJson) self.assertFalse(result['changed']) self.assertEqual(0, result['rc']) @@ -64,12 +64,12 @@ class RpmOSTreeModuleTestCase(ModuleTestCase): ]) def test_present_failed(self): - set_module_args({'name': 'nfs-utils', 'state': 'present'}) - self.module_main_command.side_effect = [ - (1, '', ''), - ] + with set_module_args({'name': 'nfs-utils', 'state': 'present'}): + self.module_main_command.side_effect = [ + (1, '', ''), + ] - result = self.module_main(AnsibleFailJson) + result = self.module_main(AnsibleFailJson) self.assertFalse(result['changed']) self.assertEqual(1, result['rc']) @@ -79,12 +79,12 @@ class RpmOSTreeModuleTestCase(ModuleTestCase): ]) def test_absent(self): - set_module_args({'name': 'nfs-utils', 'state': 'absent'}) - self.module_main_command.side_effect = [ - (0, '', ''), - ] + with set_module_args({'name': 'nfs-utils', 'state': 'absent'}): + self.module_main_command.side_effect = [ + (0, '', ''), + ] - result = self.module_main(AnsibleExitJson) + result = self.module_main(AnsibleExitJson) self.assertTrue(result['changed']) self.assertEqual(['nfs-utils'], result['packages']) @@ -93,12 +93,12 @@ class RpmOSTreeModuleTestCase(ModuleTestCase): ]) def test_absent_failed(self): - set_module_args({'name': 'nfs-utils', 'state': 'absent'}) - self.module_main_command.side_effect = [ - (1, '', ''), - ] + with set_module_args({'name': 'nfs-utils', 'state': 'absent'}): + self.module_main_command.side_effect = [ + (1, '', ''), + ] - result = self.module_main(AnsibleFailJson) + result = self.module_main(AnsibleFailJson) self.assertFalse(result['changed']) self.assertEqual(1, result['rc']) diff --git a/tests/unit/plugins/modules/test_rundeck_acl_policy.py b/tests/unit/plugins/modules/test_rundeck_acl_policy.py new file mode 100644 index 0000000000..564446cf3e --- /dev/null +++ b/tests/unit/plugins/modules/test_rundeck_acl_policy.py @@ -0,0 +1,156 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import 
(absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+from ansible_collections.community.general.plugins.modules import rundeck_acl_policy
+from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch
+from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import (
+    set_module_args,
+    AnsibleExitJson,
+    exit_json,
+    fail_json
+)
+
+
+@pytest.fixture(autouse=True)
+def module():
+    with patch.multiple(
+        "ansible.module_utils.basic.AnsibleModule",
+        exit_json=exit_json,
+        fail_json=fail_json,
+    ):
+        yield
+
+
+# define our two table entries: system ACL vs. project ACL
+PROJECT_TABLE = [
+    (None, "system/acl"),
+    ("test_project", "project/test_project/acl"),
+]
+
+
+@pytest.mark.parametrize("project, prefix", PROJECT_TABLE)
+@patch.object(rundeck_acl_policy, 'api_request')
+def test_acl_create(api_request_mock, project, prefix):
+    """Test creating a new ACL, both system-level and project-level."""
+    name = "my_policy"
+    policy = "test_policy_yaml"
+    # simulate: GET→404, POST→201, final GET→200
+    api_request_mock.side_effect = [
+        (None, {'status': 404}),
+        (None, {'status': 201}),
+        ({"contents": policy}, {'status': 200}),
+    ]
+    args = {
+        'name': name,
+        'url': "https://rundeck.example.org",
+        'api_token': "mytoken",
+        'policy': policy,
+    }
+    if project:
+        args['project'] = project
+
+    with pytest.raises(AnsibleExitJson):
+        with set_module_args(args):
+            rundeck_acl_policy.main()
+
+    # should have done GET → POST → GET
+    assert api_request_mock.call_count == 3
+    args, kwargs = api_request_mock.call_args_list[1]
+    assert kwargs['endpoint'] == "%s/%s.aclpolicy" % (prefix, name)
+    assert kwargs['method'] == 'POST'
+
+
+@pytest.mark.parametrize("project, prefix", PROJECT_TABLE)
+@patch.object(rundeck_acl_policy, 'api_request')
+def test_acl_unchanged(api_request_mock, project, prefix):
+    """Test no-op when existing ACL contents match the desired policy."""
+    name = "unchanged_policy"
+    policy = "same_policy_yaml"
+    # first GET returns matching contents
+    api_request_mock.return_value = ({"contents": policy}, {'status': 200})
+
+    args = {
+        'name': name,
+        'url': "https://rundeck.example.org",
+        'api_token': "mytoken",
+        'policy': policy,
+    }
+    if project:
+        args['project'] = project
+
+    with pytest.raises(AnsibleExitJson):
+        with set_module_args(args):
+            rundeck_acl_policy.main()
+
+    # only a single GET
+    assert api_request_mock.call_count == 1
+    args, kwargs = api_request_mock.call_args
+    assert kwargs['endpoint'] == "%s/%s.aclpolicy" % (prefix, name)
+    # default method is GET
+    assert kwargs.get('method', 'GET') == 'GET'
+
+
+@pytest.mark.parametrize("project, prefix", PROJECT_TABLE)
+@patch.object(rundeck_acl_policy, 'api_request')
+def test_acl_remove(api_request_mock, project, prefix):
+    """Test removing an existing ACL, both system- and project-level."""
+    name = "remove_me"
+    # GET finds it, DELETE removes it
+    api_request_mock.side_effect = [
+        ({"contents": "old_yaml"}, {'status': 200}),
+        (None, {'status': 204}),
+    ]
+
+    args = {
+        'name': name,
+        'url': "https://rundeck.example.org",
+        'api_token': "mytoken",
+        'state': 'absent',
+    }
+    if project:
+        args['project'] = project
+
+    with pytest.raises(AnsibleExitJson):
+        with set_module_args(args):
+            rundeck_acl_policy.main()
+
+    # GET → DELETE
+    assert api_request_mock.call_count == 2
+    args, kwargs = api_request_mock.call_args_list[1]
+    assert kwargs['endpoint'] == "%s/%s.aclpolicy" % (prefix, name)
+    assert kwargs['method'] == 'DELETE'
+
+
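+# The nonexistent-removal case below complements test_acl_remove: when the
+# initial GET returns a 404 there is nothing to delete, so the module must not
+# issue a DELETE request and must report no change.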
+@pytest.mark.parametrize("project, prefix", PROJECT_TABLE) +@patch.object(rundeck_acl_policy, 'api_request') +def test_acl_remove_nonexistent(api_request_mock, project, prefix): + """Test removing a non-existent ACL results in no change.""" + name = "not_there" + # GET returns 404 + api_request_mock.return_value = (None, {'status': 404}) + + args = { + 'name': name, + 'url': "https://rundeck.example.org", + 'api_token': "mytoken", + 'state': 'absent', + } + if project: + args['project'] = project + + with pytest.raises(AnsibleExitJson): + with set_module_args(args): + rundeck_acl_policy.main() + + # only the initial GET + assert api_request_mock.call_count == 1 + args, kwargs = api_request_mock.call_args + assert kwargs['endpoint'] == "%s/%s.aclpolicy" % (prefix, name) + assert kwargs.get('method', 'GET') == 'GET' diff --git a/tests/unit/plugins/modules/test_scaleway_compute_private_network.py b/tests/unit/plugins/modules/test_scaleway_compute_private_network.py index df6fd91a4a..1432d22df3 100644 --- a/tests/unit/plugins/modules/test_scaleway_compute_private_network.py +++ b/tests/unit/plugins/modules/test_scaleway_compute_private_network.py @@ -11,8 +11,8 @@ import pytest from ansible_collections.community.general.plugins.modules import scaleway_compute_private_network from ansible_collections.community.general.plugins.module_utils.scaleway import Scaleway, Response -from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args -from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch def response_without_nics(): @@ -62,9 +62,9 @@ def response_remove_nics(): def test_scaleway_private_network_without_arguments(capfd): - set_module_args({}) - with pytest.raises(SystemExit) as results: - scaleway_compute_private_network.main() + with set_module_args({}): + with pytest.raises(SystemExit) as results: + scaleway_compute_private_network.main() out, err = capfd.readouterr() assert not err @@ -77,21 +77,22 @@ def test_scaleway_add_nic(capfd): cid = 'c004b4cd-ef5g-678h-90i1-jk2345678l90' url = 'servers/' + cid + '/private_nics' - set_module_args({"project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", - "state": "present", - "region": "par1", - "compute_id": cid, - "private_network_id": pnid - }) + with set_module_args({ + "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", + "state": "present", + "region": "par1", + "compute_id": cid, + "private_network_id": pnid + }): - with patch.object(Scaleway, 'get') as mock_scw_get: - mock_scw_get.return_value = response_without_nics() - with patch.object(Scaleway, 'post') as mock_scw_post: - mock_scw_post.return_value = response_when_add_nics() - with pytest.raises(SystemExit) as results: - scaleway_compute_private_network.main() - mock_scw_post.assert_any_call(path=url, data={"private_network_id": pnid}) - mock_scw_get.assert_any_call(url) + with patch.object(Scaleway, 'get') as mock_scw_get: + mock_scw_get.return_value = response_without_nics() + with patch.object(Scaleway, 'post') as mock_scw_post: + mock_scw_post.return_value = response_when_add_nics() + with pytest.raises(SystemExit) as results: + scaleway_compute_private_network.main() + mock_scw_post.assert_any_call(path=url, data={"private_network_id": pnid}) + mock_scw_get.assert_any_call(url) out, err = capfd.readouterr() del 
os.environ['SCW_API_TOKEN'] @@ -105,18 +106,19 @@ def test_scaleway_add_existing_nic(capfd): cid = 'c004b4cd-ef5g-678h-90i1-jk2345678l90' url = 'servers/' + cid + '/private_nics' - set_module_args({"project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", - "state": "present", - "region": "par1", - "compute_id": cid, - "private_network_id": pnid - }) + with set_module_args({ + "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", + "state": "present", + "region": "par1", + "compute_id": cid, + "private_network_id": pnid + }): - with patch.object(Scaleway, 'get') as mock_scw_get: - mock_scw_get.return_value = response_with_nics() - with pytest.raises(SystemExit) as results: - scaleway_compute_private_network.main() - mock_scw_get.assert_any_call(url) + with patch.object(Scaleway, 'get') as mock_scw_get: + mock_scw_get.return_value = response_with_nics() + with pytest.raises(SystemExit) as results: + scaleway_compute_private_network.main() + mock_scw_get.assert_any_call(url) out, err = capfd.readouterr() del os.environ['SCW_API_TOKEN'] @@ -132,21 +134,22 @@ def test_scaleway_remove_existing_nic(capfd): url = 'servers/' + cid + '/private_nics' urlremove = 'servers/' + cid + '/private_nics/' + nicid - set_module_args({"project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", - "state": "absent", - "region": "par1", - "compute_id": cid, - "private_network_id": pnid - }) + with set_module_args({ + "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", + "state": "absent", + "region": "par1", + "compute_id": cid, + "private_network_id": pnid + }): - with patch.object(Scaleway, 'get') as mock_scw_get: - mock_scw_get.return_value = response_with_nics() - with patch.object(Scaleway, 'delete') as mock_scw_delete: - mock_scw_delete.return_value = response_remove_nics() - with pytest.raises(SystemExit) as results: - scaleway_compute_private_network.main() - mock_scw_delete.assert_any_call(urlremove) - mock_scw_get.assert_any_call(url) + with patch.object(Scaleway, 'get') as mock_scw_get: + mock_scw_get.return_value = response_with_nics() + with patch.object(Scaleway, 'delete') as mock_scw_delete: + mock_scw_delete.return_value = response_remove_nics() + with pytest.raises(SystemExit) as results: + scaleway_compute_private_network.main() + mock_scw_delete.assert_any_call(urlremove) + mock_scw_get.assert_any_call(url) out, err = capfd.readouterr() @@ -161,18 +164,19 @@ def test_scaleway_remove_absent_nic(capfd): cid = 'c004b4cd-ef5g-678h-90i1-jk2345678l90' url = 'servers/' + cid + '/private_nics' - set_module_args({"project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", - "state": "absent", - "region": "par1", - "compute_id": cid, - "private_network_id": pnid - }) + with set_module_args({ + "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", + "state": "absent", + "region": "par1", + "compute_id": cid, + "private_network_id": pnid + }): - with patch.object(Scaleway, 'get') as mock_scw_get: - mock_scw_get.return_value = response_without_nics() - with pytest.raises(SystemExit) as results: - scaleway_compute_private_network.main() - mock_scw_get.assert_any_call(url) + with patch.object(Scaleway, 'get') as mock_scw_get: + mock_scw_get.return_value = response_without_nics() + with pytest.raises(SystemExit) as results: + scaleway_compute_private_network.main() + mock_scw_get.assert_any_call(url) out, err = capfd.readouterr() del os.environ['SCW_API_TOKEN'] diff --git a/tests/unit/plugins/modules/test_scaleway_private_network.py b/tests/unit/plugins/modules/test_scaleway_private_network.py index 21805d3db8..4d76380aa5 100644 --- 
a/tests/unit/plugins/modules/test_scaleway_private_network.py +++ b/tests/unit/plugins/modules/test_scaleway_private_network.py @@ -12,8 +12,8 @@ import pytest from ansible_collections.community.general.plugins.modules import scaleway_private_network from ansible_collections.community.general.plugins.module_utils.scaleway import Scaleway, Response -from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args -from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch def response_with_zero_network(): @@ -71,9 +71,9 @@ def response_delete(): def test_scaleway_private_network_without_arguments(capfd): - set_module_args({}) - with pytest.raises(SystemExit) as results: - scaleway_private_network.main() + with set_module_args({}): + with pytest.raises(SystemExit) as results: + scaleway_private_network.main() out, err = capfd.readouterr() assert not err @@ -81,20 +81,21 @@ def test_scaleway_private_network_without_arguments(capfd): def test_scaleway_create_pn(capfd): - set_module_args({"state": "present", - "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", - "region": "par2", - "name": "new_network_name", - "tags": ["tag1"] - }) + with set_module_args({ + "state": "present", + "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", + "region": "par2", + "name": "new_network_name", + "tags": ["tag1"] + }): - os.environ['SCW_API_TOKEN'] = 'notrealtoken' - with patch.object(Scaleway, 'get') as mock_scw_get: - mock_scw_get.return_value = response_with_zero_network() - with patch.object(Scaleway, 'post') as mock_scw_post: - mock_scw_post.return_value = response_create_new() - with pytest.raises(SystemExit) as results: - scaleway_private_network.main() + os.environ['SCW_API_TOKEN'] = 'notrealtoken' + with patch.object(Scaleway, 'get') as mock_scw_get: + mock_scw_get.return_value = response_with_zero_network() + with patch.object(Scaleway, 'post') as mock_scw_post: + mock_scw_post.return_value = response_create_new() + with pytest.raises(SystemExit) as results: + scaleway_private_network.main() mock_scw_post.assert_any_call(path='private-networks/', data={'name': 'new_network_name', 'project_id': 'a123b4cd-ef5g-678h-90i1-jk2345678l90', 'tags': ['tag1']}) @@ -105,18 +106,19 @@ def test_scaleway_create_pn(capfd): def test_scaleway_existing_pn(capfd): - set_module_args({"state": "present", - "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", - "region": "par2", - "name": "new_network_name", - "tags": ["tag1"] - }) + with set_module_args({ + "state": "present", + "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", + "region": "par2", + "name": "new_network_name", + "tags": ["tag1"] + }): - os.environ['SCW_API_TOKEN'] = 'notrealtoken' - with patch.object(Scaleway, 'get') as mock_scw_get: - mock_scw_get.return_value = response_with_new_network() - with pytest.raises(SystemExit) as results: - scaleway_private_network.main() + os.environ['SCW_API_TOKEN'] = 'notrealtoken' + with patch.object(Scaleway, 'get') as mock_scw_get: + mock_scw_get.return_value = response_with_new_network() + with pytest.raises(SystemExit) as results: + scaleway_private_network.main() mock_scw_get.assert_any_call('private-networks', params={'name': 'new_network_name', 'order_by': 'name_asc', 'page': 1, 'page_size': 10}) out, err = capfd.readouterr() @@ -127,20 +129,21 @@ def 
test_scaleway_existing_pn(capfd): def test_scaleway_add_tag_pn(capfd): - set_module_args({"state": "present", - "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", - "region": "par2", - "name": "new_network_name", - "tags": ["newtag"] - }) + with set_module_args({ + "state": "present", + "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", + "region": "par2", + "name": "new_network_name", + "tags": ["newtag"] + }): - os.environ['SCW_API_TOKEN'] = 'notrealtoken' - with patch.object(Scaleway, 'get') as mock_scw_get: - mock_scw_get.return_value = response_with_new_network() - with patch.object(Scaleway, 'patch') as mock_scw_patch: - mock_scw_patch.return_value = response_create_new_newtag() - with pytest.raises(SystemExit) as results: - scaleway_private_network.main() + os.environ['SCW_API_TOKEN'] = 'notrealtoken' + with patch.object(Scaleway, 'get') as mock_scw_get: + mock_scw_get.return_value = response_with_new_network() + with patch.object(Scaleway, 'patch') as mock_scw_patch: + mock_scw_patch.return_value = response_create_new_newtag() + with pytest.raises(SystemExit) as results: + scaleway_private_network.main() mock_scw_patch.assert_any_call(path='private-networks/c123b4cd-ef5g-678h-90i1-jk2345678l90', data={'name': 'new_network_name', 'tags': ['newtag']}) mock_scw_get.assert_any_call('private-networks', params={'name': 'new_network_name', 'order_by': 'name_asc', 'page': 1, 'page_size': 10}) @@ -152,20 +155,21 @@ def test_scaleway_add_tag_pn(capfd): def test_scaleway_remove_pn(capfd): - set_module_args({"state": "absent", - "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", - "region": "par2", - "name": "new_network_name", - "tags": ["newtag"] - }) + with set_module_args({ + "state": "absent", + "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", + "region": "par2", + "name": "new_network_name", + "tags": ["newtag"] + }): - os.environ['SCW_API_TOKEN'] = 'notrealtoken' - with patch.object(Scaleway, 'get') as mock_scw_get: - mock_scw_get.return_value = response_with_new_network() - with patch.object(Scaleway, 'delete') as mock_scw_delete: - mock_scw_delete.return_value = response_delete() - with pytest.raises(SystemExit) as results: - scaleway_private_network.main() + os.environ['SCW_API_TOKEN'] = 'notrealtoken' + with patch.object(Scaleway, 'get') as mock_scw_get: + mock_scw_get.return_value = response_with_new_network() + with patch.object(Scaleway, 'delete') as mock_scw_delete: + mock_scw_delete.return_value = response_delete() + with pytest.raises(SystemExit) as results: + scaleway_private_network.main() mock_scw_delete.assert_any_call('private-networks/c123b4cd-ef5g-678h-90i1-jk2345678l90') mock_scw_get.assert_any_call('private-networks', params={'name': 'new_network_name', 'order_by': 'name_asc', 'page': 1, 'page_size': 10}) @@ -177,18 +181,19 @@ def test_scaleway_remove_pn(capfd): def test_scaleway_absent_pn_not_exists(capfd): - set_module_args({"state": "absent", - "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", - "region": "par2", - "name": "new_network_name", - "tags": ["newtag"] - }) + with set_module_args({ + "state": "absent", + "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", + "region": "par2", + "name": "new_network_name", + "tags": ["newtag"] + }): - os.environ['SCW_API_TOKEN'] = 'notrealtoken' - with patch.object(Scaleway, 'get') as mock_scw_get: - mock_scw_get.return_value = response_with_zero_network() - with pytest.raises(SystemExit) as results: - scaleway_private_network.main() + os.environ['SCW_API_TOKEN'] = 'notrealtoken' + with patch.object(Scaleway, 'get') as 
mock_scw_get: + mock_scw_get.return_value = response_with_zero_network() + with pytest.raises(SystemExit) as results: + scaleway_private_network.main() mock_scw_get.assert_any_call('private-networks', params={'name': 'new_network_name', 'order_by': 'name_asc', 'page': 1, 'page_size': 10}) out, err = capfd.readouterr() diff --git a/tests/unit/plugins/modules/test_simpleinit_msb.py b/tests/unit/plugins/modules/test_simpleinit_msb.py index d97e9b5f29..ab3e0a1409 100644 --- a/tests/unit/plugins/modules/test_simpleinit_msb.py +++ b/tests/unit/plugins/modules/test_simpleinit_msb.py @@ -6,8 +6,8 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleFailJson, ModuleTestCase, set_module_args from ansible_collections.community.general.plugins.modules.simpleinit_msb import SimpleinitMSB, build_module @@ -91,110 +91,112 @@ class TestSimpleinitMSB(ModuleTestCase): def tearDown(self): super(TestSimpleinitMSB, self).tearDown() - def init_module(self, args): - set_module_args(args) - - return SimpleinitMSB(build_module()) - @patch('os.path.exists', return_value=True) @patch('ansible.module_utils.basic.AnsibleModule.get_bin_path', return_value="/sbin/telinit") def test_get_service_tools(self, *args, **kwargs): - simpleinit_msb = self.init_module({ + with set_module_args({ 'name': 'smgl-suspend-single', 'state': 'running', - }) + }): + simpleinit_msb = SimpleinitMSB(build_module()) - simpleinit_msb.get_service_tools() + simpleinit_msb.get_service_tools() - self.assertEqual(simpleinit_msb.telinit_cmd, "/sbin/telinit") + self.assertEqual(simpleinit_msb.telinit_cmd, "/sbin/telinit") @patch('ansible_collections.community.general.plugins.modules.simpleinit_msb.SimpleinitMSB.execute_command') def test_service_exists(self, execute_command): - simpleinit_msb = self.init_module({ + with set_module_args({ 'name': 'smgl-suspend-single', 'state': 'running', - }) + }): + simpleinit_msb = SimpleinitMSB(build_module()) - execute_command.return_value = (0, _TELINIT_LIST, "") + execute_command.return_value = (0, _TELINIT_LIST, "") - simpleinit_msb.service_exists() + simpleinit_msb.service_exists() @patch('ansible_collections.community.general.plugins.modules.simpleinit_msb.SimpleinitMSB.execute_command') def test_service_exists_not(self, execute_command): - simpleinit_msb = self.init_module({ + with set_module_args({ 'name': 'ntp', 'state': 'running', - }) + }): + simpleinit_msb = SimpleinitMSB(build_module()) - execute_command.return_value = (0, _TELINIT_LIST, "") + execute_command.return_value = (0, _TELINIT_LIST, "") - with self.assertRaises(AnsibleFailJson) as context: - simpleinit_msb.service_exists() + with self.assertRaises(AnsibleFailJson) as context: + simpleinit_msb.service_exists() - self.assertEqual("telinit could not find the requested service: ntp", context.exception.args[0]["msg"]) + self.assertEqual("telinit could not find the requested service: ntp", context.exception.args[0]["msg"]) @patch('ansible_collections.community.general.plugins.modules.simpleinit_msb.SimpleinitMSB.service_exists') 
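Reviewer note: the pattern in every hunk above is the same mechanical migration — `set_module_args()` is now consumed as a context manager imported from `community.internal_test_tools`, so the module arguments only exist for the duration of the `with` block, and per-file helpers like `init_module()` go away. Below is a minimal sketch of the idea, assuming an implementation along the lines of the one-shot helper that gets deleted from `test_ufw.py` and `test_usb_facts.py` later in this diff; the real context manager ships in `ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils` and may differ in detail.

```python
# Minimal sketch only -- not the actual community.internal_test_tools code.
import contextlib
import json

from ansible.module_utils import basic
from ansible.module_utils.common.text.converters import to_bytes


@contextlib.contextmanager
def set_module_args(args):
    """Prepare module arguments, then restore the previous state on exit."""
    serialized = to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': args}))
    previous = basic._ANSIBLE_ARGS
    basic._ANSIBLE_ARGS = serialized
    try:
        yield  # the module under test must instantiate AnsibleModule inside this block
    finally:
        basic._ANSIBLE_ARGS = previous  # no argument leakage into the next test
```

Scoping the arguments this way means a test that invokes `main()` outside the block fails loudly instead of silently reusing the previous test's arguments.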
@patch('ansible_collections.community.general.plugins.modules.simpleinit_msb.SimpleinitMSB.execute_command') def test_check_service_enabled(self, execute_command, service_exists): - simpleinit_msb = self.init_module({ + with set_module_args({ 'name': 'nscd', 'state': 'running', 'enabled': 'true', - }) + }): + simpleinit_msb = SimpleinitMSB(build_module()) - service_exists.return_value = True - execute_command.return_value = (0, _TELINIT_LIST_ENABLED, "") + service_exists.return_value = True + execute_command.return_value = (0, _TELINIT_LIST_ENABLED, "") - self.assertTrue(simpleinit_msb.service_enabled()) + self.assertTrue(simpleinit_msb.service_enabled()) - # Race condition check - with patch('ansible_collections.community.general.plugins.modules.simpleinit_msb.SimpleinitMSB.service_enabled', return_value=False): - execute_command.return_value = (0, "", _TELINIT_ALREADY_ENABLED) + # Race condition check + with patch('ansible_collections.community.general.plugins.modules.simpleinit_msb.SimpleinitMSB.service_enabled', return_value=False): + execute_command.return_value = (0, "", _TELINIT_ALREADY_ENABLED) - simpleinit_msb.service_enable() + simpleinit_msb.service_enable() - self.assertFalse(simpleinit_msb.changed) + self.assertFalse(simpleinit_msb.changed) @patch('ansible_collections.community.general.plugins.modules.simpleinit_msb.SimpleinitMSB.service_exists') @patch('ansible_collections.community.general.plugins.modules.simpleinit_msb.SimpleinitMSB.execute_command') def test_check_service_disabled(self, execute_command, service_exists): - simpleinit_msb = self.init_module({ + with set_module_args({ 'name': 'sysstat', 'state': 'stopped', 'enabled': 'false', - }) + }): + simpleinit_msb = SimpleinitMSB(build_module()) - service_exists.return_value = True - execute_command.return_value = (0, _TELINIT_LIST_DISABLED, "") + service_exists.return_value = True + execute_command.return_value = (0, _TELINIT_LIST_DISABLED, "") - self.assertFalse(simpleinit_msb.service_enabled()) + self.assertFalse(simpleinit_msb.service_enabled()) - # Race condition check - with patch('ansible_collections.community.general.plugins.modules.simpleinit_msb.SimpleinitMSB.service_enabled', return_value=True): - execute_command.return_value = (0, "", _TELINIT_ALREADY_DISABLED) + # Race condition check + with patch('ansible_collections.community.general.plugins.modules.simpleinit_msb.SimpleinitMSB.service_enabled', return_value=True): + execute_command.return_value = (0, "", _TELINIT_ALREADY_DISABLED) - simpleinit_msb.service_enable() + simpleinit_msb.service_enable() - self.assertFalse(simpleinit_msb.changed) + self.assertFalse(simpleinit_msb.changed) @patch('ansible_collections.community.general.plugins.modules.simpleinit_msb.SimpleinitMSB.service_control') def test_check_service_running(self, service_control): - simpleinit_msb = self.init_module({ + with set_module_args({ 'name': 'sshd', 'state': 'running', - }) + }): + simpleinit_msb = SimpleinitMSB(build_module()) - service_control.return_value = (0, _TELINIT_STATUS_RUNNING, "") + service_control.return_value = (0, _TELINIT_STATUS_RUNNING, "") - self.assertFalse(simpleinit_msb.get_service_status()) + self.assertFalse(simpleinit_msb.get_service_status()) @patch('ansible_collections.community.general.plugins.modules.simpleinit_msb.SimpleinitMSB.service_control') def test_check_service_running_not(self, service_control): - simpleinit_msb = self.init_module({ + with set_module_args({ 'name': 'smgl-metalog', 'state': 'running', - }) + }): + simpleinit_msb = 
SimpleinitMSB(build_module()) - service_control.return_value = (0, _TELINIT_STATUS_RUNNING_NOT, "") + service_control.return_value = (0, _TELINIT_STATUS_RUNNING_NOT, "") - self.assertFalse(simpleinit_msb.get_service_status()) + self.assertFalse(simpleinit_msb.get_service_status()) diff --git a/tests/unit/plugins/modules/test_slack.py b/tests/unit/plugins/modules/test_slack.py index 52ac9b7f37..e0c87f907f 100644 --- a/tests/unit/plugins/modules/test_slack.py +++ b/tests/unit/plugins/modules/test_slack.py @@ -7,9 +7,9 @@ __metaclass__ = type import json import pytest -from ansible_collections.community.general.tests.unit.compat.mock import Mock, patch +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import Mock, patch from ansible_collections.community.general.plugins.modules import slack -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args class TestSlackModule(ModuleTestCase): @@ -28,110 +28,121 @@ class TestSlackModule(ModuleTestCase): def test_without_required_parameters(self): """Failure must occurs when all parameters are missing""" with self.assertRaises(AnsibleFailJson): - set_module_args({}) - self.module.main() + with set_module_args({}): + self.module.main() def test_invalid_old_token(self): """Failure if there is an old style token""" - set_module_args({ + with set_module_args({ 'token': 'test', - }) - with self.assertRaises(AnsibleFailJson): - self.module.main() + }): + with self.assertRaises(AnsibleFailJson): + self.module.main() def test_successful_message(self): """tests sending a message. 
This is example 1 from the docs""" - set_module_args({ + with set_module_args({ 'token': 'XXXX/YYYY/ZZZZ', 'msg': 'test' - }) + }): + with patch.object(slack, "fetch_url") as fetch_url_mock: + fetch_url_mock.return_value = (None, {"status": 200}) + with self.assertRaises(AnsibleExitJson): + self.module.main() - with patch.object(slack, "fetch_url") as fetch_url_mock: - fetch_url_mock.return_value = (None, {"status": 200}) - with self.assertRaises(AnsibleExitJson): - self.module.main() - - self.assertTrue(fetch_url_mock.call_count, 1) - call_data = json.loads(fetch_url_mock.call_args[1]['data']) - assert call_data['username'] == "Ansible" - assert call_data['text'] == "test" - assert fetch_url_mock.call_args[1]['url'] == "https://hooks.slack.com/services/XXXX/YYYY/ZZZZ" + self.assertTrue(fetch_url_mock.call_count, 1) + call_data = json.loads(fetch_url_mock.call_args[1]['data']) + assert call_data['username'] == "Ansible" + assert call_data['text'] == "test" + assert fetch_url_mock.call_args[1]['url'] == "https://hooks.slack.com/services/XXXX/YYYY/ZZZZ" def test_failed_message(self): """tests failing to send a message""" - set_module_args({ + with set_module_args({ 'token': 'XXXX/YYYY/ZZZZ', 'msg': 'test' - }) - - with patch.object(slack, "fetch_url") as fetch_url_mock: - fetch_url_mock.return_value = (None, {"status": 404, 'msg': 'test'}) - with self.assertRaises(AnsibleFailJson): - self.module.main() + }): + with patch.object(slack, "fetch_url") as fetch_url_mock: + fetch_url_mock.return_value = (None, {"status": 404, 'msg': 'test'}) + with self.assertRaises(AnsibleFailJson): + self.module.main() def test_message_with_thread(self): """tests sending a message with a thread""" - set_module_args({ + with set_module_args({ 'token': 'XXXX/YYYY/ZZZZ', 'msg': 'test', 'thread_id': '100.00' - }) + }): + with patch.object(slack, "fetch_url") as fetch_url_mock: + fetch_url_mock.return_value = (None, {"status": 200}) + with self.assertRaises(AnsibleExitJson): + self.module.main() - with patch.object(slack, "fetch_url") as fetch_url_mock: - fetch_url_mock.return_value = (None, {"status": 200}) - with self.assertRaises(AnsibleExitJson): - self.module.main() - - self.assertTrue(fetch_url_mock.call_count, 1) - call_data = json.loads(fetch_url_mock.call_args[1]['data']) - assert call_data['username'] == "Ansible" - assert call_data['text'] == "test" - assert call_data['thread_ts'] == '100.00' - assert fetch_url_mock.call_args[1]['url'] == "https://hooks.slack.com/services/XXXX/YYYY/ZZZZ" + self.assertTrue(fetch_url_mock.call_count, 1) + call_data = json.loads(fetch_url_mock.call_args[1]['data']) + assert call_data['username'] == "Ansible" + assert call_data['text'] == "test" + assert call_data['thread_ts'] == '100.00' + assert fetch_url_mock.call_args[1]['url'] == "https://hooks.slack.com/services/XXXX/YYYY/ZZZZ" # https://github.com/ansible-collections/community.general/issues/1097 def test_ts_in_message_does_not_cause_edit(self): - set_module_args({ + with set_module_args({ 'token': 'xoxa-123456789abcdef', 'msg': 'test with ts' - }) + }): + with patch.object(slack, "fetch_url") as fetch_url_mock: + mock_response = Mock() + mock_response.read.return_value = '{"fake":"data"}' + fetch_url_mock.return_value = (mock_response, {"status": 200}) + with self.assertRaises(AnsibleExitJson): + self.module.main() - with patch.object(slack, "fetch_url") as fetch_url_mock: - mock_response = Mock() - mock_response.read.return_value = '{"fake":"data"}' - fetch_url_mock.return_value = (mock_response, {"status": 200}) - 
with self.assertRaises(AnsibleExitJson): - self.module.main() + self.assertTrue(fetch_url_mock.call_count, 1) + self.assertEqual(fetch_url_mock.call_args[1]['url'], "https://slack.com/api/chat.postMessage") - self.assertTrue(fetch_url_mock.call_count, 1) - self.assertEqual(fetch_url_mock.call_args[1]['url'], "https://slack.com/api/chat.postMessage") + def test_govslack_message(self): + with set_module_args({ + 'token': 'xoxa-123456789abcdef', + 'domain': 'slack-gov.com', + 'msg': 'test with ts' + }): + with patch.object(slack, "fetch_url") as fetch_url_mock: + mock_response = Mock() + mock_response.read.return_value = '{"fake":"data"}' + fetch_url_mock.return_value = (mock_response, {"status": 200}) + with self.assertRaises(AnsibleExitJson): + self.module.main() + + self.assertTrue(fetch_url_mock.call_count, 1) + self.assertEqual(fetch_url_mock.call_args[1]['url'], "https://slack-gov.com/api/chat.postMessage") def test_edit_message(self): - set_module_args({ + with set_module_args({ 'token': 'xoxa-123456789abcdef', 'msg': 'test2', 'message_id': '12345' - }) + }): + with patch.object(slack, "fetch_url") as fetch_url_mock: + mock_response = Mock() + mock_response.read.return_value = '{"messages":[{"ts":"12345","msg":"test1"}]}' + fetch_url_mock.side_effect = [ + (mock_response, {"status": 200}), + (mock_response, {"status": 200}), + ] + with self.assertRaises(AnsibleExitJson): + self.module.main() - with patch.object(slack, "fetch_url") as fetch_url_mock: - mock_response = Mock() - mock_response.read.return_value = '{"messages":[{"ts":"12345","msg":"test1"}]}' - fetch_url_mock.side_effect = [ - (mock_response, {"status": 200}), - (mock_response, {"status": 200}), - ] - with self.assertRaises(AnsibleExitJson): - self.module.main() - - self.assertTrue(fetch_url_mock.call_count, 2) - self.assertEqual(fetch_url_mock.call_args[1]['url'], "https://slack.com/api/chat.update") - call_data = json.loads(fetch_url_mock.call_args[1]['data']) - self.assertEqual(call_data['ts'], "12345") + self.assertTrue(fetch_url_mock.call_count, 2) + self.assertEqual(fetch_url_mock.call_args[1]['url'], "https://slack.com/api/chat.update") + call_data = json.loads(fetch_url_mock.call_args[1]['data']) + self.assertEqual(call_data['ts'], "12345") def test_message_with_blocks(self): """tests sending a message with blocks""" - set_module_args({ + with set_module_args({ 'token': 'XXXX/YYYY/ZZZZ', 'msg': 'test', 'blocks': [{ @@ -153,28 +164,27 @@ class TestSlackModule(ModuleTestCase): 'emoji': True } }] - }) + }): + with patch.object(slack, "fetch_url") as fetch_url_mock: + fetch_url_mock.return_value = (None, {"status": 200}) + with self.assertRaises(AnsibleExitJson): + self.module.main() - with patch.object(slack, "fetch_url") as fetch_url_mock: - fetch_url_mock.return_value = (None, {"status": 200}) - with self.assertRaises(AnsibleExitJson): - self.module.main() - - self.assertTrue(fetch_url_mock.call_count, 1) - call_data = json.loads(fetch_url_mock.call_args[1]['data']) - assert call_data['username'] == "Ansible" - assert call_data['blocks'][1]['text']['text'] == "test" - assert fetch_url_mock.call_args[1]['url'] == "https://hooks.slack.com/services/XXXX/YYYY/ZZZZ" + self.assertTrue(fetch_url_mock.call_count, 1) + call_data = json.loads(fetch_url_mock.call_args[1]['data']) + assert call_data['username'] == "Ansible" + assert call_data['blocks'][1]['text']['text'] == "test" + assert fetch_url_mock.call_args[1]['url'] == "https://hooks.slack.com/services/XXXX/YYYY/ZZZZ" def test_message_with_invalid_color(self): """tests 
sending invalid color value to module""" - set_module_args({ + with set_module_args({ 'token': 'XXXX/YYYY/ZZZZ', 'msg': 'test', 'color': 'aa', - }) - with self.assertRaises(AnsibleFailJson) as exec_info: - self.module.main() + }): + with self.assertRaises(AnsibleFailJson) as exec_info: + self.module.main() msg = "Color value specified should be either one of" \ " ['normal', 'good', 'warning', 'danger'] or any valid" \ diff --git a/tests/unit/plugins/modules/test_snap.py b/tests/unit/plugins/modules/test_snap.py index de7f35353a..e1897be5f2 100644 --- a/tests/unit/plugins/modules/test_snap.py +++ b/tests/unit/plugins/modules/test_snap.py @@ -9,7 +9,7 @@ __metaclass__ = type import sys from ansible_collections.community.general.plugins.modules import snap -from .helper import Helper, RunCommandMock # pylint: disable=unused-import +from .uthelper import UTHelper, RunCommandMock issue_6803_status_out = """Name Version Rev Tracking Publisher Notes @@ -376,103 +376,129 @@ issue_6803_kubectl_out = ( "\r\u001b[0m\u001b[?25h\u001b[Kkubectl (1.27/stable) v1.27.2 from Canonical** installed\n" ) -TEST_CASES = [ - dict( - id="simple case", - input={"name": ["hello-world"]}, - output=dict(changed=True, snaps_installed=["hello-world"]), - flags={}, - mocks=dict( - run_command=[ - dict( - command=['/testbin/snap', 'info', 'hello-world'], - environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, - rc=0, - out='name: hello-world\n', - err="", - ), - dict( - command=['/testbin/snap', 'list'], - environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, - rc=0, - out="", - err="", - ), - dict( - command=['/testbin/snap', 'install', 'hello-world'], - environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, - rc=0, - out="hello-world (12345/stable) v12345 from Canonical** installed\n", - err="", - ), - dict( - command=['/testbin/snap', 'list'], - environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, - rc=0, - out=( - "Name Version Rev Tracking Publisher Notes" - "core20 20220826 1623 latest/stable canonical** base" - "lxd 5.6-794016a 23680 latest/stable/… canonical** -" - "hello-world 5.6-794016a 23680 latest/stable/… canonical** -" - "snapd 2.57.4 17336 latest/stable canonical** snapd" - ""), - err="", - ), - ], - ), - ), - dict( - id="issue_6803", - input={"name": ["microk8s", "kubectl"], "classic": True}, - output=dict(changed=True, snaps_installed=["microk8s", "kubectl"]), - flags={}, - mocks=dict( - run_command=[ - dict( - command=['/testbin/snap', 'info', 'microk8s', 'kubectl'], - environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, - rc=0, - out='name: microk8s\n---\nname: kubectl\n', - err="", - ), - dict( - command=['/testbin/snap', 'list'], - environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, - rc=0, - out=issue_6803_status_out, - err="", - ), - dict( - command=['/testbin/snap', 'install', '--classic', 'microk8s'], - environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, - rc=0, - out=issue_6803_microk8s_out, - err="", - ), - dict( - command=['/testbin/snap', 'install', '--classic', 'kubectl'], - environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, - rc=0, - out=issue_6803_kubectl_out, - err="", - ), - dict( - command=['/testbin/snap', 'list'], - environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, - rc=0, - out=( - "Name Version Rev Tracking Publisher Notes" - 
"core20 20220826 1623 latest/stable canonical** base" - "lxd 5.6-794016a 23680 latest/stable/… canonical** -" - "microk8s 5.6-794016a 23680 latest/stable/… canonical** -" - "kubectl 5.6-794016a 23680 latest/stable/… canonical** -" - "snapd 2.57.4 17336 latest/stable canonical** snapd" - ""), - err="", - ), - ], - ), - ), -] -Helper.from_spec(sys.modules[__name__], snap, TEST_CASES) +default_env = {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False} +default_version_out = """\ +snap 2.66.1+24.04 +snapd 2.66.1+24.04 +series 16 +ubuntu 24.04 +kernel 6.8.0-49-generic +""" + +TEST_SPEC = dict( + test_cases=[ + dict( + id="simple case", + input={"name": ["hello-world"]}, + output=dict(changed=True, snaps_installed=["hello-world"]), + flags={}, + mocks=dict( + run_command=[ + dict( + command=['/testbin/snap', 'version'], + environ=default_env, + rc=0, + out=default_version_out, + err="", + ), + dict( + command=['/testbin/snap', 'info', 'hello-world'], + environ=default_env, + rc=0, + out='name: hello-world\n', + err="", + ), + dict( + command=['/testbin/snap', 'list'], + environ=default_env, + rc=0, + out="", + err="", + ), + dict( + command=['/testbin/snap', 'install', 'hello-world'], + environ=default_env, + rc=0, + out="hello-world (12345/stable) v12345 from Canonical** installed\n", + err="", + ), + dict( + command=['/testbin/snap', 'list'], + environ=default_env, + rc=0, + out=( + "Name Version Rev Tracking Publisher Notes" + "core20 20220826 1623 latest/stable canonical** base" + "lxd 5.6-794016a 23680 latest/stable/… canonical** -" + "hello-world 5.6-794016a 23680 latest/stable/… canonical** -" + "snapd 2.57.4 17336 latest/stable canonical** snapd" + ""), + err="", + ), + ], + ), + ), + dict( + id="issue_6803", + input={"name": ["microk8s", "kubectl"], "classic": True}, + output=dict(changed=True, snaps_installed=["microk8s", "kubectl"]), + flags={}, + mocks=dict( + run_command=[ + dict( + command=['/testbin/snap', 'version'], + environ=default_env, + rc=0, + out=default_version_out, + err="", + ), + dict( + command=['/testbin/snap', 'info', 'microk8s', 'kubectl'], + environ=default_env, + rc=0, + out='name: microk8s\n---\nname: kubectl\n', + err="", + ), + dict( + command=['/testbin/snap', 'list'], + environ=default_env, + rc=0, + out=issue_6803_status_out, + err="", + ), + dict( + command=['/testbin/snap', 'install', '--classic', 'microk8s'], + environ=default_env, + rc=0, + out=issue_6803_microk8s_out, + err="", + ), + dict( + command=['/testbin/snap', 'install', '--classic', 'kubectl'], + environ=default_env, + rc=0, + out=issue_6803_kubectl_out, + err="", + ), + dict( + command=['/testbin/snap', 'list'], + environ=default_env, + rc=0, + out=( + "Name Version Rev Tracking Publisher Notes" + "core20 20220826 1623 latest/stable canonical** base" + "lxd 5.6-794016a 23680 latest/stable/… canonical** -" + "microk8s 5.6-794016a 23680 latest/stable/… canonical** -" + "kubectl 5.6-794016a 23680 latest/stable/… canonical** -" + "snapd 2.57.4 17336 latest/stable canonical** snapd" + ""), + err="", + ), + ], + ), + ), + ] +) + +UTHelper.from_spec(snap, sys.modules[__name__], TEST_SPEC, mocks=[RunCommandMock]) diff --git a/tests/unit/plugins/modules/test_solaris_zone.py b/tests/unit/plugins/modules/test_solaris_zone.py index 20b550875c..a797d4c9c1 100644 --- a/tests/unit/plugins/modules/test_solaris_zone.py +++ b/tests/unit/plugins/modules/test_solaris_zone.py @@ -13,7 +13,7 @@ from ansible.module_utils.basic import AnsibleModule from 
ansible_collections.community.general.plugins.modules import ( solaris_zone ) -from ansible_collections.community.general.tests.unit.plugins.modules.utils import ( +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( set_module_args, ) @@ -54,16 +54,16 @@ def test_zone_create(mocked_zone_create, capfd): """ test zone creation """ - set_module_args( + with set_module_args( { "name": "z1", "state": "installed", "path": "/zones/z1", "_ansible_check_mode": False, } - ) - with pytest.raises(SystemExit): - solaris_zone.main() + ): + with pytest.raises(SystemExit): + solaris_zone.main() out, err = capfd.readouterr() results = json.loads(out) @@ -75,16 +75,16 @@ def test_zone_delete(mocked_zone_delete, capfd): """ test zone deletion """ - set_module_args( + with set_module_args( { "name": "z1", "state": "absent", "path": "/zones/z1", "_ansible_check_mode": False, } - ) - with pytest.raises(SystemExit): - solaris_zone.main() + ): + with pytest.raises(SystemExit): + solaris_zone.main() out, err = capfd.readouterr() results = json.loads(out) @@ -100,16 +100,16 @@ def test_zone_create_invalid_names(mocked_zone_create, capfd): # 2. Zone name > 64 characters. # 3. Zone name beginning with non-alphanumeric character. for invalid_name in ('foo!bar', 'z' * 65, '_zone'): - set_module_args( + with set_module_args( { "name": invalid_name, "state": "installed", "path": "/zones/" + invalid_name, "_ansible_check_mode": False, } - ) - with pytest.raises(SystemExit): - solaris_zone.main() + ): + with pytest.raises(SystemExit): + solaris_zone.main() out, err = capfd.readouterr() results = json.loads(out) diff --git a/tests/unit/plugins/modules/test_statsd.py b/tests/unit/plugins/modules/test_statsd.py index 7d458c5eb4..e3a152a940 100644 --- a/tests/unit/plugins/modules/test_statsd.py +++ b/tests/unit/plugins/modules/test_statsd.py @@ -6,8 +6,8 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible_collections.community.general.plugins.modules import statsd -from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch, MagicMock +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args class FakeStatsD(MagicMock): @@ -42,36 +42,36 @@ class TestStatsDModule(ModuleTestCase): """Test udp without parameters""" with self.patch_udp_statsd_client(side_effect=FakeStatsD) as fake_statsd: with self.assertRaises(AnsibleFailJson) as result: - set_module_args({}) - self.module.main() + with set_module_args({}): + self.module.main() def test_tcp_without_parameters(self): """Test tcp without parameters""" with self.patch_tcp_statsd_client(side_effect=FakeStatsD) as fake_statsd: with self.assertRaises(AnsibleFailJson) as result: - set_module_args({}) - self.module.main() + with set_module_args({}): + self.module.main() def test_udp_with_parameters(self): """Test udp with parameters""" with self.patch_udp_statsd_client(side_effect=FakeStatsD) as fake_statsd: with self.assertRaises(AnsibleExitJson) as result: - set_module_args({ + with set_module_args({ 'metric': 'my_counter', 'metric_type': 'counter', 'value': 1, - }) - self.module.main() + }): + self.module.main() 
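Reviewer note: the nesting order matters after this conversion, and the converted slack tests above show it — `set_module_args()` wraps the mocked transport and the `main()` call, while assertions on the mock can still run after the blocks exit. A hypothetical test mirroring those hunks (`ExampleSlackTest` and its contents are illustrative, not part of this patch):

```python
# Hypothetical example mirroring the converted slack tests in this diff.
import json

from ansible_collections.community.general.plugins.modules import slack
from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch
from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import (
    AnsibleExitJson,
    ModuleTestCase,
    set_module_args,
)


class ExampleSlackTest(ModuleTestCase):
    def test_webhook_payload(self):
        with set_module_args({'token': 'XXXX/YYYY/ZZZZ', 'msg': 'hello'}):
            with patch.object(slack, 'fetch_url') as fetch_url_mock:
                fetch_url_mock.return_value = (None, {'status': 200})
                with self.assertRaises(AnsibleExitJson):  # exit_json is patched to raise
                    slack.main()

        # mock objects outlive their `with` block, so assertions can follow it
        call_data = json.loads(fetch_url_mock.call_args[1]['data'])
        assert call_data['text'] == 'hello'
```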
self.assertEqual(result.exception.args[0]['msg'], 'Sent counter my_counter -> 1 to StatsD') self.assertEqual(result.exception.args[0]['changed'], True) with self.patch_udp_statsd_client(side_effect=FakeStatsD) as fake_statsd: with self.assertRaises(AnsibleExitJson) as result: - set_module_args({ + with set_module_args({ 'metric': 'my_gauge', 'metric_type': 'gauge', 'value': 3, - }) - self.module.main() + }): + self.module.main() self.assertEqual(result.exception.args[0]['msg'], 'Sent gauge my_gauge -> 3 (delta=False) to StatsD') self.assertEqual(result.exception.args[0]['changed'], True) @@ -79,23 +79,23 @@ class TestStatsDModule(ModuleTestCase): """Test tcp with parameters""" with self.patch_tcp_statsd_client(side_effect=FakeStatsD) as fake_statsd: with self.assertRaises(AnsibleExitJson) as result: - set_module_args({ + with set_module_args({ 'protocol': 'tcp', 'metric': 'my_counter', 'metric_type': 'counter', 'value': 1, - }) - self.module.main() + }): + self.module.main() self.assertEqual(result.exception.args[0]['msg'], 'Sent counter my_counter -> 1 to StatsD') self.assertEqual(result.exception.args[0]['changed'], True) with self.patch_tcp_statsd_client(side_effect=FakeStatsD) as fake_statsd: with self.assertRaises(AnsibleExitJson) as result: - set_module_args({ + with set_module_args({ 'protocol': 'tcp', 'metric': 'my_gauge', 'metric_type': 'gauge', 'value': 3, - }) - self.module.main() + }): + self.module.main() self.assertEqual(result.exception.args[0]['msg'], 'Sent gauge my_gauge -> 3 (delta=False) to StatsD') self.assertEqual(result.exception.args[0]['changed'], True) diff --git a/tests/unit/plugins/modules/test_sysupgrade.py b/tests/unit/plugins/modules/test_sysupgrade.py index 77d1f1cd06..6e1d1f4549 100644 --- a/tests/unit/plugins/modules/test_sysupgrade.py +++ b/tests/unit/plugins/modules/test_sysupgrade.py @@ -6,8 +6,8 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.module_utils import basic -from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import set_module_args, AnsibleExitJson, AnsibleFailJson, ModuleTestCase from ansible_collections.community.general.plugins.modules import sysupgrade @@ -48,11 +48,12 @@ class TestSysupgradeModule(ModuleTestCase): """ stderr = "" - with patch.object(basic.AnsibleModule, "run_command") as run_command: - run_command.return_value = (rc, stdout, stderr) - with self.assertRaises(AnsibleExitJson) as result: - self.module.main() - self.assertTrue(result.exception.args[0]['changed']) + with set_module_args({}): + with patch.object(basic.AnsibleModule, "run_command") as run_command: + run_command.return_value = (rc, stdout, stderr) + with self.assertRaises(AnsibleExitJson) as result: + self.module.main() + self.assertTrue(result.exception.args[0]['changed']) def test_upgrade_failed(self): """ Upgrade failed """ @@ -61,9 +62,10 @@ class TestSysupgradeModule(ModuleTestCase): stdout = "" stderr = "sysupgrade: need root privileges" - with patch.object(basic.AnsibleModule, "run_command") as run_command_mock: - run_command_mock.return_value = (rc, stdout, stderr) - with self.assertRaises(AnsibleFailJson) as result: - self.module.main() - 
self.assertTrue(result.exception.args[0]['failed']) - self.assertIn('need root', result.exception.args[0]['msg']) + with set_module_args({}): + with patch.object(basic.AnsibleModule, "run_command") as run_command_mock: + run_command_mock.return_value = (rc, stdout, stderr) + with self.assertRaises(AnsibleFailJson) as result: + self.module.main() + self.assertTrue(result.exception.args[0]['failed']) + self.assertIn('need root', result.exception.args[0]['msg']) diff --git a/tests/unit/plugins/modules/test_terraform.py b/tests/unit/plugins/modules/test_terraform.py index f6a0593fd3..09fd60dd9c 100644 --- a/tests/unit/plugins/modules/test_terraform.py +++ b/tests/unit/plugins/modules/test_terraform.py @@ -9,13 +9,13 @@ import json import pytest from ansible_collections.community.general.plugins.modules import terraform -from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import set_module_args def test_terraform_without_argument(capfd): - set_module_args({}) - with pytest.raises(SystemExit) as results: - terraform.main() + with set_module_args({}): + with pytest.raises(SystemExit) as results: + terraform.main() out, err = capfd.readouterr() assert not err diff --git a/tests/unit/plugins/modules/test_ufw.py b/tests/unit/plugins/modules/test_ufw.py index da8f0f2c80..982cbb0612 100644 --- a/tests/unit/plugins/modules/test_ufw.py +++ b/tests/unit/plugins/modules/test_ufw.py @@ -4,13 +4,17 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch from ansible.module_utils import basic -from ansible.module_utils.common.text.converters import to_bytes import ansible_collections.community.general.plugins.modules.ufw as module - -import json +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + AnsibleFailJson, + set_module_args, + exit_json, + fail_json, +) # mock ufw messages @@ -104,35 +108,6 @@ def do_nothing_func_port_7000(*args, **kwarg): return 0, dry_mode_cmd_with_port_700[args[0]], "" -def set_module_args(args): - args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) - """prepare arguments so that they will be picked up during module creation""" - basic._ANSIBLE_ARGS = to_bytes(args) - - -class AnsibleExitJson(Exception): - """Exception class to be raised by module.exit_json and caught by the test case""" - pass - - -class AnsibleFailJson(Exception): - """Exception class to be raised by module.fail_json and caught by the test case""" - pass - - -def exit_json(*args, **kwargs): - """function to patch over exit_json; package return data into an exception""" - if 'changed' not in kwargs: - kwargs['changed'] = False - raise AnsibleExitJson(kwargs) - - -def fail_json(*args, **kwargs): - """function to patch over fail_json; package return data into an exception""" - kwargs['failed'] = True - raise AnsibleFailJson(kwargs) - - def get_bin_path(self, arg, required=False): """Mock AnsibleModule.get_bin_path""" return arg @@ -171,28 +146,28 @@ class TestUFW(unittest.TestCase): self.assertTrue(reg.match("::") is not None) def 
test_check_mode_add_rules(self): - set_module_args({ + with set_module_args({ 'rule': 'allow', 'proto': 'tcp', 'port': '7000', '_ansible_check_mode': True - }) - result = self.__getResult(do_nothing_func_port_7000) + }): + result = self.__getResult(do_nothing_func_port_7000) self.assertFalse(result.exception.args[0]['changed']) def test_check_mode_add_insert_rules(self): - set_module_args({ + with set_module_args({ 'insert': '1', 'rule': 'allow', 'proto': 'tcp', 'port': '7000', '_ansible_check_mode': True - }) - result = self.__getResult(do_nothing_func_port_7000) + }): + result = self.__getResult(do_nothing_func_port_7000) self.assertFalse(result.exception.args[0]['changed']) def test_check_mode_add_detailed_route(self): - set_module_args({ + with set_module_args({ 'rule': 'allow', 'route': 'yes', 'interface_in': 'foo', @@ -203,13 +178,12 @@ class TestUFW(unittest.TestCase): 'from_port': '7000', 'to_port': '7001', '_ansible_check_mode': True - }) - - result = self.__getResult(do_nothing_func_port_7000) + }): + result = self.__getResult(do_nothing_func_port_7000) self.assertTrue(result.exception.args[0]['changed']) def test_check_mode_add_ambiguous_route(self): - set_module_args({ + with set_module_args({ 'rule': 'allow', 'route': 'yes', 'interface_in': 'foo', @@ -217,68 +191,66 @@ class TestUFW(unittest.TestCase): 'direction': 'in', 'interface': 'baz', '_ansible_check_mode': True - }) - - with self.assertRaises(AnsibleFailJson) as result: - self.__getResult(do_nothing_func_port_7000) + }): + with self.assertRaises(AnsibleFailJson) as result: + self.__getResult(do_nothing_func_port_7000) exc = result.exception.args[0] self.assertTrue(exc['failed']) self.assertIn('mutually exclusive', exc['msg']) def test_check_mode_add_interface_in(self): - set_module_args({ + with set_module_args({ 'rule': 'allow', 'proto': 'tcp', 'port': '7003', 'interface_in': 'foo', '_ansible_check_mode': True - }) - result = self.__getResult(do_nothing_func_port_7000) + }): + result = self.__getResult(do_nothing_func_port_7000) self.assertTrue(result.exception.args[0]['changed']) def test_check_mode_add_interface_out(self): - set_module_args({ + with set_module_args({ 'rule': 'allow', 'proto': 'tcp', 'port': '7004', 'interface_out': 'foo', '_ansible_check_mode': True - }) - result = self.__getResult(do_nothing_func_port_7000) + }): + result = self.__getResult(do_nothing_func_port_7000) self.assertTrue(result.exception.args[0]['changed']) def test_check_mode_add_non_route_interface_both(self): - set_module_args({ + with set_module_args({ 'rule': 'allow', 'proto': 'tcp', 'port': '7004', 'interface_in': 'foo', 'interface_out': 'bar', '_ansible_check_mode': True - }) - - with self.assertRaises(AnsibleFailJson) as result: - self.__getResult(do_nothing_func_port_7000) + }): + with self.assertRaises(AnsibleFailJson) as result: + self.__getResult(do_nothing_func_port_7000) exc = result.exception.args[0] self.assertTrue(exc['failed']) self.assertIn('combine', exc['msg']) def test_check_mode_add_direction_in(self): - set_module_args({ + with set_module_args({ 'rule': 'allow', 'proto': 'tcp', 'port': '7003', 'direction': 'in', 'interface': 'foo', '_ansible_check_mode': True - }) - result = self.__getResult(do_nothing_func_port_7000) + }): + result = self.__getResult(do_nothing_func_port_7000) self.assertTrue(result.exception.args[0]['changed']) def test_check_mode_add_direction_in_with_ip(self): - set_module_args({ + with set_module_args({ 'rule': 'allow', 'proto': 'tcp', 'from_ip': '1.1.1.1', @@ -288,24 +260,24 @@ class 
TestUFW(unittest.TestCase): 'direction': 'in', 'interface': 'foo', '_ansible_check_mode': True - }) - result = self.__getResult(do_nothing_func_port_7000) + }): + result = self.__getResult(do_nothing_func_port_7000) self.assertTrue(result.exception.args[0]['changed']) def test_check_mode_add_direction_out(self): - set_module_args({ + with set_module_args({ 'rule': 'allow', 'proto': 'tcp', 'port': '7004', 'direction': 'out', 'interface': 'foo', '_ansible_check_mode': True - }) - result = self.__getResult(do_nothing_func_port_7000) + }): + result = self.__getResult(do_nothing_func_port_7000) self.assertTrue(result.exception.args[0]['changed']) def test_check_mode_add_direction_out_with_ip(self): - set_module_args({ + with set_module_args({ 'rule': 'allow', 'proto': 'tcp', 'from_ip': '1.1.1.1', @@ -315,157 +287,149 @@ class TestUFW(unittest.TestCase): 'direction': 'out', 'interface': 'foo', '_ansible_check_mode': True - }) - result = self.__getResult(do_nothing_func_port_7000) + }): + result = self.__getResult(do_nothing_func_port_7000) self.assertTrue(result.exception.args[0]['changed']) def test_check_mode_delete_existing_rules(self): - set_module_args({ + with set_module_args({ 'rule': 'allow', 'proto': 'tcp', 'port': '7000', 'delete': 'yes', '_ansible_check_mode': True, - }) - - self.assertTrue(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed']) + }): + self.assertTrue(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed']) def test_check_mode_delete_existing_insert_rules(self): - set_module_args({ + with set_module_args({ 'insert': '1', 'rule': 'allow', 'proto': 'tcp', 'port': '7000', 'delete': 'yes', '_ansible_check_mode': True, - }) - - self.assertTrue(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed']) + }): + self.assertTrue(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed']) def test_check_mode_delete_not_existing_rules(self): - set_module_args({ + with set_module_args({ 'rule': 'allow', 'proto': 'tcp', 'port': '7001', 'delete': 'yes', '_ansible_check_mode': True, - }) - - self.assertFalse(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed']) + }): + self.assertFalse(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed']) def test_check_mode_delete_not_existing_insert_rules(self): - set_module_args({ + with set_module_args({ 'insert': '1', 'rule': 'allow', 'proto': 'tcp', 'port': '7001', 'delete': 'yes', '_ansible_check_mode': True, - }) - - self.assertFalse(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed']) + }): + self.assertFalse(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed']) def test_enable_mode(self): - set_module_args({ + with set_module_args({ 'state': 'enabled', '_ansible_check_mode': True - }) - - self.assertFalse(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed']) + }): + self.assertFalse(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed']) def test_disable_mode(self): - set_module_args({ + with set_module_args({ 'state': 'disabled', '_ansible_check_mode': True - }) - - self.assertTrue(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed']) + }): + self.assertTrue(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed']) def test_logging_off(self): - set_module_args({ + with set_module_args({ 'logging': 'off', '_ansible_check_mode': True - }) - - 
self.assertTrue(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed']) + }): + self.assertTrue(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed']) def test_logging_on(self): - set_module_args({ + with set_module_args({ 'logging': 'on', '_ansible_check_mode': True - }) - - self.assertFalse(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed']) + }): + self.assertFalse(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed']) def test_default_changed(self): - set_module_args({ + with set_module_args({ 'default': 'allow', "direction": "incoming", '_ansible_check_mode': True - }) - self.assertTrue(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed']) + }): + self.assertTrue(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed']) def test_default_not_changed(self): - set_module_args({ + with set_module_args({ 'default': 'deny', "direction": "incoming", '_ansible_check_mode': True - }) - self.assertFalse(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed']) + }): + self.assertFalse(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed']) def test_ipv6_remove(self): - set_module_args({ + with set_module_args({ 'rule': 'allow', 'proto': 'udp', 'port': '5353', 'from': 'ff02::fb', 'delete': 'yes', '_ansible_check_mode': True, - }) - self.assertTrue(self.__getResult(do_nothing_func_ipv6).exception.args[0]['changed']) + }): + self.assertTrue(self.__getResult(do_nothing_func_ipv6).exception.args[0]['changed']) def test_ipv6_add_existing(self): - set_module_args({ + with set_module_args({ 'rule': 'allow', 'proto': 'udp', 'port': '5353', 'from': 'ff02::fb', '_ansible_check_mode': True, - }) - self.assertFalse(self.__getResult(do_nothing_func_ipv6).exception.args[0]['changed']) + }): + self.assertFalse(self.__getResult(do_nothing_func_ipv6).exception.args[0]['changed']) def test_add_not_existing_ipv4_submask(self): - set_module_args({ + with set_module_args({ 'rule': 'allow', 'proto': 'udp', 'port': '1577', 'from': '10.0.0.0/24', '_ansible_check_mode': True, - }) - self.assertTrue(self.__getResult(do_nothing_func_ipv6).exception.args[0]['changed']) + }): + self.assertTrue(self.__getResult(do_nothing_func_ipv6).exception.args[0]['changed']) def test_ipv4_add_with_existing_ipv6(self): - set_module_args({ + with set_module_args({ 'rule': 'allow', 'proto': 'udp', 'port': '5353', 'from': '224.0.0.252', '_ansible_check_mode': True, - }) - self.assertTrue(self.__getResult(do_nothing_func_ipv6).exception.args[0]['changed']) + }): + self.assertTrue(self.__getResult(do_nothing_func_ipv6).exception.args[0]['changed']) def test_ipv6_add_from_nothing(self): - set_module_args({ + with set_module_args({ 'rule': 'allow', 'port': '23', 'to': '::', '_ansible_check_mode': True, - }) - result = self.__getResult(do_nothing_func_nothing).exception.args[0] + }): + result = self.__getResult(do_nothing_func_nothing).exception.args[0] print(result) self.assertTrue(result['changed']) diff --git a/tests/unit/plugins/modules/test_usb_facts.py b/tests/unit/plugins/modules/test_usb_facts.py index 084433492f..9a5434b0a1 100644 --- a/tests/unit/plugins/modules/test_usb_facts.py +++ b/tests/unit/plugins/modules/test_usb_facts.py @@ -5,42 +5,11 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -import json - -from ansible_collections.community.general.tests.unit.compat import mock -from ansible_collections.community.general.tests.unit.compat import 
unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat import mock +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest from ansible.module_utils import basic -from ansible.module_utils.common.text.converters import to_bytes from ansible_collections.community.general.plugins.modules import usb_facts - - -def set_module_args(args): - """prepare arguments so that they will be picked up during module creation""" - args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) - basic._ANSIBLE_ARGS = to_bytes(args) - - -class AnsibleExitJson(Exception): - """Exception class to be raised by module.exit_json and caught by the test case""" - pass - - -class AnsibleFailJson(Exception): - """Exception class to be raised by module.fail_json and caught by the test case""" - pass - - -def exit_json(*args, **kwargs): - """function to patch over exit_json; package return data into an exception""" - if 'changed' not in kwargs: - kwargs['changed'] = False - raise AnsibleExitJson(kwargs) - - -def fail_json(*args, **kwargs): - """function to patch over fail_json; package return data into an exception""" - kwargs['failed'] = True - raise AnsibleFailJson(kwargs) +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, set_module_args, exit_json, fail_json def get_bin_path(self, arg, required=False): @@ -85,8 +54,8 @@ class TestUsbFacts(unittest.TestCase): command_output = data["input"] mock_run_command.return_value = 0, command_output, None with self.assertRaises(AnsibleExitJson) as result: - set_module_args({}) - usb_facts.main() + with set_module_args({}): + usb_facts.main() for output_field in self.output_fields: self.assertEqual(result.exception.args[0]["ansible_facts"]["usb_devices"][0][output_field], data[output_field]) @@ -97,8 +66,8 @@ class TestUsbFacts(unittest.TestCase): with mock.patch.object(basic.AnsibleModule, 'run_command') as mock_run_command: mock_run_command.return_value = 0, input, None with self.assertRaises(AnsibleExitJson) as result: - set_module_args({}) - usb_facts.main() + with set_module_args({}): + usb_facts.main() for index in range(0, len(self.testing_data)): for output_field in self.output_fields: self.assertEqual(result.exception.args[0]["ansible_facts"]["usb_devices"][index][output_field], diff --git a/tests/unit/plugins/modules/test_wdc_redfish_command.py b/tests/unit/plugins/modules/test_wdc_redfish_command.py index 0775ac73dd..464ba6fa8d 100644 --- a/tests/unit/plugins/modules/test_wdc_redfish_command.py +++ b/tests/unit/plugins/modules/test_wdc_redfish_command.py @@ -12,12 +12,12 @@ import tarfile import tempfile import os -from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest from ansible.module_utils import basic import ansible_collections.community.general.plugins.modules.wdc_redfish_command as module -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson -from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args, exit_json, fail_json +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson +from 
ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import set_module_args, exit_json, fail_json MOCK_SUCCESSFUL_HTTP_EMPTY_RESPONSE = { "ret": True, @@ -309,30 +309,30 @@ class TestWdcRedfishCommand(unittest.TestCase): def test_module_fail_when_required_args_missing(self): with self.assertRaises(AnsibleFailJson): - set_module_args({}) - module.main() + with set_module_args({}): + module.main() def test_module_fail_when_unknown_category(self): with self.assertRaises(AnsibleFailJson): - set_module_args({ + with set_module_args({ 'category': 'unknown', 'command': 'FWActivate', 'username': 'USERID', 'password': 'PASSW0RD=21', 'ioms': [], - }) - module.main() + }): + module.main() def test_module_fail_when_unknown_command(self): with self.assertRaises(AnsibleFailJson): - set_module_args({ + with set_module_args({ 'category': 'Update', 'command': 'unknown', 'username': 'USERID', 'password': 'PASSW0RD=21', 'ioms': [], - }) - module.main() + }): + module.main() def test_module_chassis_power_mode_low(self): """Test setting chassis power mode to low (happy path).""" @@ -344,15 +344,15 @@ class TestWdcRedfishCommand(unittest.TestCase): 'resource_id': 'Enclosure', 'baseuri': 'example.com' } - set_module_args(module_args) - with patch.multiple("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils", - get_request=mock_get_request, - post_request=mock_post_request): - with self.assertRaises(AnsibleExitJson) as ansible_exit_json: - module.main() - self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE, - get_exception_message(ansible_exit_json)) - self.assertTrue(is_changed(ansible_exit_json)) + with set_module_args(module_args): + with patch.multiple("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils", + get_request=mock_get_request, + post_request=mock_post_request): + with self.assertRaises(AnsibleExitJson) as ansible_exit_json: + module.main() + self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE, + get_exception_message(ansible_exit_json)) + self.assertTrue(is_changed(ansible_exit_json)) def test_module_chassis_power_mode_normal_when_already_normal(self): """Test setting chassis power mode to normal when it already is. 
Verify we get changed=False.""" @@ -364,14 +364,14 @@ class TestWdcRedfishCommand(unittest.TestCase): 'resource_id': 'Enclosure', 'baseuri': 'example.com' } - set_module_args(module_args) - with patch.multiple("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils", - get_request=mock_get_request): - with self.assertRaises(AnsibleExitJson) as ansible_exit_json: - module.main() - self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE, - get_exception_message(ansible_exit_json)) - self.assertFalse(is_changed(ansible_exit_json)) + with set_module_args(module_args): + with patch.multiple("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils", + get_request=mock_get_request): + with self.assertRaises(AnsibleExitJson) as ansible_exit_json: + module.main() + self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE, + get_exception_message(ansible_exit_json)) + self.assertFalse(is_changed(ansible_exit_json)) def test_module_chassis_power_mode_invalid_command(self): """Test that we get an error when issuing an invalid PowerMode command.""" @@ -383,14 +383,14 @@ class TestWdcRedfishCommand(unittest.TestCase): 'resource_id': 'Enclosure', 'baseuri': 'example.com' } - set_module_args(module_args) - with patch.multiple("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils", - get_request=mock_get_request): - with self.assertRaises(AnsibleFailJson) as ansible_fail_json: - module.main() - expected_error_message = "Invalid Command 'PowerModeExtraHigh'" - self.assertIn(expected_error_message, - get_exception_message(ansible_fail_json)) + with set_module_args(module_args): + with patch.multiple("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils", + get_request=mock_get_request): + with self.assertRaises(AnsibleFailJson) as ansible_fail_json: + module.main() + expected_error_message = "Invalid Command 'PowerModeExtraHigh'" + self.assertIn(expected_error_message, + get_exception_message(ansible_fail_json)) def test_module_enclosure_led_indicator_on(self): """Test turning on a valid LED indicator (in this case we use the Enclosure resource).""" @@ -402,16 +402,15 @@ class TestWdcRedfishCommand(unittest.TestCase): "resource_id": "Enclosure", "baseuri": "example.com" } - set_module_args(module_args) - - with patch.multiple("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils", - get_request=mock_get_request, - post_request=mock_post_request): - with self.assertRaises(AnsibleExitJson) as ansible_exit_json: - module.main() - self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE, - get_exception_message(ansible_exit_json)) - self.assertTrue(is_changed(ansible_exit_json)) + with set_module_args(module_args): + with patch.multiple("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils", + get_request=mock_get_request, + post_request=mock_post_request): + with self.assertRaises(AnsibleExitJson) as ansible_exit_json: + module.main() + self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE, + get_exception_message(ansible_exit_json)) + self.assertTrue(is_changed(ansible_exit_json)) def test_module_invalid_resource_led_indicator_on(self): """Test turning LED on for an invalid resource id.""" @@ -423,16 +422,15 @@ class TestWdcRedfishCommand(unittest.TestCase): "resource_id": "Disk99", "baseuri": "example.com" } - set_module_args(module_args) - - with 
patch.multiple("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils", - get_request=mock_get_request, - post_request=mock_post_request): - with self.assertRaises(AnsibleFailJson) as ansible_fail_json: - module.main() - expected_error_message = "Chassis resource Disk99 not found" - self.assertEqual(expected_error_message, - get_exception_message(ansible_fail_json)) + with set_module_args(module_args): + with patch.multiple("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils", + get_request=mock_get_request, + post_request=mock_post_request): + with self.assertRaises(AnsibleFailJson) as ansible_fail_json: + module.main() + expected_error_message = "Chassis resource Disk99 not found" + self.assertEqual(expected_error_message, + get_exception_message(ansible_fail_json)) def test_module_enclosure_led_off_already_off(self): """Test turning LED indicator off when it's already off. Confirm changed is False and no POST occurs.""" @@ -444,15 +442,14 @@ class TestWdcRedfishCommand(unittest.TestCase): "resource_id": "Enclosure", "baseuri": "example.com" } - set_module_args(module_args) - - with patch.multiple("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils", - get_request=mock_get_request): - with self.assertRaises(AnsibleExitJson) as ansible_exit_json: - module.main() - self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE, - get_exception_message(ansible_exit_json)) - self.assertFalse(is_changed(ansible_exit_json)) + with set_module_args(module_args): + with patch.multiple("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils", + get_request=mock_get_request): + with self.assertRaises(AnsibleExitJson) as ansible_exit_json: + module.main() + self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE, + get_exception_message(ansible_exit_json)) + self.assertFalse(is_changed(ansible_exit_json)) def test_module_fw_activate_first_iom_unavailable(self): """Test that if the first IOM is not available, the 2nd one is used.""" @@ -467,26 +464,26 @@ class TestWdcRedfishCommand(unittest.TestCase): 'password': 'PASSW0RD=21', 'ioms': ioms } - set_module_args(module_args) + with set_module_args(module_args): - def mock_get_request(*args, **kwargs): - """Mock for get_request that will fail on the 'bad' IOM.""" - if "bad.example.com" in args[1]: - return MOCK_URL_ERROR - else: - return mock_get_request_enclosure_single_tenant(*args, **kwargs) + def mock_get_request(*args, **kwargs): + """Mock for get_request that will fail on the 'bad' IOM.""" + if "bad.example.com" in args[1]: + return MOCK_URL_ERROR + else: + return mock_get_request_enclosure_single_tenant(*args, **kwargs) - with patch.multiple(module.WdcRedfishUtils, - _firmware_activate_uri=mock_fw_activate_url, - _update_uri=mock_update_url, - _find_updateservice_resource=empty_return, - _find_updateservice_additional_uris=empty_return, - get_request=mock_get_request, - post_request=mock_post_request): - with self.assertRaises(AnsibleExitJson) as cm: - module.main() - self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE, - get_exception_message(cm)) + with patch.multiple(module.WdcRedfishUtils, + _firmware_activate_uri=mock_fw_activate_url, + _update_uri=mock_update_url, + _find_updateservice_resource=empty_return, + _find_updateservice_additional_uris=empty_return, + get_request=mock_get_request, + post_request=mock_post_request): + with self.assertRaises(AnsibleExitJson) as cm: + module.main() + 
self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE, + get_exception_message(cm)) def test_module_fw_activate_pass(self): """Test the FW Activate command in a passing scenario.""" @@ -508,69 +505,68 @@ class TestWdcRedfishCommand(unittest.TestCase): 'password': 'PASSW0RD=21', } module_args.update(uri_specifier) - set_module_args(module_args) - - with patch.multiple("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils", - _firmware_activate_uri=mock_fw_activate_url, - _update_uri=mock_update_url, - _find_updateservice_resource=empty_return, - _find_updateservice_additional_uris=empty_return, - get_request=mock_get_request_enclosure_single_tenant, - post_request=mock_post_request): - with self.assertRaises(AnsibleExitJson) as ansible_exit_json: - module.main() - self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE, - get_exception_message(ansible_exit_json)) - self.assertTrue(is_changed(ansible_exit_json)) + with set_module_args(module_args): + with patch.multiple("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils", + _firmware_activate_uri=mock_fw_activate_url, + _update_uri=mock_update_url, + _find_updateservice_resource=empty_return, + _find_updateservice_additional_uris=empty_return, + get_request=mock_get_request_enclosure_single_tenant, + post_request=mock_post_request): + with self.assertRaises(AnsibleExitJson) as ansible_exit_json: + module.main() + self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE, + get_exception_message(ansible_exit_json)) + self.assertTrue(is_changed(ansible_exit_json)) def test_module_fw_activate_service_does_not_support_fw_activate(self): """Test FW Activate when it is not supported.""" expected_error_message = "Service does not support FWActivate" - set_module_args({ + with set_module_args({ 'category': 'Update', 'command': 'FWActivate', 'username': 'USERID', 'password': 'PASSW0RD=21', 'ioms': ["example1.example.com"] - }) + }): - def mock_update_uri_response(*args, **kwargs): - return { - "ret": True, - "data": {} # No Actions - } + def mock_update_uri_response(*args, **kwargs): + return { + "ret": True, + "data": {} # No Actions + } - with patch.multiple(module.WdcRedfishUtils, - _firmware_activate_uri=mocked_url_response, - _update_uri=mock_update_url, - _find_updateservice_resource=empty_return, - _find_updateservice_additional_uris=empty_return, - get_request=mock_update_uri_response): - with self.assertRaises(AnsibleFailJson) as cm: - module.main() - self.assertEqual(expected_error_message, - get_exception_message(cm)) + with patch.multiple(module.WdcRedfishUtils, + _firmware_activate_uri=mocked_url_response, + _update_uri=mock_update_url, + _find_updateservice_resource=empty_return, + _find_updateservice_additional_uris=empty_return, + get_request=mock_update_uri_response): + with self.assertRaises(AnsibleFailJson) as cm: + module.main() + self.assertEqual(expected_error_message, + get_exception_message(cm)) def test_module_update_and_activate_image_uri_not_http(self): """Test Update and Activate when URI is not http(s)""" expected_error_message = "Bundle URI must be HTTP or HTTPS" - set_module_args({ + with set_module_args({ 'category': 'Update', 'command': 'UpdateAndActivate', 'username': 'USERID', 'password': 'PASSW0RD=21', 'ioms': ["example1.example.com"], 'update_image_uri': "ftp://example.com/image" - }) - with patch.multiple(module.WdcRedfishUtils, - _firmware_activate_uri=mocked_url_response, - _update_uri=mock_update_url, - _find_updateservice_resource=empty_return, - 
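
Throughout these tests, patch.multiple swaps several attributes on WdcRedfishUtils in one statement; plain functions passed as replacements become class attributes and therefore bind as methods. A small self-contained illustration of that behaviour (Service and its attributes are invented for the example):

from unittest.mock import patch


class Service:
    def get_request(self, uri):
        raise RuntimeError("would hit the network")


def mock_get_request(self, uri):
    # Receives self because it replaces a class attribute.
    return {"ret": True, "uri": uri}


with patch.multiple(Service, get_request=mock_get_request):
    assert Service().get_request("/redfish/v1")["ret"] is True
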
_find_updateservice_additional_uris=empty_return): - with self.assertRaises(AnsibleFailJson) as cm: - module.main() - self.assertEqual(expected_error_message, - get_exception_message(cm)) + }): + with patch.multiple(module.WdcRedfishUtils, + _firmware_activate_uri=mocked_url_response, + _update_uri=mock_update_url, + _find_updateservice_resource=empty_return, + _find_updateservice_additional_uris=empty_return): + with self.assertRaises(AnsibleFailJson) as cm: + module.main() + self.assertEqual(expected_error_message, + get_exception_message(cm)) def test_module_update_and_activate_target_not_ready_for_fw_update(self): """Test Update and Activate when target is not in the correct state.""" @@ -580,38 +576,38 @@ class TestWdcRedfishCommand(unittest.TestCase): mock_status_code, mock_status_description ) - set_module_args({ + with set_module_args({ 'category': 'Update', 'command': 'UpdateAndActivate', 'username': 'USERID', 'password': 'PASSW0RD=21', 'ioms': ["example1.example.com"], 'update_image_uri': "http://example.com/image" - }) - with patch.object(module.WdcRedfishUtils, "get_simple_update_status") as mock_get_simple_update_status: - mock_get_simple_update_status.return_value = { - "ret": True, - "entries": { - "StatusCode": mock_status_code, - "Description": mock_status_description + }): + with patch.object(module.WdcRedfishUtils, "get_simple_update_status") as mock_get_simple_update_status: + mock_get_simple_update_status.return_value = { + "ret": True, + "entries": { + "StatusCode": mock_status_code, + "Description": mock_status_description + } } - } - with patch.multiple(module.WdcRedfishUtils, - _firmware_activate_uri=mocked_url_response, - _update_uri=mock_update_url, - _find_updateservice_resource=empty_return, - _find_updateservice_additional_uris=empty_return): - with self.assertRaises(AnsibleFailJson) as cm: - module.main() - self.assertEqual(expected_error_message, - get_exception_message(cm)) + with patch.multiple(module.WdcRedfishUtils, + _firmware_activate_uri=mocked_url_response, + _update_uri=mock_update_url, + _find_updateservice_resource=empty_return, + _find_updateservice_additional_uris=empty_return): + with self.assertRaises(AnsibleFailJson) as cm: + module.main() + self.assertEqual(expected_error_message, + get_exception_message(cm)) def test_module_update_and_activate_bundle_not_a_tarfile(self): """Test Update and Activate when bundle is not a tarfile""" mock_filename = os.path.abspath(__file__) expected_error_message = ERROR_MESSAGE_UNABLE_TO_EXTRACT_BUNDLE_VERSION - set_module_args({ + with set_module_args({ 'category': 'Update', 'command': 'UpdateAndActivate', 'username': 'USERID', @@ -622,24 +618,24 @@ class TestWdcRedfishCommand(unittest.TestCase): "username": "image_user", "password": "image_password" } - }) - with patch('ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file') as mock_fetch_file: - mock_fetch_file.return_value = mock_filename - with patch.multiple(module.WdcRedfishUtils, - get_simple_update_status=mock_get_simple_update_status_ready_for_fw_update, - _firmware_activate_uri=mocked_url_response, - _update_uri=mock_update_url, - _find_updateservice_resource=empty_return, - _find_updateservice_additional_uris=empty_return): - with self.assertRaises(AnsibleFailJson) as cm: - module.main() - self.assertEqual(expected_error_message, - get_exception_message(cm)) + }): + with patch('ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file') as mock_fetch_file: + 
mock_fetch_file.return_value = mock_filename + with patch.multiple(module.WdcRedfishUtils, + get_simple_update_status=mock_get_simple_update_status_ready_for_fw_update, + _firmware_activate_uri=mocked_url_response, + _update_uri=mock_update_url, + _find_updateservice_resource=empty_return, + _find_updateservice_additional_uris=empty_return): + with self.assertRaises(AnsibleFailJson) as cm: + module.main() + self.assertEqual(expected_error_message, + get_exception_message(cm)) def test_module_update_and_activate_bundle_contains_no_firmware_version(self): """Test Update and Activate when bundle contains no firmware version""" expected_error_message = ERROR_MESSAGE_UNABLE_TO_EXTRACT_BUNDLE_VERSION - set_module_args({ + with set_module_args({ 'category': 'Update', 'command': 'UpdateAndActivate', 'username': 'USERID', @@ -650,29 +646,29 @@ class TestWdcRedfishCommand(unittest.TestCase): "username": "image_user", "password": "image_password" } - }) + }): - tar_name = "empty_tarfile{0}.tar".format(uuid.uuid4()) - empty_tarfile = tarfile.open(os.path.join(self.tempdir, tar_name), "w") - empty_tarfile.close() - with patch('ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file') as mock_fetch_file: - mock_fetch_file.return_value = os.path.join(self.tempdir, tar_name) - with patch.multiple(module.WdcRedfishUtils, - get_simple_update_status=mock_get_simple_update_status_ready_for_fw_update, - _firmware_activate_uri=mocked_url_response, - _update_uri=mock_update_url, - _find_updateservice_resource=empty_return, - _find_updateservice_additional_uris=empty_return): - with self.assertRaises(AnsibleFailJson) as cm: - module.main() - self.assertEqual(expected_error_message, - get_exception_message(cm)) + tar_name = "empty_tarfile{0}.tar".format(uuid.uuid4()) + empty_tarfile = tarfile.open(os.path.join(self.tempdir, tar_name), "w") + empty_tarfile.close() + with patch('ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file') as mock_fetch_file: + mock_fetch_file.return_value = os.path.join(self.tempdir, tar_name) + with patch.multiple(module.WdcRedfishUtils, + get_simple_update_status=mock_get_simple_update_status_ready_for_fw_update, + _firmware_activate_uri=mocked_url_response, + _update_uri=mock_update_url, + _find_updateservice_resource=empty_return, + _find_updateservice_additional_uris=empty_return): + with self.assertRaises(AnsibleFailJson) as cm: + module.main() + self.assertEqual(expected_error_message, + get_exception_message(cm)) def test_module_update_and_activate_version_already_installed(self): """Test Update and Activate when the bundle version is already installed""" mock_firmware_version = "1.2.3" expected_error_message = ACTION_WAS_SUCCESSFUL_MESSAGE - set_module_args({ + with set_module_args({ 'category': 'Update', 'command': 'UpdateAndActivate', 'username': 'USERID', @@ -683,31 +679,31 @@ class TestWdcRedfishCommand(unittest.TestCase): "username": "image_user", "password": "image_password" } - }) + }): - tar_name = self.generate_temp_bundlefile(mock_firmware_version=mock_firmware_version, - is_multi_tenant=False) - with patch('ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file') as mock_fetch_file: - mock_fetch_file.return_value = os.path.join(self.tempdir, tar_name) - with patch.multiple(module.WdcRedfishUtils, - get_firmware_inventory=mock_get_firmware_inventory_version_1_2_3, - get_simple_update_status=mock_get_simple_update_status_ready_for_fw_update, - 
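
The bundle-version tests above rely on archives that are structurally valid but unusable: a Python source file that is not a tar at all, and an empty tar with no package member. Reproducing the empty case with only the standard library:

import os
import tarfile
import tempfile
import uuid

tempdir = tempfile.mkdtemp()
tar_name = "empty_tarfile{0}.tar".format(uuid.uuid4())
tar_path = os.path.join(tempdir, tar_name)

# A valid but empty archive: nothing for version extraction to find.
with tarfile.open(tar_path, "w"):
    pass

with tarfile.open(tar_path) as tf:
    assert tf.getmembers() == []
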
_firmware_activate_uri=mocked_url_response, - _update_uri=mock_update_url, - _find_updateservice_resource=empty_return, - _find_updateservice_additional_uris=empty_return, - get_request=mock_get_request_enclosure_single_tenant): - with self.assertRaises(AnsibleExitJson) as result: - module.main() - self.assertEqual(expected_error_message, - get_exception_message(result)) - self.assertFalse(is_changed(result)) + tar_name = self.generate_temp_bundlefile(mock_firmware_version=mock_firmware_version, + is_multi_tenant=False) + with patch('ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file') as mock_fetch_file: + mock_fetch_file.return_value = os.path.join(self.tempdir, tar_name) + with patch.multiple(module.WdcRedfishUtils, + get_firmware_inventory=mock_get_firmware_inventory_version_1_2_3, + get_simple_update_status=mock_get_simple_update_status_ready_for_fw_update, + _firmware_activate_uri=mocked_url_response, + _update_uri=mock_update_url, + _find_updateservice_resource=empty_return, + _find_updateservice_additional_uris=empty_return, + get_request=mock_get_request_enclosure_single_tenant): + with self.assertRaises(AnsibleExitJson) as result: + module.main() + self.assertEqual(expected_error_message, + get_exception_message(result)) + self.assertFalse(is_changed(result)) def test_module_update_and_activate_version_already_installed_multi_tenant(self): """Test Update and Activate on multi-tenant when version is already installed""" mock_firmware_version = "1.2.3" expected_error_message = ACTION_WAS_SUCCESSFUL_MESSAGE - set_module_args({ + with set_module_args({ 'category': 'Update', 'command': 'UpdateAndActivate', 'username': 'USERID', @@ -718,30 +714,30 @@ class TestWdcRedfishCommand(unittest.TestCase): "username": "image_user", "password": "image_password" } - }) + }): - tar_name = self.generate_temp_bundlefile(mock_firmware_version=mock_firmware_version, - is_multi_tenant=True) - with patch('ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file') as mock_fetch_file: - mock_fetch_file.return_value = os.path.join(self.tempdir, tar_name) - with patch.multiple(module.WdcRedfishUtils, - get_firmware_inventory=mock_get_firmware_inventory_version_1_2_3, - get_simple_update_status=mock_get_simple_update_status_ready_for_fw_update, - _firmware_activate_uri=mocked_url_response, - _update_uri=mock_update_url, - _find_updateservice_resource=empty_return, - _find_updateservice_additional_uris=empty_return, - get_request=mock_get_request_enclosure_multi_tenant): - with self.assertRaises(AnsibleExitJson) as result: - module.main() - self.assertEqual(expected_error_message, - get_exception_message(result)) - self.assertFalse(is_changed(result)) + tar_name = self.generate_temp_bundlefile(mock_firmware_version=mock_firmware_version, + is_multi_tenant=True) + with patch('ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file') as mock_fetch_file: + mock_fetch_file.return_value = os.path.join(self.tempdir, tar_name) + with patch.multiple(module.WdcRedfishUtils, + get_firmware_inventory=mock_get_firmware_inventory_version_1_2_3, + get_simple_update_status=mock_get_simple_update_status_ready_for_fw_update, + _firmware_activate_uri=mocked_url_response, + _update_uri=mock_update_url, + _find_updateservice_resource=empty_return, + _find_updateservice_additional_uris=empty_return, + get_request=mock_get_request_enclosure_multi_tenant): + with self.assertRaises(AnsibleExitJson) as result: + module.main() + 
self.assertEqual(expected_error_message, + get_exception_message(result)) + self.assertFalse(is_changed(result)) def test_module_update_and_activate_pass(self): """Test Update and Activate (happy path)""" mock_firmware_version = "1.2.2" - set_module_args({ + with set_module_args({ 'category': 'Update', 'command': 'UpdateAndActivate', 'username': 'USERID', @@ -752,34 +748,34 @@ class TestWdcRedfishCommand(unittest.TestCase): "username": "image_user", "password": "image_password" } - }) + }): - tar_name = self.generate_temp_bundlefile(mock_firmware_version=mock_firmware_version, - is_multi_tenant=False) + tar_name = self.generate_temp_bundlefile(mock_firmware_version=mock_firmware_version, + is_multi_tenant=False) - with patch('ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file') as mock_fetch_file: - mock_fetch_file.return_value = os.path.join(self.tempdir, tar_name) - with patch.multiple("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils", - get_firmware_inventory=mock_get_firmware_inventory_version_1_2_3, - simple_update=mock_simple_update, - _simple_update_status_uri=mocked_url_response, - # _find_updateservice_resource=empty_return, - # _find_updateservice_additional_uris=empty_return, - get_request=mock_get_request_enclosure_single_tenant, - post_request=mock_post_request): + with patch('ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file') as mock_fetch_file: + mock_fetch_file.return_value = os.path.join(self.tempdir, tar_name) + with patch.multiple("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils", + get_firmware_inventory=mock_get_firmware_inventory_version_1_2_3, + simple_update=mock_simple_update, + _simple_update_status_uri=mocked_url_response, + # _find_updateservice_resource=empty_return, + # _find_updateservice_additional_uris=empty_return, + get_request=mock_get_request_enclosure_single_tenant, + post_request=mock_post_request): - with patch("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils.get_simple_update_status" - ) as mock_get_simple_update_status: - mock_get_simple_update_status.side_effect = MOCK_SIMPLE_UPDATE_STATUS_LIST - with self.assertRaises(AnsibleExitJson) as ansible_exit_json: - module.main() - self.assertTrue(is_changed(ansible_exit_json)) - self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE, get_exception_message(ansible_exit_json)) + with patch("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils.get_simple_update_status" + ) as mock_get_simple_update_status: + mock_get_simple_update_status.side_effect = MOCK_SIMPLE_UPDATE_STATUS_LIST + with self.assertRaises(AnsibleExitJson) as ansible_exit_json: + module.main() + self.assertTrue(is_changed(ansible_exit_json)) + self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE, get_exception_message(ansible_exit_json)) def test_module_update_and_activate_pass_multi_tenant(self): """Test Update and Activate with multi-tenant (happy path)""" mock_firmware_version = "1.2.2" - set_module_args({ + with set_module_args({ 'category': 'Update', 'command': 'UpdateAndActivate', 'username': 'USERID', @@ -790,34 +786,34 @@ class TestWdcRedfishCommand(unittest.TestCase): "username": "image_user", "password": "image_password" } - }) + }): - tar_name = self.generate_temp_bundlefile(mock_firmware_version=mock_firmware_version, - is_multi_tenant=True) + tar_name = 
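
The happy-path tests drive get_simple_update_status through MOCK_SIMPLE_UPDATE_STATUS_LIST by assigning the list to side_effect, which hands out one element per call. The payloads below are illustrative stand-ins, not the module's real status entries:

from unittest.mock import Mock

status_sequence = [
    {"ret": True, "entries": {"StatusCode": 1, "Description": "Updating"}},
    {"ret": True, "entries": {"StatusCode": 0, "Description": "Ready"}},
]

get_status = Mock(side_effect=status_sequence)
assert get_status()["entries"]["StatusCode"] == 1  # first call
assert get_status()["entries"]["StatusCode"] == 0  # second call
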
self.generate_temp_bundlefile(mock_firmware_version=mock_firmware_version, + is_multi_tenant=True) - with patch('ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file') as mock_fetch_file: - mock_fetch_file.return_value = os.path.join(self.tempdir, tar_name) - with patch.multiple(module.WdcRedfishUtils, - get_firmware_inventory=mock_get_firmware_inventory_version_1_2_3, - simple_update=mock_simple_update, - _simple_update_status_uri=mocked_url_response, - # _find_updateservice_resource=empty_return, - # _find_updateservice_additional_uris=empty_return, - get_request=mock_get_request_enclosure_multi_tenant, - post_request=mock_post_request): - with patch("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils.get_simple_update_status" - ) as mock_get_simple_update_status: - mock_get_simple_update_status.side_effect = MOCK_SIMPLE_UPDATE_STATUS_LIST - with self.assertRaises(AnsibleExitJson) as ansible_exit_json: - module.main() - self.assertTrue(is_changed(ansible_exit_json)) - self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE, get_exception_message(ansible_exit_json)) + with patch('ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file') as mock_fetch_file: + mock_fetch_file.return_value = os.path.join(self.tempdir, tar_name) + with patch.multiple(module.WdcRedfishUtils, + get_firmware_inventory=mock_get_firmware_inventory_version_1_2_3, + simple_update=mock_simple_update, + _simple_update_status_uri=mocked_url_response, + # _find_updateservice_resource=empty_return, + # _find_updateservice_additional_uris=empty_return, + get_request=mock_get_request_enclosure_multi_tenant, + post_request=mock_post_request): + with patch("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils.get_simple_update_status" + ) as mock_get_simple_update_status: + mock_get_simple_update_status.side_effect = MOCK_SIMPLE_UPDATE_STATUS_LIST + with self.assertRaises(AnsibleExitJson) as ansible_exit_json: + module.main() + self.assertTrue(is_changed(ansible_exit_json)) + self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE, get_exception_message(ansible_exit_json)) def test_module_fw_update_multi_tenant_firmware_single_tenant_enclosure(self): """Test Update and Activate using multi-tenant bundle on single-tenant enclosure""" mock_firmware_version = "1.1.1" expected_error_message = "Enclosure multi-tenant is False but bundle multi-tenant is True" - set_module_args({ + with set_module_args({ 'category': 'Update', 'command': 'UpdateAndActivate', 'username': 'USERID', @@ -828,30 +824,30 @@ class TestWdcRedfishCommand(unittest.TestCase): "username": "image_user", "password": "image_password" } - }) + }): - tar_name = self.generate_temp_bundlefile(mock_firmware_version=mock_firmware_version, - is_multi_tenant=True) - with patch('ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file') as mock_fetch_file: - mock_fetch_file.return_value = os.path.join(self.tempdir, tar_name) - with patch.multiple(module.WdcRedfishUtils, - get_firmware_inventory=mock_get_firmware_inventory_version_1_2_3(), - get_simple_update_status=mock_get_simple_update_status_ready_for_fw_update, - _firmware_activate_uri=mocked_url_response, - _update_uri=mock_update_url, - _find_updateservice_resource=empty_return, - _find_updateservice_additional_uris=empty_return, - get_request=mock_get_request_enclosure_single_tenant): - with self.assertRaises(AnsibleFailJson) as result: - module.main() - 
self.assertEqual(expected_error_message, - get_exception_message(result)) + tar_name = self.generate_temp_bundlefile(mock_firmware_version=mock_firmware_version, + is_multi_tenant=True) + with patch('ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file') as mock_fetch_file: + mock_fetch_file.return_value = os.path.join(self.tempdir, tar_name) + with patch.multiple(module.WdcRedfishUtils, + get_firmware_inventory=mock_get_firmware_inventory_version_1_2_3(), + get_simple_update_status=mock_get_simple_update_status_ready_for_fw_update, + _firmware_activate_uri=mocked_url_response, + _update_uri=mock_update_url, + _find_updateservice_resource=empty_return, + _find_updateservice_additional_uris=empty_return, + get_request=mock_get_request_enclosure_single_tenant): + with self.assertRaises(AnsibleFailJson) as result: + module.main() + self.assertEqual(expected_error_message, + get_exception_message(result)) def test_module_fw_update_single_tentant_firmware_multi_tenant_enclosure(self): """Test Update and Activate using singe-tenant bundle on multi-tenant enclosure""" mock_firmware_version = "1.1.1" expected_error_message = "Enclosure multi-tenant is True but bundle multi-tenant is False" - set_module_args({ + with set_module_args({ 'category': 'Update', 'command': 'UpdateAndActivate', 'username': 'USERID', @@ -862,24 +858,24 @@ class TestWdcRedfishCommand(unittest.TestCase): "username": "image_user", "password": "image_password" } - }) + }): - tar_name = self.generate_temp_bundlefile(mock_firmware_version=mock_firmware_version, - is_multi_tenant=False) - with patch('ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file') as mock_fetch_file: - mock_fetch_file.return_value = os.path.join(self.tempdir, tar_name) - with patch.multiple(module.WdcRedfishUtils, - get_firmware_inventory=mock_get_firmware_inventory_version_1_2_3(), - get_simple_update_status=mock_get_simple_update_status_ready_for_fw_update, - _firmware_activate_uri=mocked_url_response, - _update_uri=mock_update_url, - _find_updateservice_resource=empty_return, - _find_updateservice_additional_uris=empty_return, - get_request=mock_get_request_enclosure_multi_tenant): - with self.assertRaises(AnsibleFailJson) as result: - module.main() - self.assertEqual(expected_error_message, - get_exception_message(result)) + tar_name = self.generate_temp_bundlefile(mock_firmware_version=mock_firmware_version, + is_multi_tenant=False) + with patch('ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file') as mock_fetch_file: + mock_fetch_file.return_value = os.path.join(self.tempdir, tar_name) + with patch.multiple(module.WdcRedfishUtils, + get_firmware_inventory=mock_get_firmware_inventory_version_1_2_3(), + get_simple_update_status=mock_get_simple_update_status_ready_for_fw_update, + _firmware_activate_uri=mocked_url_response, + _update_uri=mock_update_url, + _find_updateservice_resource=empty_return, + _find_updateservice_additional_uris=empty_return, + get_request=mock_get_request_enclosure_multi_tenant): + with self.assertRaises(AnsibleFailJson) as result: + module.main() + self.assertEqual(expected_error_message, + get_exception_message(result)) def generate_temp_bundlefile(self, mock_firmware_version, @@ -896,15 +892,14 @@ class TestWdcRedfishCommand(unittest.TestCase): bundle_tarfile = tarfile.open(os.path.join(self.tempdir, tar_name), "w") package_filename = "oobm-{0}.pkg".format(mock_firmware_version) package_filename_path = 
os.path.join(self.tempdir, package_filename) - package_file = open(package_filename_path, "w") - package_file.close() + with open(package_filename_path, "w"): + pass bundle_tarfile.add(os.path.join(self.tempdir, package_filename), arcname=package_filename) bin_filename = "firmware.bin" bin_filename_path = os.path.join(self.tempdir, bin_filename) - bin_file = open(bin_filename_path, "wb") - byte_to_write = b'\x80' if is_multi_tenant else b'\xFF' - bin_file.write(byte_to_write * 12) - bin_file.close() + with open(bin_filename_path, "wb") as bin_file: + byte_to_write = b'\x80' if is_multi_tenant else b'\xFF' + bin_file.write(byte_to_write * 12) for filename in [package_filename, bin_filename]: bundle_tarfile.add(os.path.join(self.tempdir, filename), arcname=filename) bundle_tarfile.close() diff --git a/tests/unit/plugins/modules/test_wdc_redfish_info.py b/tests/unit/plugins/modules/test_wdc_redfish_info.py index e1dfb4a276..8d16da50bc 100644 --- a/tests/unit/plugins/modules/test_wdc_redfish_info.py +++ b/tests/unit/plugins/modules/test_wdc_redfish_info.py @@ -6,12 +6,12 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest from ansible.module_utils import basic import ansible_collections.community.general.plugins.modules.wdc_redfish_info as module -from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson -from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args, exit_json, fail_json +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import set_module_args, exit_json, fail_json MOCK_SUCCESSFUL_RESPONSE_WITH_ACTIONS = { "ret": True, @@ -77,140 +77,140 @@ class TestWdcRedfishInfo(unittest.TestCase): def test_module_fail_when_required_args_missing(self): with self.assertRaises(AnsibleFailJson): - set_module_args({}) - module.main() + with set_module_args({}): + module.main() def test_module_fail_when_unknown_category(self): with self.assertRaises(AnsibleFailJson): - set_module_args({ + with set_module_args({ 'category': 'unknown', 'command': 'SimpleUpdateStatus', 'username': 'USERID', 'password': 'PASSW0RD=21', 'ioms': [], - }) - module.main() + }): + module.main() def test_module_fail_when_unknown_command(self): with self.assertRaises(AnsibleFailJson): - set_module_args({ + with set_module_args({ 'category': 'Update', 'command': 'unknown', 'username': 'USERID', 'password': 'PASSW0RD=21', 'ioms': [], - }) - module.main() + }): + module.main() def test_module_simple_update_status_pass(self): - set_module_args({ + with set_module_args({ 'category': 'Update', 'command': 'SimpleUpdateStatus', 'username': 'USERID', 'password': 'PASSW0RD=21', 'ioms': ["example1.example.com"], - }) + }): - def mock_simple_update_status(*args, **kwargs): - return { - "ret": True, - "data": { - "Description": "Ready for FW update", - "ErrorCode": 0, - "EstimatedRemainingMinutes": 0, - "StatusCode": 0 - } - } - - def mocked_string_response(*args, **kwargs): - return "mockedUrl" - - def 
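
The generate_temp_bundlefile cleanup above replaces manual open()/close() pairs with "with open(...)", which closes the handle even if the block raises and avoids ResourceWarning noise in test runs. A standalone version of the binary write, mirroring the test's 12-byte multi-tenant marker:

import os
import tempfile

tempdir = tempfile.mkdtemp()
bin_path = os.path.join(tempdir, "firmware.bin")

is_multi_tenant = True
with open(bin_path, "wb") as bin_file:
    byte_to_write = b'\x80' if is_multi_tenant else b'\xFF'
    bin_file.write(byte_to_write * 12)

assert os.path.getsize(bin_path) == 12
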
empty_return(*args, **kwargs): - return {"ret": True} - - with patch.multiple(module.WdcRedfishUtils, - _simple_update_status_uri=mocked_string_response, - _find_updateservice_resource=empty_return, - _find_updateservice_additional_uris=empty_return, - get_request=mock_simple_update_status): - with self.assertRaises(AnsibleExitJson) as ansible_exit_json: - module.main() - redfish_facts = get_redfish_facts(ansible_exit_json) - self.assertEqual(mock_simple_update_status()["data"], - redfish_facts["simple_update_status"]["entries"]) - - def test_module_simple_update_status_updateservice_resource_not_found(self): - set_module_args({ - 'category': 'Update', - 'command': 'SimpleUpdateStatus', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'ioms': ["example1.example.com"], - }) - with patch.object(module.WdcRedfishUtils, 'get_request') as mock_get_request: - mock_get_request.return_value = { - "ret": True, - "data": {} # Missing UpdateService property - } - with self.assertRaises(AnsibleFailJson) as ansible_exit_json: - module.main() - self.assertEqual("UpdateService resource not found", - get_exception_message(ansible_exit_json)) - - def test_module_simple_update_status_service_does_not_support_simple_update(self): - set_module_args({ - 'category': 'Update', - 'command': 'SimpleUpdateStatus', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'ioms': ["example1.example.com"], - }) - - def mock_get_request_function(uri): - mock_url_string = "mockURL" - if mock_url_string in uri: + def mock_simple_update_status(*args, **kwargs): return { "ret": True, "data": { - "Actions": { # No #UpdateService.SimpleUpdate - } + "Description": "Ready for FW update", + "ErrorCode": 0, + "EstimatedRemainingMinutes": 0, + "StatusCode": 0 } } - else: - return { - "ret": True, - "data": mock_url_string - } - with patch.object(module.WdcRedfishUtils, 'get_request') as mock_get_request: - mock_get_request.side_effect = mock_get_request_function - with self.assertRaises(AnsibleFailJson) as ansible_exit_json: - module.main() - self.assertEqual("UpdateService resource not found", - get_exception_message(ansible_exit_json)) + def mocked_string_response(*args, **kwargs): + return "mockedUrl" - def test_module_simple_update_status_service_does_not_support_fw_activate(self): - set_module_args({ + def empty_return(*args, **kwargs): + return {"ret": True} + + with patch.multiple(module.WdcRedfishUtils, + _simple_update_status_uri=mocked_string_response, + _find_updateservice_resource=empty_return, + _find_updateservice_additional_uris=empty_return, + get_request=mock_simple_update_status): + with self.assertRaises(AnsibleExitJson) as ansible_exit_json: + module.main() + redfish_facts = get_redfish_facts(ansible_exit_json) + self.assertEqual(mock_simple_update_status()["data"], + redfish_facts["simple_update_status"]["entries"]) + + def test_module_simple_update_status_updateservice_resource_not_found(self): + with set_module_args({ 'category': 'Update', 'command': 'SimpleUpdateStatus', 'username': 'USERID', 'password': 'PASSW0RD=21', 'ioms': ["example1.example.com"], - }) + }): + with patch.object(module.WdcRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = { + "ret": True, + "data": {} # Missing UpdateService property + } + with self.assertRaises(AnsibleFailJson) as ansible_exit_json: + module.main() + self.assertEqual("UpdateService resource not found", + get_exception_message(ansible_exit_json)) - def mock_get_request_function(uri): - if uri.endswith("/redfish/v1") or 
uri.endswith("/redfish/v1/"): - return MOCK_SUCCESSFUL_RESPONSE_WITH_UPDATE_SERVICE_RESOURCE - elif uri.endswith("/mockedUrl"): - return MOCK_SUCCESSFUL_HTTP_EMPTY_RESPONSE - elif uri.endswith("/UpdateService"): - return MOCK_SUCCESSFUL_RESPONSE_WITH_SIMPLE_UPDATE_BUT_NO_FW_ACTIVATE - else: - raise RuntimeError("Illegal call to get_request in test: " + uri) + def test_module_simple_update_status_service_does_not_support_simple_update(self): + with set_module_args({ + 'category': 'Update', + 'command': 'SimpleUpdateStatus', + 'username': 'USERID', + 'password': 'PASSW0RD=21', + 'ioms': ["example1.example.com"], + }): - with patch("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils.get_request") as mock_get_request: - mock_get_request.side_effect = mock_get_request_function - with self.assertRaises(AnsibleFailJson) as ansible_exit_json: - module.main() - self.assertEqual("Service does not support FWActivate", - get_exception_message(ansible_exit_json)) + def mock_get_request_function(uri): + mock_url_string = "mockURL" + if mock_url_string in uri: + return { + "ret": True, + "data": { + "Actions": { # No #UpdateService.SimpleUpdate + } + } + } + else: + return { + "ret": True, + "data": mock_url_string + } + + with patch.object(module.WdcRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.side_effect = mock_get_request_function + with self.assertRaises(AnsibleFailJson) as ansible_exit_json: + module.main() + self.assertEqual("UpdateService resource not found", + get_exception_message(ansible_exit_json)) + + def test_module_simple_update_status_service_does_not_support_fw_activate(self): + with set_module_args({ + 'category': 'Update', + 'command': 'SimpleUpdateStatus', + 'username': 'USERID', + 'password': 'PASSW0RD=21', + 'ioms': ["example1.example.com"], + }): + + def mock_get_request_function(uri): + if uri.endswith("/redfish/v1") or uri.endswith("/redfish/v1/"): + return MOCK_SUCCESSFUL_RESPONSE_WITH_UPDATE_SERVICE_RESOURCE + elif uri.endswith("/mockedUrl"): + return MOCK_SUCCESSFUL_HTTP_EMPTY_RESPONSE + elif uri.endswith("/UpdateService"): + return MOCK_SUCCESSFUL_RESPONSE_WITH_SIMPLE_UPDATE_BUT_NO_FW_ACTIVATE + else: + raise RuntimeError("Illegal call to get_request in test: " + uri) + + with patch("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils.get_request") as mock_get_request: + mock_get_request.side_effect = mock_get_request_function + with self.assertRaises(AnsibleFailJson) as ansible_exit_json: + module.main() + self.assertEqual("Service does not support FWActivate", + get_exception_message(ansible_exit_json)) diff --git a/tests/unit/plugins/modules/test_xcc_redfish_command.py b/tests/unit/plugins/modules/test_xcc_redfish_command.py index c3902a2f30..9c66661ac7 100644 --- a/tests/unit/plugins/modules/test_xcc_redfish_command.py +++ b/tests/unit/plugins/modules/test_xcc_redfish_command.py @@ -5,12 +5,12 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest from ansible.module_utils import basic import ansible_collections.community.general.plugins.modules.xcc_redfish_command as module -from 
ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson -from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args, exit_json, fail_json +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import set_module_args, exit_json, fail_json def get_bin_path(self, arg, required=False): @@ -30,33 +30,33 @@ class TestXCCRedfishCommand(unittest.TestCase): def test_module_fail_when_required_args_missing(self): with self.assertRaises(AnsibleFailJson): - set_module_args({}) - module.main() + with set_module_args({}): + module.main() def test_module_fail_when_unknown_category(self): with self.assertRaises(AnsibleFailJson): - set_module_args({ + with set_module_args({ 'category': 'unknown', 'command': 'VirtualMediaEject', 'baseuri': '10.245.39.251', 'username': 'USERID', 'password': 'PASSW0RD=21', - }) - module.main() + }): + module.main() def test_module_fail_when_unknown_command(self): with self.assertRaises(AnsibleFailJson): - set_module_args({ + with set_module_args({ 'category': 'Manager', 'command': 'unknown', 'baseuri': '10.245.39.251', 'username': 'USERID', 'password': 'PASSW0RD=21', - }) - module.main() + }): + module.main() def test_module_command_VirtualMediaInsert_pass(self): - set_module_args({ + with set_module_args({ 'category': 'Manager', 'command': 'VirtualMediaInsert', 'baseuri': '10.245.39.251', @@ -70,20 +70,20 @@ class TestXCCRedfishCommand(unittest.TestCase): 'write_protected': True, 'transfer_protocol_type': 'NFS' } - }) - with patch.object(module.XCCRedfishUtils, '_find_systems_resource') as mock__find_systems_resource: - mock__find_systems_resource.return_value = {'ret': True, 'changed': True, 'msg': 'success'} - with patch.object(module.XCCRedfishUtils, '_find_managers_resource') as mock__find_managers_resource: - mock__find_managers_resource.return_value = {'ret': True, 'changed': True, 'msg': 'success'} + }): + with patch.object(module.XCCRedfishUtils, '_find_systems_resource') as mock__find_systems_resource: + mock__find_systems_resource.return_value = {'ret': True, 'changed': True, 'msg': 'success'} + with patch.object(module.XCCRedfishUtils, '_find_managers_resource') as mock__find_managers_resource: + mock__find_managers_resource.return_value = {'ret': True, 'changed': True, 'msg': 'success'} - with patch.object(module.XCCRedfishUtils, 'virtual_media_insert') as mock_virtual_media_insert: - mock_virtual_media_insert.return_value = {'ret': True, 'changed': True, 'msg': 'success'} + with patch.object(module.XCCRedfishUtils, 'virtual_media_insert') as mock_virtual_media_insert: + mock_virtual_media_insert.return_value = {'ret': True, 'changed': True, 'msg': 'success'} - with self.assertRaises(AnsibleExitJson) as result: - module.main() + with self.assertRaises(AnsibleExitJson) as result: + module.main() def test_module_command_VirtualMediaEject_pass(self): - set_module_args({ + with set_module_args({ 'category': 'Manager', 'command': 'VirtualMediaEject', 'baseuri': '10.245.39.251', @@ -93,194 +93,184 @@ class TestXCCRedfishCommand(unittest.TestCase): 'virtual_media': { 'image_url': "nfs://10.245.52.18:/home/nfs/bootable-sr635-20210111-autorun.iso", } - }) - with patch.object(module.XCCRedfishUtils, '_find_systems_resource') as mock__find_systems_resource: - mock__find_systems_resource.return_value = {'ret': True, 
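
These tests can wrap module.main() in assertRaises because the shared utils replace exit_json/fail_json with functions that raise. A minimal sketch of that convention, assuming the same shape as the historical community.general helpers (the imported versions now come from community.internal_test_tools and may differ):

class AnsibleExitJson(Exception):
    """Raised in place of a real module exit."""


class AnsibleFailJson(Exception):
    """Raised in place of a real module failure."""


def exit_json(*args, **kwargs):
    kwargs.setdefault('changed', False)
    raise AnsibleExitJson(kwargs)


def fail_json(*args, **kwargs):
    kwargs['failed'] = True
    raise AnsibleFailJson(kwargs)
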
'changed': True, 'msg': 'success'} - with patch.object(module.XCCRedfishUtils, '_find_managers_resource') as mock__find_managers_resource: - mock__find_managers_resource.return_value = {'ret': True, 'changed': True, 'msg': 'success'} + }): + with patch.object(module.XCCRedfishUtils, '_find_systems_resource') as mock__find_systems_resource: + mock__find_systems_resource.return_value = {'ret': True, 'changed': True, 'msg': 'success'} + with patch.object(module.XCCRedfishUtils, '_find_managers_resource') as mock__find_managers_resource: + mock__find_managers_resource.return_value = {'ret': True, 'changed': True, 'msg': 'success'} - with patch.object(module.XCCRedfishUtils, 'virtual_media_eject') as mock_virtual_media_eject: - mock_virtual_media_eject.return_value = {'ret': True, 'changed': True, 'msg': 'success'} + with patch.object(module.XCCRedfishUtils, 'virtual_media_eject') as mock_virtual_media_eject: + mock_virtual_media_eject.return_value = {'ret': True, 'changed': True, 'msg': 'success'} - with self.assertRaises(AnsibleExitJson) as result: - module.main() + with self.assertRaises(AnsibleExitJson) as result: + module.main() def test_module_command_VirtualMediaEject_fail_when_required_args_missing(self): with self.assertRaises(AnsibleFailJson): - set_module_args({ + with set_module_args({ 'category': 'Manager', 'command': 'VirtualMediaEject', 'baseuri': '10.245.39.251', 'username': 'USERID', 'password': 'PASSW0RD=21', - }) - module.main() + }): + module.main() def test_module_command_GetResource_fail_when_required_args_missing(self): - set_module_args({ + with set_module_args({ 'category': 'Raw', 'command': 'GetResource', 'baseuri': '10.245.39.251', 'username': 'USERID', 'password': 'PASSW0RD=21', - }) + }): + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}} - with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: - mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}} - - with self.assertRaises(AnsibleFailJson) as result: - module.main() + with self.assertRaises(AnsibleFailJson) as result: + module.main() def test_module_command_GetResource_fail_when_get_return_false(self): - set_module_args({ + with set_module_args({ 'category': 'Raw', 'command': 'GetResource', 'baseuri': '10.245.39.251', 'username': 'USERID', 'password': 'PASSW0RD=21', 'resource_uri': '/redfish/v1/testuri', - }) + }): + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = {'ret': False, 'msg': '404 error'} - with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: - mock_get_request.return_value = {'ret': False, 'msg': '404 error'} - - with self.assertRaises(AnsibleFailJson) as result: - module.main() + with self.assertRaises(AnsibleFailJson) as result: + module.main() def test_module_command_GetResource_pass(self): - set_module_args({ + with set_module_args({ 'category': 'Raw', 'command': 'GetResource', 'baseuri': '10.245.39.251', 'username': 'USERID', 'password': 'PASSW0RD=21', 'resource_uri': '/redfish/v1/testuri', - }) + }): + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}} - with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: - mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}} - - with self.assertRaises(AnsibleExitJson) as result: 
- module.main() + with self.assertRaises(AnsibleExitJson) as result: + module.main() def test_module_command_GetCollectionResource_fail_when_required_args_missing(self): - set_module_args({ + with set_module_args({ 'category': 'Raw', 'command': 'GetCollectionResource', 'baseuri': '10.245.39.251', 'username': 'USERID', 'password': 'PASSW0RD=21', - }) + }): + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}} - with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: - mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}} - - with self.assertRaises(AnsibleFailJson) as result: - module.main() + with self.assertRaises(AnsibleFailJson) as result: + module.main() def test_module_command_GetCollectionResource_fail_when_get_return_false(self): - set_module_args({ + with set_module_args({ 'category': 'Raw', 'command': 'GetCollectionResource', 'baseuri': '10.245.39.251', 'username': 'USERID', 'password': 'PASSW0RD=21', 'resource_uri': '/redfish/v1/testuri', - }) + }): + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = {'ret': False, 'msg': '404 error'} - with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: - mock_get_request.return_value = {'ret': False, 'msg': '404 error'} - - with self.assertRaises(AnsibleFailJson) as result: - module.main() + with self.assertRaises(AnsibleFailJson) as result: + module.main() def test_module_command_GetCollectionResource_fail_when_get_not_colection(self): - set_module_args({ + with set_module_args({ 'category': 'Raw', 'command': 'GetCollectionResource', 'baseuri': '10.245.39.251', 'username': 'USERID', 'password': 'PASSW0RD=21', 'resource_uri': '/redfish/v1/testuri', - }) + }): + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}} - with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: - mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}} - - with self.assertRaises(AnsibleFailJson) as result: - module.main() + with self.assertRaises(AnsibleFailJson) as result: + module.main() def test_module_command_GetCollectionResource_pass_when_get_empty_collection(self): - set_module_args({ + with set_module_args({ 'category': 'Raw', 'command': 'GetCollectionResource', 'baseuri': '10.245.39.251', 'username': 'USERID', 'password': 'PASSW0RD=21', 'resource_uri': '/redfish/v1/testuri', - }) + }): + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = {'ret': True, 'data': {'Members': [], 'Members@odata.count': 0}} - with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: - mock_get_request.return_value = {'ret': True, 'data': {'Members': [], 'Members@odata.count': 0}} - - with self.assertRaises(AnsibleExitJson) as result: - module.main() + with self.assertRaises(AnsibleExitJson) as result: + module.main() def test_module_command_GetCollectionResource_pass_when_get_collection(self): - set_module_args({ + with set_module_args({ 'category': 'Raw', 'command': 'GetCollectionResource', 'baseuri': '10.245.39.251', 'username': 'USERID', 'password': 'PASSW0RD=21', 'resource_uri': '/redfish/v1/testuri', - }) + }): + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = {'ret': 
True, 'data': {'Members': [{'@odata.id': '/redfish/v1/testuri/1'}], 'Members@odata.count': 1}} - with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: - mock_get_request.return_value = {'ret': True, 'data': {'Members': [{'@odata.id': '/redfish/v1/testuri/1'}], 'Members@odata.count': 1}} - - with self.assertRaises(AnsibleExitJson) as result: - module.main() + with self.assertRaises(AnsibleExitJson) as result: + module.main() def test_module_command_PatchResource_fail_when_required_args_missing(self): - set_module_args({ + with set_module_args({ 'category': 'Raw', 'command': 'PatchResource', 'baseuri': '10.245.39.251', 'username': 'USERID', 'password': 'PASSW0RD=21', - }) + }): + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx', '@odata.etag': '27f6eb13fa1c28a2711'}} - with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: - mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx', '@odata.etag': '27f6eb13fa1c28a2711'}} + with patch.object(module.XCCRedfishUtils, 'patch_request') as mock_patch_request: + mock_patch_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}} - with patch.object(module.XCCRedfishUtils, 'patch_request') as mock_patch_request: - mock_patch_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}} - - with self.assertRaises(AnsibleFailJson) as result: - module.main() + with self.assertRaises(AnsibleFailJson) as result: + module.main() def test_module_command_PatchResource_fail_when_required_args_missing_no_requestbody(self): - set_module_args({ + with set_module_args({ 'category': 'Raw', 'command': 'PatchResource', 'baseuri': '10.245.39.251', 'username': 'USERID', 'password': 'PASSW0RD=21', 'resource_uri': '/redfish/v1/testuri', - }) + }): + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx', '@odata.etag': '27f6eb13fa1c28a2711'}} - with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: - mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx', '@odata.etag': '27f6eb13fa1c28a2711'}} + with patch.object(module.XCCRedfishUtils, 'patch_request') as mock_patch_request: + mock_patch_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}} - with patch.object(module.XCCRedfishUtils, 'patch_request') as mock_patch_request: - mock_patch_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}} - - with self.assertRaises(AnsibleFailJson) as result: - module.main() + with self.assertRaises(AnsibleFailJson) as result: + module.main() def test_module_command_PatchResource_fail_when_noexisting_property_in_requestbody(self): - set_module_args({ + with set_module_args({ 'category': 'Raw', 'command': 'PatchResource', 'baseuri': '10.245.39.251', @@ -288,19 +278,18 @@ class TestXCCRedfishCommand(unittest.TestCase): 'password': 'PASSW0RD=21', 'resource_uri': '/redfish/v1/testuri', 'request_body': {'teststr': 'yyyy', 'otherkey': 'unknownkey'} - }) + }): + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx', '@odata.etag': '27f6eb13fa1c28a2711'}} - with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: - mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx', '@odata.etag': '27f6eb13fa1c28a2711'}} + with 
patch.object(module.XCCRedfishUtils, 'patch_request') as mock_patch_request: + mock_patch_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}} - with patch.object(module.XCCRedfishUtils, 'patch_request') as mock_patch_request: - mock_patch_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}} - - with self.assertRaises(AnsibleFailJson) as result: - module.main() + with self.assertRaises(AnsibleFailJson) as result: + module.main() def test_module_command_PatchResource_fail_when_get_return_false(self): - set_module_args({ + with set_module_args({ 'category': 'Raw', 'command': 'PatchResource', 'baseuri': '10.245.39.251', @@ -308,19 +297,18 @@ class TestXCCRedfishCommand(unittest.TestCase): 'password': 'PASSW0RD=21', 'resource_uri': '/redfish/v1/testuri', 'request_body': {'teststr': 'yyyy'} - }) + }): + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx', '@odata.etag': '27f6eb13fa1c28a2711'}} - with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: - mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx', '@odata.etag': '27f6eb13fa1c28a2711'}} + with patch.object(module.XCCRedfishUtils, 'patch_request') as mock_patch_request: + mock_patch_request.return_value = {'ret': False, 'msg': '500 internal error'} - with patch.object(module.XCCRedfishUtils, 'patch_request') as mock_patch_request: - mock_patch_request.return_value = {'ret': False, 'msg': '500 internal error'} - - with self.assertRaises(AnsibleFailJson) as result: - module.main() + with self.assertRaises(AnsibleFailJson) as result: + module.main() def test_module_command_PatchResource_pass(self): - set_module_args({ + with set_module_args({ 'category': 'Raw', 'command': 'PatchResource', 'baseuri': '10.245.39.251', @@ -328,170 +316,165 @@ class TestXCCRedfishCommand(unittest.TestCase): 'password': 'PASSW0RD=21', 'resource_uri': '/redfish/v1/testuri', 'request_body': {'teststr': 'yyyy'} - }) + }): + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx', '@odata.etag': '27f6eb13fa1c28a2711'}} - with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: - mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx', '@odata.etag': '27f6eb13fa1c28a2711'}} + with patch.object(module.XCCRedfishUtils, 'patch_request') as mock_patch_request: + mock_patch_request.return_value = {'ret': True, 'data': {'teststr': 'yyyy', '@odata.etag': '322e0d45d9572723c98'}} - with patch.object(module.XCCRedfishUtils, 'patch_request') as mock_patch_request: - mock_patch_request.return_value = {'ret': True, 'data': {'teststr': 'yyyy', '@odata.etag': '322e0d45d9572723c98'}} - - with self.assertRaises(AnsibleExitJson) as result: - module.main() + with self.assertRaises(AnsibleExitJson) as result: + module.main() def test_module_command_PostResource_fail_when_required_args_missing(self): - set_module_args({ + with set_module_args({ 'category': 'Raw', 'command': 'PostResource', 'baseuri': '10.245.39.251', 'username': 'USERID', 'password': 'PASSW0RD=21', - }) - - with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: - mock_get_request.return_value = { - 'ret': True, - 'data': { - 'Actions': { - '#Bios.ChangePassword': { - '@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo", - 'target': 
"/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword", - 'title': "ChangePassword", - 'PasswordName@Redfish.AllowableValues': [ - "UefiAdminPassword", - "UefiPowerOnPassword" - ] + }): + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = { + 'ret': True, + 'data': { + 'Actions': { + '#Bios.ChangePassword': { + '@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo", + 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword", + 'title': "ChangePassword", + 'PasswordName@Redfish.AllowableValues': [ + "UefiAdminPassword", + "UefiPowerOnPassword" + ] + }, + '#Bios.ResetBios': { + 'title': "ResetBios", + 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios" + } }, - '#Bios.ResetBios': { - 'title': "ResetBios", - 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios" - } - }, + } } - } - with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request: - mock_post_request.return_value = {'ret': True} + with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request: + mock_post_request.return_value = {'ret': True} - with self.assertRaises(AnsibleFailJson) as result: - module.main() + with self.assertRaises(AnsibleFailJson) as result: + module.main() def test_module_command_PostResource_fail_when_invalid_resourceuri(self): - set_module_args({ + with set_module_args({ 'category': 'Raw', 'command': 'PostResource', 'baseuri': '10.245.39.251', 'username': 'USERID', 'password': 'PASSW0RD=21', 'resource_uri': '/redfish/v1/testuri', - }) - - with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: - mock_get_request.return_value = { - 'ret': True, - 'data': { - 'Actions': { - '#Bios.ChangePassword': { - '@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo", - 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword", - 'title': "ChangePassword", - 'PasswordName@Redfish.AllowableValues': [ - "UefiAdminPassword", - "UefiPowerOnPassword" - ] + }): + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = { + 'ret': True, + 'data': { + 'Actions': { + '#Bios.ChangePassword': { + '@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo", + 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword", + 'title': "ChangePassword", + 'PasswordName@Redfish.AllowableValues': [ + "UefiAdminPassword", + "UefiPowerOnPassword" + ] + }, + '#Bios.ResetBios': { + 'title': "ResetBios", + 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios" + } }, - '#Bios.ResetBios': { - 'title': "ResetBios", - 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios" - } - }, + } } - } - with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request: - mock_post_request.return_value = {'ret': True} + with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request: + mock_post_request.return_value = {'ret': True} - with self.assertRaises(AnsibleFailJson) as result: - module.main() + with self.assertRaises(AnsibleFailJson) as result: + module.main() def test_module_command_PostResource_fail_when_no_requestbody(self): - set_module_args({ + with set_module_args({ 'category': 'Raw', 'command': 'PostResource', 'baseuri': '10.245.39.251', 'username': 'USERID', 'password': 'PASSW0RD=21', 'resource_uri': '/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword', - }) - - with patch.object(module.XCCRedfishUtils, 'get_request') as 
mock_get_request: - mock_get_request.return_value = { - 'ret': True, - 'data': { - 'Actions': { - '#Bios.ChangePassword': { - '@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo", - 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword", - 'title': "ChangePassword", - 'PasswordName@Redfish.AllowableValues': [ - "UefiAdminPassword", - "UefiPowerOnPassword" - ] + }): + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = { + 'ret': True, + 'data': { + 'Actions': { + '#Bios.ChangePassword': { + '@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo", + 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword", + 'title': "ChangePassword", + 'PasswordName@Redfish.AllowableValues': [ + "UefiAdminPassword", + "UefiPowerOnPassword" + ] + }, + '#Bios.ResetBios': { + 'title': "ResetBios", + 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios" + } }, - '#Bios.ResetBios': { - 'title': "ResetBios", - 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios" - } - }, + } } - } - with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request: - mock_post_request.return_value = {'ret': True} + with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request: + mock_post_request.return_value = {'ret': True} - with self.assertRaises(AnsibleFailJson) as result: - module.main() + with self.assertRaises(AnsibleFailJson) as result: + module.main() def test_module_command_PostResource_fail_when_no_requestbody(self): - set_module_args({ + with set_module_args({ 'category': 'Raw', 'command': 'PostResource', 'baseuri': '10.245.39.251', 'username': 'USERID', 'password': 'PASSW0RD=21', 'resource_uri': '/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword', - }) - - with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: - mock_get_request.return_value = { - 'ret': True, - 'data': { - 'Actions': { - '#Bios.ChangePassword': { - '@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo", - 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword", - 'title': "ChangePassword", - 'PasswordName@Redfish.AllowableValues': [ - "UefiAdminPassword", - "UefiPowerOnPassword" - ] + }): + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = { + 'ret': True, + 'data': { + 'Actions': { + '#Bios.ChangePassword': { + '@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo", + 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword", + 'title': "ChangePassword", + 'PasswordName@Redfish.AllowableValues': [ + "UefiAdminPassword", + "UefiPowerOnPassword" + ] + }, + '#Bios.ResetBios': { + 'title': "ResetBios", + 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios" + } }, - '#Bios.ResetBios': { - 'title': "ResetBios", - 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios" - } - }, + } } - } - with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request: - mock_post_request.return_value = {'ret': True} + with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request: + mock_post_request.return_value = {'ret': True} - with self.assertRaises(AnsibleFailJson) as result: - module.main() + with self.assertRaises(AnsibleFailJson) as result: + module.main() def test_module_command_PostResource_fail_when_requestbody_mismatch_with_data_from_actioninfo_uri(self): - set_module_args({ + with 
set_module_args({ 'category': 'Raw', 'command': 'PostResource', 'baseuri': '10.245.39.251', @@ -499,39 +482,38 @@ class TestXCCRedfishCommand(unittest.TestCase): 'password': 'PASSW0RD=21', 'resource_uri': '/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword', 'request_body': {'PasswordName': 'UefiAdminPassword', 'NewPassword': 'PASSW0RD=='} - }) - - with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: - mock_get_request.return_value = { - 'ret': True, - 'data': { - 'Parameters': [], - 'Actions': { - '#Bios.ChangePassword': { - '@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo", - 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword", - 'title': "ChangePassword", - 'PasswordName@Redfish.AllowableValues': [ - "UefiAdminPassword", - "UefiPowerOnPassword" - ] + }): + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = { + 'ret': True, + 'data': { + 'Parameters': [], + 'Actions': { + '#Bios.ChangePassword': { + '@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo", + 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword", + 'title': "ChangePassword", + 'PasswordName@Redfish.AllowableValues': [ + "UefiAdminPassword", + "UefiPowerOnPassword" + ] + }, + '#Bios.ResetBios': { + 'title': "ResetBios", + 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios" + } }, - '#Bios.ResetBios': { - 'title': "ResetBios", - 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios" - } - }, + } } - } - with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request: - mock_post_request.return_value = {'ret': True} + with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request: + mock_post_request.return_value = {'ret': True} - with self.assertRaises(AnsibleFailJson) as result: - module.main() + with self.assertRaises(AnsibleFailJson) as result: + module.main() def test_module_command_PostResource_fail_when_get_return_false(self): - set_module_args({ + with set_module_args({ 'category': 'Raw', 'command': 'PostResource', 'baseuri': '10.245.39.251', @@ -539,19 +521,18 @@ class TestXCCRedfishCommand(unittest.TestCase): 'password': 'PASSW0RD=21', 'resource_uri': '/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword', 'request_body': {'PasswordName': 'UefiAdminPassword', 'NewPassword': 'PASSW0RD=='} - }) + }): + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = {'ret': False, 'msg': '404 error'} - with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: - mock_get_request.return_value = {'ret': False, 'msg': '404 error'} + with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request: + mock_post_request.return_value = {'ret': True} - with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request: - mock_post_request.return_value = {'ret': True} - - with self.assertRaises(AnsibleFailJson) as result: - module.main() + with self.assertRaises(AnsibleFailJson) as result: + module.main() def test_module_command_PostResource_fail_when_post_return_false(self): - set_module_args({ + with set_module_args({ 'category': 'Raw', 'command': 'PostResource', 'baseuri': '10.245.39.251', @@ -559,38 +540,37 @@ class TestXCCRedfishCommand(unittest.TestCase): 'password': 'PASSW0RD=21', 'resource_uri': '/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios', 'request_body': {} - }) - - with patch.object(module.XCCRedfishUtils, 
'get_request') as mock_get_request: - mock_get_request.return_value = { - 'ret': True, - 'data': { - 'Actions': { - '#Bios.ChangePassword': { - '@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo", - 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword", - 'title': "ChangePassword", - 'PasswordName@Redfish.AllowableValues': [ - "UefiAdminPassword", - "UefiPowerOnPassword" - ] + }): + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = { + 'ret': True, + 'data': { + 'Actions': { + '#Bios.ChangePassword': { + '@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo", + 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword", + 'title': "ChangePassword", + 'PasswordName@Redfish.AllowableValues': [ + "UefiAdminPassword", + "UefiPowerOnPassword" + ] + }, + '#Bios.ResetBios': { + 'title': "ResetBios", + 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios" + } }, - '#Bios.ResetBios': { - 'title': "ResetBios", - 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios" - } - }, + } } - } - with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request: - mock_post_request.return_value = {'ret': False, 'msg': '500 internal error'} + with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request: + mock_post_request.return_value = {'ret': False, 'msg': '500 internal error'} - with self.assertRaises(AnsibleFailJson) as result: - module.main() + with self.assertRaises(AnsibleFailJson) as result: + module.main() def test_module_command_PostResource_pass(self): - set_module_args({ + with set_module_args({ 'category': 'Raw', 'command': 'PostResource', 'baseuri': '10.245.39.251', @@ -598,32 +578,31 @@ class TestXCCRedfishCommand(unittest.TestCase): 'password': 'PASSW0RD=21', 'resource_uri': '/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios', 'request_body': {} - }) - - with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: - mock_get_request.return_value = { - 'ret': True, - 'data': { - 'Actions': { - '#Bios.ChangePassword': { - '@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo", - 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword", - 'title': "ChangePassword", - 'PasswordName@Redfish.AllowableValues': [ - "UefiAdminPassword", - "UefiPowerOnPassword" - ] + }): + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = { + 'ret': True, + 'data': { + 'Actions': { + '#Bios.ChangePassword': { + '@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo", + 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword", + 'title': "ChangePassword", + 'PasswordName@Redfish.AllowableValues': [ + "UefiAdminPassword", + "UefiPowerOnPassword" + ] + }, + '#Bios.ResetBios': { + 'title': "ResetBios", + 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios" + } }, - '#Bios.ResetBios': { - 'title': "ResetBios", - 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios" - } - }, + } } - } - with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request: - mock_post_request.return_value = {'ret': True, 'msg': 'post success'} + with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request: + mock_post_request.return_value = {'ret': True, 'msg': 'post success'} - with self.assertRaises(AnsibleExitJson) as result: - module.main() + with self.assertRaises(AnsibleExitJson) 
as result: + module.main() diff --git a/tests/unit/plugins/modules/test_xdg_mime.py b/tests/unit/plugins/modules/test_xdg_mime.py new file mode 100644 index 0000000000..b897777632 --- /dev/null +++ b/tests/unit/plugins/modules/test_xdg_mime.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2025, Marcos Alano +# Based on gio_mime module. Copyright (c) 2022, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +from ansible_collections.community.general.plugins.modules import xdg_mime +from .uthelper import UTHelper, RunCommandMock + + +UTHelper.from_module(xdg_mime, __name__, mocks=[RunCommandMock]) diff --git a/tests/unit/plugins/modules/test_xdg_mime.yaml b/tests/unit/plugins/modules/test_xdg_mime.yaml new file mode 100644 index 0000000000..83bc15f901 --- /dev/null +++ b/tests/unit/plugins/modules/test_xdg_mime.yaml @@ -0,0 +1,89 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2025, Marcos Alano +# Based on gio_mime module. Copyright (c) 2022, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# TODO: add tests for setting multiple mime types at once +--- +anchors: + environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} + input: &input + mime_types: x-scheme-handler/http + handler: google-chrome.desktop + get_version: &get_version + command: [/testbin/xdg-mime, --version] + environ: *env-def + rc: 0 + out: "xdg-mime 1.2.1\n" + err: '' + query_mime_type: &query_mime_type + command: [/testbin/xdg-mime, query, default, x-scheme-handler/http] + environ: *env-def + rc: 0 + out: '' + err: '' + set_handler: &set_handler + command: [/testbin/xdg-mime, default, google-chrome.desktop, x-scheme-handler/http] + environ: *env-def + rc: 0 + out: '' + err: '' +test_cases: + - id: test_set_handler + input: *input + output: + current_handlers: [''] + changed: true + mocks: + run_command: + - *get_version + - *query_mime_type + - *set_handler + - id: test_set_handler_check + input: *input + output: + current_handlers: ['google-chrome.desktop'] + changed: false + flags: + check: true + mocks: + run_command: + - *get_version + - <<: *query_mime_type + out: | + google-chrome.desktop + - id: test_set_handler_idempot + input: *input + output: + current_handlers: ['google-chrome.desktop'] + changed: false + mocks: + run_command: + - *get_version + - <<: *query_mime_type + out: | + google-chrome.desktop + - id: test_set_handler_idempot_check + input: *input + output: + current_handlers: ['google-chrome.desktop'] + changed: false + flags: + check: true + mocks: + run_command: + - *get_version + - <<: *query_mime_type + out: | + google-chrome.desktop + - id: test_set_invalid_handler + input: + <<: *input + handler: google-chrome.desktopX + output: + failed: true + msg: Handler must be a .desktop file + mocks: + run_command: + - *get_version diff --git a/tests/unit/plugins/modules/test_xfconf.py b/tests/unit/plugins/modules/test_xfconf.py index f902797ee3..ea89fb93fe 100644 --- a/tests/unit/plugins/modules/test_xfconf.py +++ b/tests/unit/plugins/modules/test_xfconf.py @@ -14,7 +14,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import xfconf -from .helper import Helper, RunCommandMock # pylint: 
disable=unused-import +from .uthelper import UTHelper, RunCommandMock -Helper.from_module(xfconf, __name__) +UTHelper.from_module(xfconf, __name__, mocks=[RunCommandMock]) diff --git a/tests/unit/plugins/modules/test_xfconf.yaml b/tests/unit/plugins/modules/test_xfconf.yaml index c52c8f7c1b..2ba274fdfb 100644 --- a/tests/unit/plugins/modules/test_xfconf.yaml +++ b/tests/unit/plugins/modules/test_xfconf.yaml @@ -4,230 +4,234 @@ # SPDX-License-Identifier: GPL-3.0-or-later --- -- id: test_missing_input - input: {} - output: - failed: true - msg: "missing required arguments: channel, property" -- id: test_property_set_property - input: - channel: xfwm4 - property: /general/inactive_opacity - state: present - value_type: int - value: 90 - output: - changed: true - previous_value: '100' - type: int - value: '90' - version: "4.18.1" - mocks: - run_command: - - command: [/testbin/xfconf-query, --version] - environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: false} - rc: 0 - out: &version-output | - xfconf-query 4.18.1 +anchors: + environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: false} + version_out: &version-output | + xfconf-query 4.18.1 - Copyright (c) 2008-2023 - The Xfce development team. All rights reserved. + Copyright (c) 2008-2023 + The Xfce development team. All rights reserved. - Please report bugs to . - err: "" - - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/inactive_opacity] - environ: *env-def - rc: 0 - out: "100\n" - err: "" - - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/inactive_opacity, --create, --type, int, --set, '90'] - environ: *env-def - rc: 0 - out: "" - err: "" -- id: test_property_set_property_same_value - input: - channel: xfwm4 - property: /general/inactive_opacity - state: present - value_type: int - value: 90 - output: - changed: false - previous_value: '90' - type: int - value: '90' - version: "4.18.1" - mocks: - run_command: - - command: [/testbin/xfconf-query, --version] - environ: *env-def - rc: 0 - out: *version-output - err: "" - - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/inactive_opacity] - environ: *env-def - rc: 0 - out: "90\n" - err: "" - - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/inactive_opacity, --create, --type, int, --set, '90'] - environ: *env-def - rc: 0 - out: "" - err: "" -- id: test_property_set_property_bool_false - input: - channel: xfce4-session - property: /general/SaveOnExit - state: present - value_type: bool - value: false - output: - changed: true - previous_value: 'true' - type: bool - value: 'False' - version: "4.18.1" - mocks: - run_command: - - command: [/testbin/xfconf-query, --version] - environ: *env-def - rc: 0 - out: *version-output - err: "" - - command: [/testbin/xfconf-query, --channel, xfce4-session, --property, /general/SaveOnExit] - environ: *env-def - rc: 0 - out: "true\n" - err: "" - - command: [/testbin/xfconf-query, --channel, xfce4-session, --property, /general/SaveOnExit, --create, --type, bool, --set, 'false'] - environ: *env-def - rc: 0 - out: "false\n" - err: "" -- id: test_property_set_array - input: - channel: xfwm4 - property: /general/workspace_names - state: present - value_type: string - value: [A, B, C] - output: - changed: true - previous_value: [Main, Work, Tmp] - type: [string, string, string] - value: [A, B, C] - version: "4.18.1" - mocks: - run_command: - - command: [/testbin/xfconf-query, --version] - environ: *env-def - rc: 0 - out: 
*version-output - err: "" - - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/workspace_names] - environ: *env-def - rc: 0 - out: "Value is an array with 3 items:\n\nMain\nWork\nTmp\n" - err: "" - - command: - - /testbin/xfconf-query - - --channel - - xfwm4 - - --property - - /general/workspace_names - - --create - - --force-array - - --type - - string - - --set - - A - - --type - - string - - --set - - B - - --type - - string - - --set - - C - environ: *env-def - rc: 0 - out: "" - err: "" -- id: test_property_set_array_to_same_value - input: - channel: xfwm4 - property: /general/workspace_names - state: present - value_type: string - value: [A, B, C] - output: - changed: false - previous_value: [A, B, C] - type: [string, string, string] - value: [A, B, C] - version: "4.18.1" - mocks: - run_command: - - command: [/testbin/xfconf-query, --version] - environ: *env-def - rc: 0 - out: *version-output - err: "" - - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/workspace_names] - environ: *env-def - rc: 0 - out: "Value is an array with 3 items:\n\nA\nB\nC\n" - err: "" - - command: - - /testbin/xfconf-query - - --channel - - xfwm4 - - --property - - /general/workspace_names - - --create - - --force-array - - --type - - string - - --set - - A - - --type - - string - - --set - - B - - --type - - string - - --set - - C - environ: *env-def - rc: 0 - out: "" - err: "" -- id: test_property_reset_value - input: - channel: xfwm4 - property: /general/workspace_names - state: absent - output: - changed: true - previous_value: [A, B, C] - type: - value: - version: "4.18.1" - mocks: - run_command: - - command: [/testbin/xfconf-query, --version] - environ: *env-def - rc: 0 - out: *version-output - err: "" - - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/workspace_names] - environ: *env-def - rc: 0 - out: "Value is an array with 3 items:\n\nA\nB\nC\n" - err: "" - - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/workspace_names, --reset] - environ: *env-def - rc: 0 - out: "" - err: "" + Please report bugs to . 
+test_cases: + - id: test_missing_input + input: {} + output: + failed: true + msg: 'missing required arguments: channel, property' + - id: test_property_set_property + input: + channel: xfwm4 + property: /general/inactive_opacity + state: present + value_type: int + value: 90 + output: + changed: true + previous_value: '100' + type: int + value: '90' + version: 4.18.1 + mocks: + run_command: + - command: [/testbin/xfconf-query, --version] + environ: *env-def + rc: 0 + out: *version-output + err: '' + - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/inactive_opacity] + environ: *env-def + rc: 0 + out: "100\n" + err: '' + - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/inactive_opacity, --create, --type, int, --set, '90'] + environ: *env-def + rc: 0 + out: '' + err: '' + - id: test_property_set_property_same_value + input: + channel: xfwm4 + property: /general/inactive_opacity + state: present + value_type: int + value: 90 + output: + changed: false + previous_value: '90' + type: int + value: '90' + version: 4.18.1 + mocks: + run_command: + - command: [/testbin/xfconf-query, --version] + environ: *env-def + rc: 0 + out: *version-output + err: '' + - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/inactive_opacity] + environ: *env-def + rc: 0 + out: "90\n" + err: '' + - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/inactive_opacity, --create, --type, int, --set, '90'] + environ: *env-def + rc: 0 + out: '' + err: '' + - id: test_property_set_property_bool_false + input: + channel: xfce4-session + property: /general/SaveOnExit + state: present + value_type: bool + value: false + output: + changed: true + previous_value: 'true' + type: bool + value: 'False' + version: 4.18.1 + mocks: + run_command: + - command: [/testbin/xfconf-query, --version] + environ: *env-def + rc: 0 + out: *version-output + err: '' + - command: [/testbin/xfconf-query, --channel, xfce4-session, --property, /general/SaveOnExit] + environ: *env-def + rc: 0 + out: "true\n" + err: '' + - command: [/testbin/xfconf-query, --channel, xfce4-session, --property, /general/SaveOnExit, --create, --type, bool, --set, 'false'] + environ: *env-def + rc: 0 + out: "false\n" + err: '' + - id: test_property_set_array + input: + channel: xfwm4 + property: /general/workspace_names + state: present + value_type: string + value: [A, B, C] + output: + changed: true + previous_value: [Main, Work, Tmp] + type: [string, string, string] + value: [A, B, C] + version: 4.18.1 + mocks: + run_command: + - command: [/testbin/xfconf-query, --version] + environ: *env-def + rc: 0 + out: *version-output + err: '' + - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/workspace_names] + environ: *env-def + rc: 0 + out: "Value is an array with 3 items:\n\nMain\nWork\nTmp\n" + err: '' + - command: + - /testbin/xfconf-query + - --channel + - xfwm4 + - --property + - /general/workspace_names + - --create + - --force-array + - --type + - string + - --set + - A + - --type + - string + - --set + - B + - --type + - string + - --set + - C + environ: *env-def + rc: 0 + out: '' + err: '' + - id: test_property_set_array_to_same_value + input: + channel: xfwm4 + property: /general/workspace_names + state: present + value_type: string + value: [A, B, C] + output: + changed: false + previous_value: [A, B, C] + type: [string, string, string] + value: [A, B, C] + version: 4.18.1 + mocks: + run_command: + - command: [/testbin/xfconf-query, --version] + 
environ: *env-def + rc: 0 + out: *version-output + err: '' + - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/workspace_names] + environ: *env-def + rc: 0 + out: "Value is an array with 3 items:\n\nA\nB\nC\n" + err: '' + - command: + - /testbin/xfconf-query + - --channel + - xfwm4 + - --property + - /general/workspace_names + - --create + - --force-array + - --type + - string + - --set + - A + - --type + - string + - --set + - B + - --type + - string + - --set + - C + environ: *env-def + rc: 0 + out: '' + err: '' + - id: test_property_reset_value + input: + channel: xfwm4 + property: /general/workspace_names + state: absent + output: + changed: true + previous_value: [A, B, C] + type: + value: + version: 4.18.1 + mocks: + run_command: + - command: [/testbin/xfconf-query, --version] + environ: *env-def + rc: 0 + out: *version-output + err: '' + - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/workspace_names] + environ: *env-def + rc: 0 + out: "Value is an array with 3 items:\n\nA\nB\nC\n" + err: '' + - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/workspace_names, --reset] + environ: *env-def + rc: 0 + out: '' + err: '' diff --git a/tests/unit/plugins/modules/test_xfconf_info.py b/tests/unit/plugins/modules/test_xfconf_info.py index 4cdb92b305..d65e7035cf 100644 --- a/tests/unit/plugins/modules/test_xfconf_info.py +++ b/tests/unit/plugins/modules/test_xfconf_info.py @@ -7,7 +7,7 @@ __metaclass__ = type from ansible_collections.community.general.plugins.modules import xfconf_info -from .helper import Helper, RunCommandMock +from .uthelper import UTHelper, RunCommandMock -Helper.from_module(xfconf_info, __name__, mocks=[RunCommandMock]) +UTHelper.from_module(xfconf_info, __name__, mocks=[RunCommandMock]) diff --git a/tests/unit/plugins/modules/test_xfconf_info.yaml b/tests/unit/plugins/modules/test_xfconf_info.yaml index 8e7ae667c4..d4d0deb39f 100644 --- a/tests/unit/plugins/modules/test_xfconf_info.yaml +++ b/tests/unit/plugins/modules/test_xfconf_info.yaml @@ -14,114 +14,114 @@ anchors: Please report bugs to . 
test_cases: -- id: test_simple_property_get - input: - channel: xfwm4 - property: /general/inactive_opacity - output: - value: '100' - is_array: false - version: "4.18.1" - mocks: - run_command: - - command: [/testbin/xfconf-query, --version] - environ: *env-def - rc: 0 - out: *version-output - err: "" - - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/inactive_opacity] - environ: *env-def - rc: 0 - out: "100\n" - err: "" -- id: test_simple_property_get_nonexistent - input: - channel: xfwm4 - property: /general/i_dont_exist - output: - version: "4.18.1" - mocks: - run_command: - - command: [/testbin/xfconf-query, --version] - environ: *env-def - rc: 0 - out: *version-output - err: "" - - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/i_dont_exist] - environ: *env-def - rc: 1 - out: "" - err: 'Property "/general/i_dont_exist" does not exist on channel "xfwm4".\n' -- id: test_property_no_channel - input: - property: /general/i_dont_exist - output: - failed: true - msg: "missing parameter(s) required by 'property': channel" -- id: test_property_get_array - input: - channel: xfwm4 - property: /general/workspace_names - output: - is_array: true - value_array: [Main, Work, Tmp] - version: "4.18.1" - mocks: - run_command: - - command: [/testbin/xfconf-query, --version] - environ: *env-def - rc: 0 - out: *version-output - err: "" - - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/workspace_names] - environ: *env-def - rc: 0 - out: "Value is an array with 3 items:\n\nMain\nWork\nTmp\n" - err: "" -- id: get_channels - input: {} - output: - channels: [a, b, c] - version: "4.18.1" - mocks: - run_command: - - command: [/testbin/xfconf-query, --version] - environ: *env-def - rc: 0 - out: *version-output - err: "" - - command: [/testbin/xfconf-query, --list] - environ: *env-def - rc: 0 - out: "Channels:\n a\n b\n c\n" - err: "" -- id: get_properties - input: - channel: xfwm4 - output: - properties: - - /general/wrap_cycle - - /general/wrap_layout - - /general/wrap_resistance - - /general/wrap_windows - - /general/wrap_workspaces - - /general/zoom_desktop - version: "4.18.1" - mocks: - run_command: - - command: [/testbin/xfconf-query, --version] - environ: *env-def - rc: 0 - out: *version-output - err: "" - - command: [/testbin/xfconf-query, --list, --channel, xfwm4] - environ: *env-def - rc: 0 - out: | - /general/wrap_cycle - /general/wrap_layout - /general/wrap_resistance - /general/wrap_windows - /general/wrap_workspaces - /general/zoom_desktop - err: "" + - id: test_simple_property_get + input: + channel: xfwm4 + property: /general/inactive_opacity + output: + value: '100' + is_array: false + version: 4.18.1 + mocks: + run_command: + - command: [/testbin/xfconf-query, --version] + environ: *env-def + rc: 0 + out: *version-output + err: '' + - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/inactive_opacity] + environ: *env-def + rc: 0 + out: "100\n" + err: '' + - id: test_simple_property_get_nonexistent + input: + channel: xfwm4 + property: /general/i_dont_exist + output: + version: 4.18.1 + mocks: + run_command: + - command: [/testbin/xfconf-query, --version] + environ: *env-def + rc: 0 + out: *version-output + err: '' + - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/i_dont_exist] + environ: *env-def + rc: 1 + out: '' + err: Property "/general/i_dont_exist" does not exist on channel "xfwm4".\n + - id: test_property_no_channel + input: + property: /general/i_dont_exist + 
output: + failed: true + msg: "missing parameter(s) required by 'property': channel" + - id: test_property_get_array + input: + channel: xfwm4 + property: /general/workspace_names + output: + is_array: true + value_array: [Main, Work, Tmp] + version: 4.18.1 + mocks: + run_command: + - command: [/testbin/xfconf-query, --version] + environ: *env-def + rc: 0 + out: *version-output + err: '' + - command: [/testbin/xfconf-query, --channel, xfwm4, --property, /general/workspace_names] + environ: *env-def + rc: 0 + out: "Value is an array with 3 items:\n\nMain\nWork\nTmp\n" + err: '' + - id: get_channels + input: {} + output: + channels: [a, b, c] + version: 4.18.1 + mocks: + run_command: + - command: [/testbin/xfconf-query, --version] + environ: *env-def + rc: 0 + out: *version-output + err: '' + - command: [/testbin/xfconf-query, --list] + environ: *env-def + rc: 0 + out: "Channels:\n a\n b\n c\n" + err: '' + - id: get_properties + input: + channel: xfwm4 + output: + properties: + - /general/wrap_cycle + - /general/wrap_layout + - /general/wrap_resistance + - /general/wrap_windows + - /general/wrap_workspaces + - /general/zoom_desktop + version: 4.18.1 + mocks: + run_command: + - command: [/testbin/xfconf-query, --version] + environ: *env-def + rc: 0 + out: *version-output + err: '' + - command: [/testbin/xfconf-query, --list, --channel, xfwm4] + environ: *env-def + rc: 0 + out: | + /general/wrap_cycle + /general/wrap_layout + /general/wrap_resistance + /general/wrap_windows + /general/wrap_workspaces + /general/zoom_desktop + err: '' diff --git a/tests/unit/plugins/modules/helper.py b/tests/unit/plugins/modules/uthelper.py similarity index 74% rename from tests/unit/plugins/modules/helper.py rename to tests/unit/plugins/modules/uthelper.py index 8071bc2aa9..7aea76581b 100644 --- a/tests/unit/plugins/modules/helper.py +++ b/tests/unit/plugins/modules/uthelper.py @@ -6,6 +6,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +import os import sys import json @@ -13,49 +14,49 @@ import yaml import pytest -from ansible.module_utils.common._collections_compat import Sequence - - -class Helper(object): +class UTHelper(object): TEST_SPEC_VALID_SECTIONS = ["anchors", "test_cases"] @staticmethod - def from_spec(test_module, ansible_module, test_spec, mocks=None): - helper = Helper(test_module, ansible_module, test_spec=test_spec, mocks=mocks) + def from_spec(ansible_module, test_module, test_spec, mocks=None): + helper = UTHelper(ansible_module, test_module, test_spec=test_spec, mocks=mocks) return helper @staticmethod - def from_file(test_module, ansible_module, filename, mocks=None): - with open(filename, "r") as test_cases: - test_spec = yaml.safe_load(test_cases) - return Helper.from_spec(test_module, ansible_module, test_spec, mocks) + def from_file(ansible_module, test_module, test_spec_filehandle, mocks=None): + test_spec = yaml.safe_load(test_spec_filehandle) + return UTHelper.from_spec(ansible_module, test_module, test_spec, mocks) + # @TODO: calculate the test_module_name automatically, remove one more parameter @staticmethod - def from_module(ansible_module, test_module_name, test_spec=None, mocks=None): + def from_module(ansible_module, test_module_name, mocks=None): test_module = sys.modules[test_module_name] - if test_spec is None: - test_spec = test_module.__file__.replace('.py', '.yaml') - return Helper.from_file(test_module, ansible_module, test_spec) + extensions = ['.yaml', '.yml'] + for ext in extensions: + test_spec_filename = 
test_module.__file__.replace('.py', ext) + if os.path.exists(test_spec_filename): + with open(test_spec_filename, "r") as test_spec_filehandle: + return UTHelper.from_file(ansible_module, test_module, test_spec_filehandle, mocks=mocks) + + raise Exception("Cannot find test case file for {0} with one of the extensions: {1}".format(test_module.__file__, extensions)) def add_func_to_test_module(self, name, func): setattr(self.test_module, name, func) - def __init__(self, test_module, ansible_module, test_spec, mocks=None): - self.test_module = test_module + def __init__(self, ansible_module, test_module, test_spec, mocks=None): self.ansible_module = ansible_module + self.test_module = test_module self.test_cases = [] self.fixtures = {} - if isinstance(test_spec, Sequence): - test_cases = test_spec - else: # it is a dict - test_cases = test_spec['test_cases'] - spec_diff = set(test_spec.keys()) - set(self.TEST_SPEC_VALID_SECTIONS) - if spec_diff: - raise ValueError("Test specification contain unknown keys: {0}".format(", ".join(spec_diff))) + + spec_diff = set(test_spec.keys()) - set(self.TEST_SPEC_VALID_SECTIONS) + if spec_diff: + raise ValueError("Test specification contain unknown keys: {0}".format(", ".join(spec_diff))) + self.mocks_map = {m.name: m for m in mocks} if mocks else {} - for test_case in test_cases: - tc = ModuleTestCase.make_test_case(test_case, test_module, self.mocks_map) + for spec_test_case in test_spec['test_cases']: + tc = ModuleTestCase.make_test_case(spec_test_case, test_module, self.mocks_map) self.test_cases.append(tc) self.fixtures.update(tc.fixtures) self.set_test_func() @@ -68,12 +69,18 @@ class Helper(object): def set_test_func(self): @pytest.mark.parametrize('test_case', self.test_cases, ids=[tc.id for tc in self.test_cases]) @pytest.mark.usefixtures(*self.fixtures) - def _test_module(mocker, capfd, patch_ansible_module, test_case): + def _test_module(mocker, capfd, patch_ansible_module_uthelper, test_case): """ Run unit tests for each test case in self.test_cases """ - patch_ansible_module(test_case.input) - self.runner.run(mocker, capfd, test_case) + args = {} + args.update(test_case.input) + if test_case.flags.get("check"): + args["_ansible_check_mode"] = test_case.flags.get("check") + if test_case.flags.get("diff"): + args["_ansible_diff"] = test_case.flags.get("diff") + with patch_ansible_module_uthelper(args): + self.runner.run(mocker, capfd, test_case) self.add_func_to_test_module("test_module", _test_module) @@ -145,26 +152,19 @@ class ModuleTestCase: mocks=test_case_spec.get("mocks", {}), flags=test_case_spec.get("flags", {}) ) - tc.build_mocks(test_module, mocks_map) + tc.build_mocks(mocks_map) return tc - def build_mocks(self, test_module, mocks_map): + def build_mocks(self, mocks_map): for mock_name, mock_spec in self.mock_specs.items(): - mock_class = mocks_map.get(mock_name, self.get_mock_class(test_module, mock_name)) + try: + mock_class = mocks_map[mock_name] + except KeyError: + raise Exception("Cannot find TestCaseMock class for: {0}".format(mock_name)) self.mocks[mock_name] = mock_class.build_mock(mock_spec) self._fixtures.update(self.mocks[mock_name].fixtures()) - @staticmethod - def get_mock_class(test_module, mock): - try: - class_name = "".join(x.capitalize() for x in mock.split("_")) + "Mock" - plugin_class = getattr(test_module, class_name) - assert issubclass(plugin_class, TestCaseMock), "Class {0} is not a subclass of TestCaseMock".format(class_name) - return plugin_class - except AttributeError: - raise ValueError("Cannot find class 
{0} for mock {1}".format(class_name, mock)) - @property def fixtures(self): return dict(self._fixtures) @@ -200,10 +200,6 @@ class ModuleTestCase: class TestCaseMock: - @property - def name(self): - raise NotImplementedError() - @classmethod def build_mock(cls, mock_specs): return cls(mock_specs) @@ -222,9 +218,7 @@ class TestCaseMock: class RunCommandMock(TestCaseMock): - @property - def name(self): - return "run_command" + name = "run_command" def __str__(self): return "".format(specs=self.mock_specs) diff --git a/tests/unit/plugins/modules/utils.py b/tests/unit/plugins/modules/utils.py index 1f7f14722f..e869bce957 100644 --- a/tests/unit/plugins/modules/utils.py +++ b/tests/unit/plugins/modules/utils.py @@ -5,22 +5,25 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +import contextlib as _contextlib import json -from ansible_collections.community.general.tests.unit.compat import unittest -from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch from ansible.module_utils import basic from ansible.module_utils.common.text.converters import to_bytes +@_contextlib.contextmanager def set_module_args(args): if '_ansible_remote_tmp' not in args: args['_ansible_remote_tmp'] = '/tmp' if '_ansible_keep_remote_files' not in args: args['_ansible_keep_remote_files'] = False - args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) - basic._ANSIBLE_ARGS = to_bytes(args) + serialized_args = to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': args})) + with patch.object(basic, '_ANSIBLE_ARGS', serialized_args): + yield class AnsibleExitJson(Exception): diff --git a/tests/unit/plugins/plugin_utils/test_unsafe.py b/tests/unit/plugins/plugin_utils/test_unsafe.py index 3f35ee9337..32d4d1f30f 100644 --- a/tests/unit/plugins/plugin_utils/test_unsafe.py +++ b/tests/unit/plugins/plugin_utils/test_unsafe.py @@ -11,7 +11,12 @@ __metaclass__ = type import pytest -from ansible.utils.unsafe_proxy import AnsibleUnsafe +from ansible_collections.community.internal_test_tools.tests.unit.utils.trust import ( + make_untrusted as _make_untrusted, + make_trusted as _make_trusted, + is_trusted as _is_trusted, + SUPPORTS_DATA_TAGGING, +) from ansible_collections.community.general.plugins.plugin_utils.unsafe import ( make_unsafe, @@ -20,28 +25,14 @@ from ansible_collections.community.general.plugins.plugin_utils.unsafe import ( TEST_MAKE_UNSAFE = [ ( - u'text', + _make_trusted(u'text'), [], [ (), ], ), ( - u'{{text}}', - [ - (), - ], - [], - ), - ( - b'text', - [], - [ - (), - ], - ), - ( - b'{{text}}', + _make_trusted(u'{{text}}'), [ (), ], @@ -49,14 +40,14 @@ TEST_MAKE_UNSAFE = [ ), ( { - 'skey': 'value', - 'ukey': '{{value}}', + _make_trusted('skey'): _make_trusted('value'), + _make_trusted('ukey'): _make_trusted('{{value}}'), 1: [ - 'value', - '{{value}}', + _make_trusted('value'), + _make_trusted('{{value}}'), { - 1.0: '{{value}}', - 2.0: 'value', + 1.0: _make_trusted('{{value}}'), + 2.0: _make_trusted('value'), }, ], }, @@ -72,7 +63,7 @@ TEST_MAKE_UNSAFE = [ ], ), ( - ['value', '{{value}}'], + [_make_trusted('value'), _make_trusted('{{value}}')], [ (1, ), ], @@ -82,6 +73,24 @@ TEST_MAKE_UNSAFE = [ ), ] +if not SUPPORTS_DATA_TAGGING: + TEST_MAKE_UNSAFE.extend([ + ( + _make_trusted(b"text"), + [], + [ + (), + ], + ), + ( + _make_trusted(b"{{text}}"), + [ + (), + ], + [], + ), + ]) + 
@pytest.mark.parametrize("value, check_unsafe_paths, check_safe_paths", TEST_MAKE_UNSAFE) def test_make_unsafe(value, check_unsafe_paths, check_safe_paths): @@ -91,43 +100,59 @@ def test_make_unsafe(value, check_unsafe_paths, check_safe_paths): obj = unsafe_value for elt in check_path: obj = obj[elt] - assert isinstance(obj, AnsibleUnsafe) + assert not _is_trusted(obj) for check_path in check_safe_paths: obj = unsafe_value for elt in check_path: obj = obj[elt] - assert not isinstance(obj, AnsibleUnsafe) + assert _is_trusted(obj) + + +def test_make_unsafe_idempotence(): + assert make_unsafe(None) is None + + unsafe_str = _make_untrusted('{{test}}') + assert id(make_unsafe(unsafe_str)) == id(unsafe_str) + + safe_str = _make_trusted('{{test}}') + assert id(make_unsafe(safe_str)) != id(safe_str) def test_make_unsafe_dict_key(): value = { - b'test': 1, - u'test': 2, + _make_trusted(u'test'): 2, } + if not SUPPORTS_DATA_TAGGING: + value[_make_trusted(b"test")] = 1 unsafe_value = make_unsafe(value) assert unsafe_value == value for obj in unsafe_value: - assert not isinstance(obj, AnsibleUnsafe) + assert _is_trusted(obj) value = { - b'{{test}}': 1, - u'{{test}}': 2, + _make_trusted(u'{{test}}'): 2, } + if not SUPPORTS_DATA_TAGGING: + value[_make_trusted(b"{{test}}")] = 1 unsafe_value = make_unsafe(value) assert unsafe_value == value for obj in unsafe_value: - assert isinstance(obj, AnsibleUnsafe) + assert not _is_trusted(obj) def test_make_unsafe_set(): - value = set([b'test', u'test']) + value = set([_make_trusted(u'test')]) + if not SUPPORTS_DATA_TAGGING: + value.add(_make_trusted(b"test")) unsafe_value = make_unsafe(value) assert unsafe_value == value for obj in unsafe_value: - assert not isinstance(obj, AnsibleUnsafe) + assert _is_trusted(obj) - value = set([b'{{test}}', u'{{test}}']) + value = set([_make_trusted(u'{{test}}')]) + if not SUPPORTS_DATA_TAGGING: + value.add(_make_trusted(b"{{test}}")) unsafe_value = make_unsafe(value) assert unsafe_value == value for obj in unsafe_value: - assert isinstance(obj, AnsibleUnsafe) + assert not _is_trusted(obj) diff --git a/tests/unit/requirements.txt b/tests/unit/requirements.txt index cfc8493912..fdcf99cbb4 100644 --- a/tests/unit/requirements.txt +++ b/tests/unit/requirements.txt @@ -47,13 +47,15 @@ elastic-apm ; python_version >= '3.6' # requirements for scaleway modules passlib[argon2] -# requirements for the proxmox modules -proxmoxer < 2.0.0 ; python_version >= '2.7' and python_version <= '3.6' -proxmoxer ; python_version > '3.6' - #requirements for nomad_token modules python-nomad < 2.0.0 ; python_version <= '3.6' python-nomad >= 2.0.0 ; python_version >= '3.7' # requirement for jenkins_build, jenkins_node, jenkins_plugin modules -python-jenkins >= 0.4.12 \ No newline at end of file +python-jenkins >= 0.4.12 + +# requirement for json_patch, json_patch_recipe and json_patch plugins +jsonpatch + +# requirements for the wsl connection plugin +paramiko >= 3.0.0 ; python_version >= '3.6' diff --git a/tests/unit/requirements.yml b/tests/unit/requirements.yml index 586a6a1b37..107fe12569 100644 --- a/tests/unit/requirements.yml +++ b/tests/unit/requirements.yml @@ -4,4 +4,4 @@ # SPDX-License-Identifier: GPL-3.0-or-later collections: -- community.internal_test_tools + - community.internal_test_tools diff --git a/tests/utils/shippable/sanity.sh b/tests/utils/shippable/sanity.sh index 5b88a26778..1ee5140ecb 100755 --- a/tests/utils/shippable/sanity.sh +++ b/tests/utils/shippable/sanity.sh @@ -16,11 +16,6 @@ else base_branch="" fi -if [ "${group}" == 
"extra" ]; then - ../internal_test_tools/tools/run.py --color --bot --junit - exit -fi - case "${group}" in 1) options=(--skip-test pylint --skip-test ansible-doc --skip-test validate-modules) ;; 2) options=( --test ansible-doc --test validate-modules) ;; @@ -28,17 +23,6 @@ case "${group}" in 4) options=(--test pylint --exclude plugins/modules/) ;; esac -# allow collection migration sanity tests for groups 3 and 4 to pass without updating this script during migration -network_path="lib/ansible/modules/network/" - -if [ -d "${network_path}" ]; then - if [ "${group}" -eq 3 ]; then - options+=(--exclude "${network_path}") - elif [ "${group}" -eq 4 ]; then - options+=("${network_path}") - fi -fi - # shellcheck disable=SC2086 ansible-test sanity --color -v --junit ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} \ --docker --base-branch "${base_branch}" \ diff --git a/tests/utils/shippable/shippable.sh b/tests/utils/shippable/shippable.sh index e288cca1cf..134ff6de4b 100755 --- a/tests/utils/shippable/shippable.sh +++ b/tests/utils/shippable/shippable.sh @@ -10,6 +10,7 @@ IFS='/:' read -ra args <<< "$1" ansible_version="${args[0]}" script="${args[1]}" +after_script="${args[2]}" function join { local IFS="$1"; @@ -67,29 +68,32 @@ fi export ANSIBLE_COLLECTIONS_PATHS="${PWD}/../../../" -if [ "${test}" == "sanity/extra" ]; then - retry pip install junit-xml --disable-pip-version-check -fi - # START: HACK install dependencies -if [ "${script}" != "sanity" ] || [ "${test}" == "sanity/extra" ]; then - # Nothing further should be added to this list. - # This is to prevent modules or plugins in this collection having a runtime dependency on other collections. - retry git clone --depth=1 --single-branch https://github.com/ansible-collections/community.internal_test_tools.git "${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/community/internal_test_tools" - retry git clone --depth=1 --single-branch https://github.com/ansible-collections/community.docker.git "${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/community/docker" - # NOTE: we're installing with git to work around Galaxy being a huge PITA (https://github.com/ansible/galaxy/issues/2429) - # retry ansible-galaxy -vvv collection install community.internal_test_tools + +COMMUNITY_CRYPTO_BRANCH=main +if [ "${ansible_version}" == "2.16" ]; then + COMMUNITY_CRYPTO_BRANCH=stable-2 +fi +if [ "${script}" == "linux" ] && [ "$after_script" == "ubuntu2004" ]; then + COMMUNITY_CRYPTO_BRANCH=stable-2 fi -if [ "${script}" != "sanity" ] && [ "${script}" != "units" ] && [ "${test}" != "sanity/extra" ]; then +# Nothing further should be added to this list. +# This is to prevent modules or plugins in this collection having a runtime dependency on other collections. 
+retry git clone --depth=1 --single-branch https://github.com/ansible-collections/community.internal_test_tools.git "${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/community/internal_test_tools" +# NOTE: we're installing with git to work around Galaxy being a huge PITA (https://github.com/ansible/galaxy/issues/2429) +# retry ansible-galaxy -vvv collection install community.internal_test_tools + +if [ "${script}" != "sanity" ] && [ "${script}" != "units" ]; then # To prevent Python dependencies on other collections only install other collections for integration tests retry git clone --depth=1 --single-branch https://github.com/ansible-collections/ansible.posix.git "${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/ansible/posix" - retry git clone --depth=1 --single-branch https://github.com/ansible-collections/community.crypto.git "${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/community/crypto" + retry git clone --depth=1 --single-branch --branch "${COMMUNITY_CRYPTO_BRANCH}" https://github.com/ansible-collections/community.crypto.git "${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/community/crypto" + retry git clone --depth=1 --single-branch https://github.com/ansible-collections/community.docker.git "${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/community/docker" # NOTE: we're installing with git to work around Galaxy being a huge PITA (https://github.com/ansible/galaxy/issues/2429) # retry ansible-galaxy -vvv collection install ansible.posix # retry ansible-galaxy -vvv collection install community.crypto + # retry ansible-galaxy -vvv collection install community.docker fi - # END: HACK export PYTHONIOENCODING='utf-8' @@ -164,10 +168,8 @@ function cleanup ansible-test coverage xml --color -v --requirements --group-by command --group-by version ${stub:+"$stub"} cp -a tests/output/reports/coverage=*.xml "$SHIPPABLE_RESULT_DIR/codecoverage/" - if [ "${ansible_version}" != "2.9" ]; then - # analyze and capture code coverage aggregated by integration test target - ansible-test coverage analyze targets generate -v "$SHIPPABLE_RESULT_DIR/testresults/coverage-analyze-targets.json" - fi + # analyze and capture code coverage aggregated by integration test target + ansible-test coverage analyze targets generate -v "$SHIPPABLE_RESULT_DIR/testresults/coverage-analyze-targets.json" # upload coverage report to codecov.io only when using complete on-demand coverage if [ "${COVERAGE}" == "--coverage" ] && [ "${CHANGED}" == "" ]; then
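Editor's closing note on the mechanism behind most of the test churn in this diff: the tests/unit/plugins/modules/utils.py hunk turns set_module_args() into a context manager built on patch.object(), so basic._ANSIBLE_ARGS is restored on exit even when the test body raises; that is what lets every test above trade the bare set_module_args({...}) call for a `with set_module_args({...}):` block. A standalone sketch of the same pattern, where FakeBasic and set_args are stand-ins invented for illustration (not the real ansible.module_utils.basic or the real helper):

import contextlib
import json
from unittest.mock import patch


class FakeBasic:
    """Stand-in for ansible.module_utils.basic in this sketch."""
    _ANSIBLE_ARGS = None


@contextlib.contextmanager
def set_args(args):
    serialized = json.dumps({'ANSIBLE_MODULE_ARGS': args}).encode('utf-8')
    # patch.object() restores the previous value on exit, even on
    # exceptions -- the property the real set_module_args() gains here.
    with patch.object(FakeBasic, '_ANSIBLE_ARGS', serialized):
        yield


with set_args({'category': 'Raw'}):
    assert FakeBasic._ANSIBLE_ARGS is not None
assert FakeBasic._ANSIBLE_ARGS is None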